[haiku-commits] BRANCH pdziepak-github.scheduler [cec16c2] in src/system/kernel/scheduler: . src/system/kernel

  • From: pdziepak-github.scheduler <community@xxxxxxxxxxxx>
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sun, 24 Nov 2013 23:00:34 +0100 (CET)

added 2 changesets to branch 'refs/remotes/pdziepak-github/scheduler'
old head: a04b7d9f96772b54819061e3716f1e273526a84c
new head: cec16c2dcfb0bddb0d9dc11fb63793c4ca9a53e0
overview: https://github.com/pdziepak/Haiku/compare/a04b7d9...cec16c2

----------------------------------------------------------------------------

03f7d3d: kernel: Restore logical processor disabling

cec16c2: spinlock: Fix panic messages
  
  Thanks Jérôme for pointing this out.

                                    [ Pawel Dziepak <pdziepak@xxxxxxxxxxx> ]

----------------------------------------------------------------------------

7 files changed, 249 insertions(+), 48 deletions(-)
headers/private/kernel/kscheduler.h       |   2 +
headers/private/kernel/thread.h           |   2 +
src/system/kernel/cpu.cpp                 |  40 ++++--
src/system/kernel/int.cpp                 |  26 ++--
src/system/kernel/scheduler/scheduler.cpp | 184 ++++++++++++++++++++++++--
src/system/kernel/smp.cpp                 |  28 ++--
src/system/kernel/thread.cpp              |  15 ++-

############################################################################

Commit:      03f7d3d1db10c0a467bea944fb3230ee70c6e015
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Sun Nov 24 21:51:07 2013 UTC

kernel: Restore logical processor disabling

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/kscheduler.h b/headers/private/kernel/kscheduler.h
index 92a82e9..2d8222e 100644
--- a/headers/private/kernel/kscheduler.h
+++ b/headers/private/kernel/kscheduler.h
@@ -83,6 +83,8 @@ status_t scheduler_set_operation_mode(scheduler_mode mode);
 */
 void scheduler_dump_thread_data(Thread* thread);
 
+void scheduler_set_cpu_enabled(int32 cpu, bool enabled);
+
 void scheduler_add_listener(struct SchedulerListener* listener);
 void scheduler_remove_listener(struct SchedulerListener* listener);
 
diff --git a/headers/private/kernel/thread.h b/headers/private/kernel/thread.h
index 72d123b..61c6638 100644
--- a/headers/private/kernel/thread.h
+++ b/headers/private/kernel/thread.h
@@ -87,6 +87,8 @@ status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
 void thread_yield(void);
 void thread_exit(void);
 
+void thread_map(void (*function)(Thread* thread, void* data), void* data);
+
 int32 thread_max_threads(void);
 int32 thread_used_threads(void);
 
diff --git a/src/system/kernel/cpu.cpp b/src/system/kernel/cpu.cpp
index 433d567..5ab8d26 100644
--- a/src/system/kernel/cpu.cpp
+++ b/src/system/kernel/cpu.cpp
@@ -18,6 +18,7 @@
 #include <cpufreq.h>
 
 #include <boot/kernel_args.h>
+#include <kscheduler.h>
 #include <thread_types.h>
 #include <util/AutoLock.h>
 
@@ -273,7 +274,6 @@ _user_cpu_enabled(int32 cpu)
 status_t
 _user_set_cpu_enabled(int32 cpu, bool enabled)
 {
-       status_t status = B_OK;
        cpu_status state;
        int32 i, count;
 
@@ -283,8 +283,7 @@ _user_set_cpu_enabled(int32 cpu, bool enabled)
        // We need to lock here to make sure that no one can disable
        // the last CPU
 
-       state = disable_interrupts();
-       acquire_spinlock(&sSetCpuLock);
+       InterruptsSpinLocker locker(sSetCpuLock);
 
        if (!enabled) {
                // check if this is the last CPU to be disabled
@@ -294,14 +293,37 @@ _user_set_cpu_enabled(int32 cpu, bool enabled)
                }
 
                if (count == 1)
-                       status = B_NOT_ALLOWED;
+                       return B_NOT_ALLOWED;
        }
 
-       if (status == B_OK)
-               gCPU[cpu].disabled = !enabled;
+       bool oldState = gCPU[cpu].disabled;
 
-       release_spinlock(&sSetCpuLock);
-       restore_interrupts(state);
-       return status;
+       if (oldState != !enabled)
+               scheduler_set_cpu_enabled(cpu, enabled);
+
+       if (!enabled) {
+               if (smp_get_current_cpu() == cpu) {
+                       locker.Unlock();
+                       thread_yield();
+                       locker.Lock();
+               }
+
+               // someone reenabled the CPU while we were rescheduling
+               if (!gCPU[cpu].disabled)
+                       return B_OK;
+
+               ASSERT(smp_get_current_cpu() != cpu);
+               while (!thread_is_idle_thread(gCPU[cpu].running_thread)) {
+                       locker.Unlock();
+                       thread_yield();
+                       locker.Lock();
+
+                       if (!gCPU[cpu].disabled)
+                               return B_OK;
+                       ASSERT(smp_get_current_cpu() != cpu);
+               }
+       }
+
+       return B_OK;
 }
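
The rewrite above swaps the manual disable_interrupts()/acquire_spinlock()
pair for the RAII InterruptsSpinLocker from <util/AutoLock.h> (already
included in this file), which is what makes the new early returns safe. A
minimal sketch of the idea behind that locker, as a simplified stand-in
rather than Haiku's actual implementation:

    // Simplified stand-in for InterruptsSpinLocker. Interrupts stay
    // disabled and the spinlock stays held for exactly the object's
    // lifetime, so every early return in _user_set_cpu_enabled()
    // releases both correctly.
    class SimpleInterruptsSpinLocker {
    public:
        SimpleInterruptsSpinLocker(spinlock& lock)
            :
            fLock(lock)
        {
            Lock();
        }

        ~SimpleInterruptsSpinLocker()
        {
            if (fLocked)
                Unlock();
        }

        void Lock()
        {
            fState = disable_interrupts();
            acquire_spinlock(&fLock);
            fLocked = true;
        }

        void Unlock()
        {
            release_spinlock(&fLock);
            restore_interrupts(fState);
            fLocked = false;
        }

    private:
        spinlock&   fLock;
        cpu_status  fState;
        bool        fLocked;
    };

The Unlock()/Lock() pair mirrors how the patch drops the lock around
thread_yield() and then revalidates gCPU[cpu].disabled before continuing.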
 
diff --git a/src/system/kernel/int.cpp b/src/system/kernel/int.cpp
index baed8fd..5a0bebf 100644
--- a/src/system/kernel/int.cpp
+++ b/src/system/kernel/int.cpp
@@ -391,14 +391,17 @@ restore_interrupts(cpu_status status)
 static
 uint32 assign_cpu(void)
 {
-       int32 nextID = atomic_add(&sLastCPU, 1);
-       cpu_topology_node* node = get_cpu_topology();
-
-       while (node->level != CPU_TOPOLOGY_SMT) {
-               int levelSize = node->children_count;
-               node = node->children[nextID % levelSize];
-               nextID /= levelSize;
-       }
+       cpu_topology_node* node;
+       do {
+               int32 nextID = atomic_add(&sLastCPU, 1);
+               node = get_cpu_topology();
+
+               while (node->level != CPU_TOPOLOGY_SMT) {
+                       int levelSize = node->children_count;
+                       node = node->children[nextID % levelSize];
+                       nextID /= levelSize;
+               }
+       } while (gCPU[node->id].disabled);
 
        return node->id;
 }
@@ -680,6 +683,12 @@ void assign_io_interrupt_to_cpu(long vector, int32 newCPU)
 
        int32 oldCPU = sVectors[vector].assigned_cpu.cpu;
 
+       if (newCPU == -1)
+               newCPU = assign_cpu();
+       dprintf_no_syslog("IRQ %ld CPU %" B_PRId32 " -> CPU %" B_PRId32 "\n", vector, oldCPU, newCPU);
+       if (newCPU == oldCPU)
+               return;
+
        ASSERT(oldCPU != -1);
        cpu_ent* cpu = &gCPU[oldCPU];
 
@@ -693,6 +702,5 @@ void assign_io_interrupt_to_cpu(long vector, int32 newCPU)
        sVectors[vector].assigned_cpu.cpu = newCPU;
        arch_int_assign_to_cpu(vector, newCPU);
        list_add_item(&cpu->irqs, &sVectors[vector].assigned_cpu);
-       locker.Unlock();
 }
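
The retry loop in assign_cpu() treats the incrementing counter as a
mixed-radix number whose digits select a child at each topology level,
round-robinning over the SMT leaves; the new do/while simply draws another
counter value whenever the chosen logical CPU is disabled. A hypothetical
standalone illustration of the digit decomposition (names assumed, not
from the patch; the actual tree depth depends on the reported topology):

    // For 2 packages x 2 cores x 2 SMT threads, counters 0..7 visit
    // P0C0T0, P1C0T0, P0C1T0, P1C1T0, P0C0T1, P1C0T1, P0C1T1, P1C1T1:
    // consecutive assignments spread across packages first, then cores.
    static void
    decompose(int32 counter, const int32* levelSizes, int32* digits,
        int32 levels)
    {
        for (int32 i = 0; i < levels; i++) {
            digits[i] = counter % levelSizes[i];
            counter /= levelSizes[i];
        }
    }

The assign_io_interrupt_to_cpu() change builds on this: a newCPU of -1
now means "let assign_cpu() pick a target", and reassigning a vector to
the CPU it already lives on becomes a no-op.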
 
diff --git a/src/system/kernel/scheduler/scheduler.cpp b/src/system/kernel/scheduler/scheduler.cpp
index 105d7fa..bc4fd94 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -399,6 +399,11 @@ update_load_heaps(int32 core)
 
        CoreEntry* entry = &gCoreEntries[core];
 
+       if (entry->fCPUCount == 0) {
+               entry->fLoad = 0;
+               return;
+       }
+
        WriteSpinLocker coreLocker(gCoreHeapsLock);
 
        int32 newKey = get_core_load(entry);
@@ -626,12 +631,9 @@ thread_goes_away(Thread* thread)
 
        scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
 
-       ASSERT(schedulerThreadData->previous_core >= 0);
-       int32 core = schedulerThreadData->previous_core;
-
        schedulerThreadData->went_sleep = system_time();
        schedulerThreadData->went_sleep_active
-               = atomic_get64(&gCoreEntries[core].fActiveTime);
+               = atomic_get64(&gCoreEntries[smp_get_current_cpu()].fActiveTime);
 }
 
 
@@ -1028,7 +1030,7 @@ track_cpu_activity(Thread* oldThread, Thread* nextThread, int32 thisCore)
                atomic_add64(&gCoreEntries[thisCore].fActiveTime, active);
        }
 
-       if (!gSingleCore)
+       if (!gSingleCore && !gCPU[smp_get_current_cpu()].disabled)
                compute_cpu_load(smp_get_current_cpu());
 
        int32 oldPriority = get_effective_priority(oldThread);
@@ -1150,9 +1152,24 @@ _scheduler_reschedule(void)
        schedulerOldThreadData->lost_cpu = false;
 
        // select thread with the biggest priority and enqueue back the old thread
-       Thread* nextThread
-               = choose_next_thread(thisCPU, enqueueOldThread ? oldThread : NULL,
-                       putOldThreadAtBack);
+       Thread* nextThread;
+       if (gCPU[thisCPU].disabled) {
+               if (!thread_is_idle_thread(oldThread)) {
+                       SpinLocker runQueueLocker(gCoreEntries[thisCore].fQueueLock);
+
+                       nextThread = gPinnedRunQueues[thisCPU].GetHead(B_IDLE_PRIORITY);
+                       gPinnedRunQueues[thisCPU].Remove(nextThread);
+                       nextThread->scheduler_data->enqueued = false;
+
+                       putOldThreadAtBack = oldThread->pinned_to_cpu == 0;
+               } else
+                       nextThread = oldThread;
+       } else {
+               nextThread
+                       = choose_next_thread(thisCPU, enqueueOldThread ? oldThread : NULL,
+                               putOldThreadAtBack);
+       }
+
        if (nextThread != oldThread) {
                if (enqueueOldThread) {
                        if (putOldThreadAtBack)
@@ -1174,7 +1191,8 @@ _scheduler_reschedule(void)
                oldThread, nextThread);
 
        // update CPU heap
-       update_cpu_priority(thisCPU, get_effective_priority(nextThread));
+       if (!gCPU[thisCPU].disabled)
+               update_cpu_priority(thisCPU, get_effective_priority(nextThread));
 
        nextThread->state = B_THREAD_RUNNING;
        nextThread->next_state = B_THREAD_READY;
@@ -1281,6 +1299,22 @@ scheduler_start(void)
 }
 
 
+static inline void
+acquire_big_scheduler_lock(void)
+{
+       for (int32_t i = 0; i < smp_get_num_cpus(); i++)
+               acquire_write_spinlock(&gCPUEntries[i].fSchedulerModeLock);
+}
+
+
+static inline void
+release_big_scheduler_lock(void)
+{
+       for (int32_t i = 0; i < smp_get_num_cpus(); i++)
+               release_write_spinlock(&gCPUEntries[i].fSchedulerModeLock);
+}
+
+
 status_t
 scheduler_set_operation_mode(scheduler_mode mode)
 {
@@ -1292,21 +1326,145 @@ scheduler_set_operation_mode(scheduler_mode mode)
        dprintf("scheduler: switching to %s mode\n", 
sSchedulerModes[mode]->name);
 
        InterruptsLocker _;
-       for (int32_t i = 0; i < smp_get_num_cpus(); i++)
-               acquire_write_spinlock(&gCPUEntries[i].fSchedulerModeLock);
+       acquire_big_scheduler_lock();
 
        sCurrentModeID = mode;
        sCurrentMode = sSchedulerModes[mode];
        sCurrentMode->switch_to_mode();
 
-       for (int32_t i = 0; i < smp_get_num_cpus(); i++)
-               release_write_spinlock(&gCPUEntries[i].fSchedulerModeLock);
+       release_big_scheduler_lock();
 
        return B_OK;
 }
 
 
 static void
+unassign_thread(Thread* thread, void* data)
+{
+       int32 core = *(int32*)data;
+
+       if (thread->scheduler_data->previous_core == core
+               && thread->pinned_to_cpu == 0) {
+               thread->scheduler_data->previous_core = -1;
+       }
+}
+
+
+void
+scheduler_set_cpu_enabled(int32 cpu, bool enabled)
+{
+       dprintf("scheduler: %s CPU %" B_PRId32 "\n",
+               enabled ? "enabling" : "disabling", cpu);
+
+       InterruptsLocker _;
+       acquire_big_scheduler_lock();
+
+       gCPU[cpu].disabled = !enabled;
+
+       CoreEntry* core = &gCoreEntries[gCPUToCore[cpu]];
+       PackageEntry* package = &gPackageEntries[gCPUToPackage[cpu]];
+
+       int32 oldCPUCount = core->fCPUCount;
+       ASSERT(oldCPUCount >= 0);
+       if (enabled)
+               core->fCPUCount++;
+       else {
+               update_cpu_priority(cpu, B_IDLE_PRIORITY);
+               core->fCPUCount--;
+       }
+
+       if (core->fCPUCount == 0) {
+               // core has been disabled
+               ASSERT(!enabled);
+
+               int32 load = CoreLoadHeap::GetKey(core);
+               if (load > kHighLoad) {
+                       gCoreHighLoadHeap->ModifyKey(core, -1);
+                       ASSERT(gCoreHighLoadHeap->PeekMinimum() == core);
+                       gCoreHighLoadHeap->RemoveMinimum();
+               } else {
+                       gCoreLoadHeap->ModifyKey(core, -1);
+                       ASSERT(gCoreLoadHeap->PeekMinimum() == core);
+                       gCoreLoadHeap->RemoveMinimum();
+               }
+
+               package->fIdleCores.Remove(core);
+               package->fIdleCoreCount--;
+               package->fCoreCount--;
+
+               if (package->fCoreCount == 0)
+                       gIdlePackageList->Remove(package);
+
+               // get rid of threads
+               thread_map(unassign_thread, &core->fCoreID);
+
+               while (gRunQueues[core->fCoreID].PeekMaximum() != NULL) {
+                       Thread* thread = gRunQueues[core->fCoreID].PeekMaximum();
+                       gRunQueues[core->fCoreID].Remove(thread);
+                       thread->scheduler_data->enqueued = false;
+
+                       ASSERT(thread->scheduler_data->previous_core == -1);
+                       enqueue(thread, false);
+               }
+       } else if (oldCPUCount == 0) {
+               // core has been reenabled
+               ASSERT(enabled);
+
+               gCPUEntries[cpu].fLoad = 0;
+               core->fLoad = 0;
+               gCoreLoadHeap->Insert(core, 0);
+
+               package->fCoreCount++;
+               package->fIdleCoreCount++;
+               package->fIdleCores.Add(core);
+
+               if (package->fCoreCount == 1)
+                       gIdlePackageList->Add(package);         
+       }
+
+       if (enabled) {
+               gCPUPriorityHeaps[core->fCoreID].Insert(&gCPUEntries[cpu],
+                       B_IDLE_PRIORITY);
+               gCPUEntries[cpu].fLoad = 0;
+       } else {
+               gCPUPriorityHeaps[core->fCoreID].ModifyKey(&gCPUEntries[cpu],
+                       THREAD_MAX_SET_PRIORITY + 1);
+               ASSERT(gCPUPriorityHeaps[core->fCoreID].PeekMaximum()
+                       == &gCPUEntries[cpu]);
+               gCPUPriorityHeaps[core->fCoreID].RemoveMaximum();
+
+               core->fLoad -= gCPUEntries[cpu].fLoad;
+       }
+
+       if (!enabled) {
+               cpu_ent* entry = &gCPU[cpu];
+
+               // get rid of irqs
+               SpinLocker locker(entry->irqs_lock);
+               irq_assignment* irq
+                       = (irq_assignment*)list_get_first_item(&entry->irqs);
+               while (irq != NULL) {
+                       locker.Unlock();
+
+                       assign_io_interrupt_to_cpu(irq->irq, -1);
+
+                       locker.Lock();
+                       irq = (irq_assignment*)list_get_first_item(&entry->irqs);
+               }
+               locker.Unlock();
+
+               // don't wait until the thread quantum ends
+               if (smp_get_current_cpu() != cpu) {
+                       smp_send_ici(cpu, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
+                               SMP_MSG_FLAG_ASYNC);
+               }
+       }
+
+       release_big_scheduler_lock();
+}
+
+
+static void
 traverse_topology_tree(cpu_topology_node* node, int packageID, int coreID)
 {
        switch (node->level) {
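
Both scheduler_set_operation_mode() and scheduler_set_cpu_enabled() now
serialize against every CPU through the new acquire_big_scheduler_lock()/
release_big_scheduler_lock() helpers, which write-acquire each per-CPU
fSchedulerModeLock in turn. A sketch of the same pair wrapped in RAII, in
the style the kernel uses for its other lockers (hypothetical class, not
part of the patch):

    // Hypothetical RAII wrapper over the helpers introduced above; the
    // patch itself calls the acquire/release pair explicitly. Interrupts
    // must already be disabled, which both callers ensure via
    // InterruptsLocker.
    class BigSchedulerLocker {
    public:
        BigSchedulerLocker()
        {
            acquire_big_scheduler_lock();
        }

        ~BigSchedulerLocker()
        {
            release_big_scheduler_lock();
        }
    };
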
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
index 3aee21c..ca66a46 100644
--- a/src/system/kernel/thread.cpp
+++ b/src/system/kernel/thread.cpp
@@ -2408,8 +2408,6 @@ peek_next_thread_id()
 void
 thread_yield(void)
 {
-       // Yielding is for being nice, not for making things work.
-#if !KDEBUG
        Thread *thread = thread_get_current_thread();
        if (thread == NULL)
                return;
@@ -2418,7 +2416,18 @@ thread_yield(void)
 
        thread->has_yielded = true;
        scheduler_reschedule();
-#endif
+}
+
+
+void
+thread_map(void (*function)(Thread* thread, void* data), void* data)
+{
+       InterruptsSpinLocker threadHashLocker(sThreadHashLock);
+
+       for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
+               Thread* thread = it.Next();) {
+               function(thread, data);
+       }
 }
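
Two things change in thread.cpp: thread_yield() now reschedules even in
KDEBUG builds (the wait loops in _user_set_cpu_enabled() depend on it),
and the new thread_map() applies a callback to every known thread while
holding sThreadHashLock with interrupts disabled, so callbacks must stay
short and must not block. A hypothetical usage example (names assumed,
not from the patch):

    // Count the threads whose last core was 'core'. The callback runs
    // under sThreadHashLock with interrupts off.
    struct CountData {
        int32   core;
        int32   count;
    };

    static void
    count_threads_on_core(Thread* thread, void* data)
    {
        CountData* countData = (CountData*)data;
        if (thread->scheduler_data->previous_core == countData->core)
            countData->count++;
    }

    // ...
    CountData data = { core, 0 };
    thread_map(count_threads_on_core, &data);

The patch uses the same pattern with unassign_thread() to detach all
unpinned threads from a core that is going away.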
 
 

############################################################################

Commit:      cec16c2dcfb0bddb0d9dc11fb63793c4ca9a53e0
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Sun Nov 24 21:54:14 2013 UTC

spinlock: Fix panic messages

Thanks Jérôme for pointing this out.

----------------------------------------------------------------------------

diff --git a/src/system/kernel/smp.cpp b/src/system/kernel/smp.cpp
index 08203ef..a07df3f 100644
--- a/src/system/kernel/smp.cpp
+++ b/src/system/kernel/smp.cpp
@@ -532,13 +532,13 @@ try_acquire_write_spinlock(rw_spinlock* lock)
 {
 #if DEBUG_SPINLOCKS
        if (are_interrupts_enabled()) {
-               panic("try_acquire_write_spinlock: attempt to acquire lock %p with"
-                       " interrupts enabled", lock);
+               panic("try_acquire_write_spinlock: attempt to acquire lock %p with "
+                       "interrupts enabled", lock);
        }
 
        if (sNumCPUs < 2 && lock->lock != 0) {
-               panic("acquire_spinlock_cpu(): attempt to acquire lock %p twice on "
-                       "non-SMP system", lock);
+               panic("try_acquire_write_spinlock(): attempt to acquire lock %p twice "
+                       "on non-SMP system", lock);
        }
 #endif
 
@@ -551,8 +551,8 @@ acquire_write_spinlock(rw_spinlock* lock)
 {
 #if DEBUG_SPINLOCKS
        if (are_interrupts_enabled()) {
-               panic("acquire_write_spinlock: attempt to acquire lock %p with"
-                       " interrupts enabled", lock);
+               panic("acquire_write_spinlock: attempt to acquire lock %p with "
+                       "interrupts enabled", lock);
        }
 #endif
 
@@ -582,8 +582,8 @@ release_write_spinlock(rw_spinlock* lock)
 #if DEBUG_SPINLOCKS
        uint32 previous = atomic_get_and_set(&lock->lock, 0);
        if ((previous & 1u << 31) == 0) {
-               panic("release_write_spinlock: lock %p was already released (value:"
-                       " %x)\n", lock, previous);
+               panic("release_write_spinlock: lock %p was already released (value: "
+                       "%x)\n", lock, previous);
        }
 #else
        atomic_set(&lock->lock, 0);
@@ -596,13 +596,13 @@ try_acquire_read_spinlock(rw_spinlock* lock)
 {
 #if DEBUG_SPINLOCKS
        if (are_interrupts_enabled()) {
-               panic("try_acquire_read_spinlock: attempt to acquire lock %p with"
-                       " interrupts enabled", lock);
+               panic("try_acquire_read_spinlock: attempt to acquire lock %p with "
+                       "interrupts enabled", lock);
        }
 
        if (sNumCPUs < 2 && lock->lock != 0) {
-               panic("acquire_spinlock_cpu(): attempt to acquire lock %p twice on "
-                       "non-SMP system", lock);
+               panic("try_acquire_read_spinlock(): attempt to acquire lock %p twice "
+                       "on non-SMP system", lock);
        }
 #endif
 
@@ -620,8 +620,8 @@ acquire_read_spinlock(rw_spinlock* lock)
 {
 #if DEBUG_SPINLOCKS
        if (are_interrupts_enabled()) {
-               panic("acquire_read_spinlock: attempt to acquire lock %p with"
-                       " interrupts enabled", lock);
+               panic("acquire_read_spinlock: attempt to acquire lock %p with "
+                       "interrupts enabled", lock);
        }
 #endif
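
Note that moving the space between the split string literals is purely
cosmetic: C++ concatenates adjacent literals at compile time, so both the
old and the new split produce the same message. The substantive fix is in
the two messages that blamed acquire_spinlock_cpu() for failures in the
rw_spinlock try-acquire paths. For illustration (snippet not from the
patch):

    // Adjacent string literals concatenate at compile time; both forms
    // yield "attempt to acquire lock %p with interrupts enabled".
    const char* oldSplit = "attempt to acquire lock %p with"
        " interrupts enabled";
    const char* newSplit = "attempt to acquire lock %p with "
        "interrupts enabled";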
 

