[haiku-commits] BRANCH pdziepak-github.scheduler [288a266] src/system/kernel/scheduler

  • From: pdziepak-github.scheduler <community@xxxxxxxxxxxx>
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Wed, 13 Nov 2013 05:45:34 +0100 (CET)

added 3 changesets to branch 'refs/remotes/pdziepak-github/scheduler'
old head: 829f83632457377650eaf92309fbc856a2276e34
new head: 288a2664a2de429f159d746beaab87373184cd3d
overview: https://github.com/pdziepak/Haiku/compare/829f836...288a266

----------------------------------------------------------------------------

5f3a65e: scheduler: Remove sCorePriorityHeap
  
  sCorePriorityHeap was meant to be a temporary solution anyway. Thread
  migration and assignment is now entirely based on core load.

72e1b39: scheduler: Fix gcc2 build

288a266: scheduler: Remove sSchedulerInternalLock
  
   * pin idle threads to their specific CPUs
   * allow scheduler to implement SMP_MSG_RESCHEDULE handler
   * scheduler_set_thread_priority() reworked
   * at reschedule: enqueue old thread after dequeueing the new one

                                    [ Pawel Dziepak <pdziepak@xxxxxxxxxxx> ]

----------------------------------------------------------------------------

4 files changed, 259 insertions(+), 221 deletions(-)
headers/private/kernel/kscheduler.h       |   4 +-
src/system/kernel/scheduler/scheduler.cpp | 468 ++++++++++++++------------
src/system/kernel/smp.cpp                 |   5 +-
src/system/kernel/thread.cpp              |   3 +-

############################################################################

Commit:      5f3a65e5780002f8a4acf814a1b3347e95e65bd2
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Nov 12 23:01:02 2013 UTC

scheduler: Remove sCorePriorityHeap

sCorePriorityHeap was meant to be a temporary solution anyway. Thread
migration and assignment is now entirely based on core load.

----------------------------------------------------------------------------

diff --git a/src/system/kernel/scheduler/scheduler.cpp 
b/src/system/kernel/scheduler/scheduler.cpp
index 9a628c8..735cc52 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -115,15 +115,11 @@ struct CoreEntry : public 
DoublyLinkedListLinkImpl<CoreEntry> {
 
        int                     fLoad;
 } CACHE_LINE_ALIGN;
-typedef Heap<CoreEntry, int32, HeapLesserCompare<int32>,
-               HeapMemberGetLink<CoreEntry, int32, 
&CoreEntry::fPriorityHeapLink> >
-       CorePriorityHeap;
 typedef MinMaxHeap<CoreEntry, int, MinMaxHeapCompare<int>,
                MinMaxHeapMemberGetLink<CoreEntry, int, 
&CoreEntry::fLoadHeapLink> >
        CoreLoadHeap;
 
 static CoreEntry* sCoreEntries;
-static CorePriorityHeap* sCorePriorityHeap;
 static CoreLoadHeap* sCoreLoadHeap;
 static CoreLoadHeap* sCoreHighLoadHeap;
 
@@ -390,33 +386,6 @@ dump_core_load_heap(CoreLoadHeap* heap)
 static int
 dump_cpu_heap(int argc, char** argv)
 {
-       CorePriorityHeap temp(sRunQueueCount);
-
-       CoreEntry* entry = sCorePriorityHeap->PeekRoot();
-       if (entry != NULL)
-               kprintf("core priority\n");
-       else
-               kprintf("No active cores.\n");
-
-       while (entry) {
-               int32 core = entry->fCoreID;
-               int32 key = CorePriorityHeap::GetKey(entry);
-               kprintf("%4" B_PRId32 " %8" B_PRId32 "\n", core, key);
-
-               sCorePriorityHeap->RemoveRoot();
-               temp.Insert(entry, key);
-
-               entry = sCorePriorityHeap->PeekRoot();
-       }
-
-       entry = temp.PeekRoot();
-       while (entry) {
-               int32 key = CorePriorityHeap::GetKey(entry);
-               temp.RemoveRoot();
-               sCorePriorityHeap->Insert(entry, key);
-               entry = temp.PeekRoot();
-       }
-
        kprintf("\ncore load\n");
        dump_core_load_heap(sCoreLoadHeap);
        kprintf("---------\n");
@@ -664,6 +633,8 @@ update_priority_heaps(int32 cpu, int32 priority)
 {
        int32 core = sCPUToCore[cpu];
 
+       int32 corePriority = 
CPUHeap::GetKey(sCPUPriorityHeaps[core].PeekMaximum());
+
        sCPUPriorityHeaps[core].ModifyKey(&sCPUEntries[cpu], priority);
 
        if (sSingleCore)
@@ -671,72 +642,63 @@ update_priority_heaps(int32 cpu, int32 priority)
 
        int32 maxPriority
                = CPUHeap::GetKey(sCPUPriorityHeaps[core].PeekMaximum());
-       int32 corePriority = CorePriorityHeap::GetKey(&sCoreEntries[core]);
-
-       if (corePriority != maxPriority) {
-               if (maxPriority == B_IDLE_PRIORITY) {
-                       sCorePriorityHeap->ModifyKey(&sCoreEntries[core], 
B_IDLE_PRIORITY);
-                       ASSERT(sCorePriorityHeap->PeekRoot() == 
&sCoreEntries[core]);
-                       sCorePriorityHeap->RemoveRoot();
-               } else if (corePriority == B_IDLE_PRIORITY)
-                       sCorePriorityHeap->Insert(&sCoreEntries[core], 
maxPriority);
-               else
-                       sCorePriorityHeap->ModifyKey(&sCoreEntries[core], 
maxPriority);
-
-               int32 package = sCPUToPackage[cpu];
-               PackageEntry* packageEntry = &sPackageEntries[package];
-               if (maxPriority == B_IDLE_PRIORITY) {
-                       // core goes idle
-                       ASSERT(packageEntry->fIdleCoreCount >= 0);
-                       ASSERT(packageEntry->fIdleCoreCount < 
packageEntry->fCoreCount);
-
-                       packageEntry->fIdleCoreCount++;
-                       packageEntry->fIdleCores.Add(&sCoreEntries[core]);
-
-                       if (packageEntry->fIdleCoreCount == 1) {
-                               // first core on that package to go idle
-
-                               if (packageEntry->fCoreCount > 1)
-                                       sPackageUsageHeap->Insert(packageEntry, 
1);
-                               else
-                                       sIdlePackageList->Add(packageEntry);
-                       } else if (packageEntry->fIdleCoreCount
-                               == packageEntry->fCoreCount) {
-                               // package goes idle
-                               sPackageUsageHeap->ModifyKey(packageEntry, 0);
-                               ASSERT(sPackageUsageHeap->PeekMinimum() == 
packageEntry);
-                               sPackageUsageHeap->RemoveMinimum();
 
+       if (corePriority == maxPriority)
+               return;
+
+       int32 package = sCPUToPackage[cpu];
+       PackageEntry* packageEntry = &sPackageEntries[package];
+       if (maxPriority == B_IDLE_PRIORITY) {
+               // core goes idle
+               ASSERT(packageEntry->fIdleCoreCount >= 0);
+               ASSERT(packageEntry->fIdleCoreCount < packageEntry->fCoreCount);
+
+               packageEntry->fIdleCoreCount++;
+               packageEntry->fIdleCores.Add(&sCoreEntries[core]);
+
+               if (packageEntry->fIdleCoreCount == 1) {
+                       // first core on that package to go idle
+
+                       if (packageEntry->fCoreCount > 1)
+                               sPackageUsageHeap->Insert(packageEntry, 1);
+                       else
                                sIdlePackageList->Add(packageEntry);
-                       } else {
-                               sPackageUsageHeap->ModifyKey(packageEntry,
-                                       packageEntry->fIdleCoreCount);
-                       }
-               } else if (corePriority == B_IDLE_PRIORITY) {
-                       // core wakes up
-                       ASSERT(packageEntry->fIdleCoreCount > 0);
-                       ASSERT(packageEntry->fIdleCoreCount <= 
packageEntry->fCoreCount);
+               } else if (packageEntry->fIdleCoreCount
+                       == packageEntry->fCoreCount) {
+                       // package goes idle
+                       sPackageUsageHeap->ModifyKey(packageEntry, 0);
+                       ASSERT(sPackageUsageHeap->PeekMinimum() == 
packageEntry);
+                       sPackageUsageHeap->RemoveMinimum();
+
+                       sIdlePackageList->Add(packageEntry);
+               } else {
+                       sPackageUsageHeap->ModifyKey(packageEntry,
+                               packageEntry->fIdleCoreCount);
+               }
+       } else if (corePriority == B_IDLE_PRIORITY) {
+               // core wakes up
+               ASSERT(packageEntry->fIdleCoreCount > 0);
+               ASSERT(packageEntry->fIdleCoreCount <= 
packageEntry->fCoreCount);
 
-                       packageEntry->fIdleCoreCount--;
-                       packageEntry->fIdleCores.Remove(&sCoreEntries[core]);
+               packageEntry->fIdleCoreCount--;
+               packageEntry->fIdleCores.Remove(&sCoreEntries[core]);
 
-                       if (packageEntry->fIdleCoreCount + 1 == 
packageEntry->fCoreCount) {
-                               // package wakes up
-                               sIdlePackageList->Remove(packageEntry);
+               if (packageEntry->fIdleCoreCount + 1 == 
packageEntry->fCoreCount) {
+                       // package wakes up
+                       sIdlePackageList->Remove(packageEntry);
 
-                               if (packageEntry->fIdleCoreCount > 0) {
-                                       sPackageUsageHeap->Insert(packageEntry,
-                                               packageEntry->fIdleCoreCount);
-                               }
-                       } else if (packageEntry->fIdleCoreCount == 0) {
-                               // no more idle cores in the package
-                               sPackageUsageHeap->ModifyKey(packageEntry, 0);
-                               ASSERT(sPackageUsageHeap->PeekMinimum() == 
packageEntry);
-                               sPackageUsageHeap->RemoveMinimum();
-                       } else {
-                               sPackageUsageHeap->ModifyKey(packageEntry,
+                       if (packageEntry->fIdleCoreCount > 0) {
+                               sPackageUsageHeap->Insert(packageEntry,
                                        packageEntry->fIdleCoreCount);
                        }
+               } else if (packageEntry->fIdleCoreCount == 0) {
+                       // no more idle cores in the package
+                       sPackageUsageHeap->ModifyKey(packageEntry, 0);
+                       ASSERT(sPackageUsageHeap->PeekMinimum() == 
packageEntry);
+                       sPackageUsageHeap->RemoveMinimum();
+               } else {
+                       sPackageUsageHeap->ModifyKey(packageEntry,
+                               packageEntry->fIdleCoreCount);
                }
        }
 }
@@ -757,14 +719,9 @@ choose_core_low_latency(Thread* thread)
                entry = package->fIdleCores.Last();
        } else {
                // no idle cores, use least occupied core
-               entry = sCorePriorityHeap->PeekRoot();
-
-               int32 priority = get_effective_priority(thread);
-               if (CorePriorityHeap::GetKey(entry) >= priority) {
-                       entry = sCoreLoadHeap->PeekMinimum();
-                       if (entry == NULL)
-                               entry = sCoreHighLoadHeap->PeekMinimum();
-               }
+               entry = sCoreLoadHeap->PeekMinimum();
+               if (entry == NULL)
+                       entry = sCoreHighLoadHeap->PeekMinimum();
        }
 
        ASSERT(entry != NULL);
@@ -792,11 +749,9 @@ choose_core_power_saving(Thread* thread)
                if (sSmallTaskCore < 0)
                        sSmallTaskCore = sCoreLoadHeap->PeekMaximum()->fCoreID;
                entry = &sCoreEntries[sSmallTaskCore];
-       } else if (sCorePriorityHeap->PeekRoot() != NULL
-               && CorePriorityHeap::GetKey(sCorePriorityHeap->PeekRoot())
-                       < priority) {
+       } else if (sCoreLoadHeap->PeekMinimum() != NULL) {
                // run immediately on already woken core
-               entry = sCorePriorityHeap->PeekRoot();
+               entry = sCoreLoadHeap->PeekMinimum();
        } else if (sPackageUsageHeap->PeekMinimum() != NULL) {
                // wake new core
                PackageEntry* package = sPackageUsageHeap->PeekMinimum();
@@ -1749,11 +1704,6 @@ _scheduler_init()
                return B_NO_MEMORY;
        ArrayDeleter<CoreEntry> coreEntriesDeleter(sCoreEntries);
 
-       sCorePriorityHeap = new CorePriorityHeap(coreCount);
-       if (sCorePriorityHeap == NULL)
-               return B_NO_MEMORY;
-       ObjectDeleter<CorePriorityHeap> 
corePriorityHeapDeleter(sCorePriorityHeap);
-
        sCoreLoadHeap = new CoreLoadHeap;
        if (sCoreLoadHeap == NULL)
                return B_NO_MEMORY;
@@ -1770,11 +1720,6 @@ _scheduler_init()
                status_t result = sCoreLoadHeap->Insert(&sCoreEntries[i], 0);
                if (result != B_OK)
                        return result;
-
-               result = sCorePriorityHeap->Insert(&sCoreEntries[i], 
B_IDLE_PRIORITY);
-               if (result != B_OK)
-                       return result;
-               sCorePriorityHeap->RemoveRoot();
        }
 
        sCPUPriorityHeaps = new CPUHeap[coreCount];
@@ -1845,7 +1790,6 @@ _scheduler_init()
        pinnedRunQueuesDeleter.Detach();
        coreHighLoadHeapDeleter.Detach();
        coreLoadHeapDeleter.Detach();
-       corePriorityHeapDeleter.Detach();
        cpuPriorityHeapDeleter.Detach();
        coreEntriesDeleter.Detach();
        cpuEntriesDeleter.Detach();

############################################################################

Commit:      72e1b394a40aa34fd93fc83e9137dfbe48a0e34e
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Nov 12 23:36:48 2013 UTC

scheduler: Fix gcc2 build

----------------------------------------------------------------------------

diff --git a/src/system/kernel/scheduler/scheduler.cpp 
b/src/system/kernel/scheduler/scheduler.cpp
index 735cc52..0320363 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -91,19 +91,17 @@ struct CPUEntry : public MinMaxHeapLinkImpl<CPUEntry, 
int32> {
        bigtime_t       fMeasureActiveTime;
        bigtime_t       fMeasureTime;
 
-       int                     fLoad;
+       int32           fLoad;
 } CACHE_LINE_ALIGN;
 typedef MinMaxHeap<CPUEntry, int32> CPUHeap CACHE_LINE_ALIGN;
 
 static CPUEntry* sCPUEntries;
 static CPUHeap* sCPUPriorityHeaps;
 
-struct CoreEntry : public DoublyLinkedListLinkImpl<CoreEntry> {
+struct CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
+       DoublyLinkedListLinkImpl<CoreEntry> {
                                CoreEntry();
 
-       HeapLink<CoreEntry, int32>      fPriorityHeapLink;
-       MinMaxHeapLink<CoreEntry, int>  fLoadHeapLink;
-
        int32           fCoreID;
 
        bigtime_t       fStartedBottom;
@@ -113,11 +111,9 @@ struct CoreEntry : public 
DoublyLinkedListLinkImpl<CoreEntry> {
 
        bigtime_t       fActiveTime;
 
-       int                     fLoad;
+       int32           fLoad;
 } CACHE_LINE_ALIGN;
-typedef MinMaxHeap<CoreEntry, int, MinMaxHeapCompare<int>,
-               MinMaxHeapMemberGetLink<CoreEntry, int, 
&CoreEntry::fLoadHeapLink> >
-       CoreLoadHeap;
+typedef MinMaxHeap<CoreEntry, int32> CoreLoadHeap;
 
 static CoreEntry* sCoreEntries;
 static CoreLoadHeap* sCoreLoadHeap;
@@ -182,7 +178,7 @@ struct scheduler_thread_data {
 
                        bigtime_t       measure_active_time;
                        bigtime_t       measure_time;
-                       int                     load;
+                       int32           load;
 
                        bigtime_t       went_sleep;
                        bigtime_t       went_sleep_active;
@@ -336,7 +332,7 @@ dump_heap(CPUHeap* heap)
        while (entry) {
                int32 cpu = entry->fCPUNumber;
                int32 key = CPUHeap::GetKey(entry);
-               kprintf("%3" B_PRId32 " %8" B_PRId32 " %3d%%\n", cpu, key,
+               kprintf("%3" B_PRId32 " %8" B_PRId32 " %3" B_PRId32 "%%\n", 
cpu, key,
                        sCPUEntries[cpu].fLoad / 10);
 
                heap->RemoveMinimum();
@@ -363,7 +359,7 @@ dump_core_load_heap(CoreLoadHeap* heap)
 
        CoreEntry* entry = heap->PeekMinimum();
        while (entry) {
-               int key = CoreLoadHeap::GetKey(entry);
+               int32 key = CoreLoadHeap::GetKey(entry);
                kprintf("%4" B_PRId32 " %3" B_PRId32 "%%\n", entry->fCoreID,
                        entry->fLoad / cpuPerCore / 10);
 
@@ -375,7 +371,7 @@ dump_core_load_heap(CoreLoadHeap* heap)
 
        entry = temp.PeekMinimum();
        while (entry) {
-               int key = CoreLoadHeap::GetKey(entry);
+               int32 key = CoreLoadHeap::GetKey(entry);
                temp.RemoveMinimum();
                heap->Insert(entry, key);
                entry = temp.PeekMinimum();
@@ -517,7 +513,7 @@ scheduler_dump_thread_data(Thread* thread)
                additionalPenalty, schedulerThreadData->additional_penalty);
        kprintf("\tstolen_time:\t\t%" B_PRId64 "\n",
                schedulerThreadData->stolen_time);
-       kprintf("\tload:\t\t\t%d%%\n", schedulerThreadData->load / 10);
+       kprintf("\tload:\t\t\t%" B_PRId32 "%%\n", schedulerThreadData->load / 
10);
        kprintf("\twent_sleep:\t\t%" B_PRId64 "\n",
                schedulerThreadData->went_sleep);
        kprintf("\twent_sleep_active:\t%" B_PRId64 "\n",
@@ -539,8 +535,8 @@ update_load_heaps(int32 core)
        CoreEntry* entry = &sCoreEntries[core];
 
        int32 cpuPerCore = smp_get_num_cpus() / sRunQueueCount;
-       int newKey = entry->fLoad / cpuPerCore;
-       int oldKey = CoreLoadHeap::GetKey(entry);
+       int32 newKey = entry->fLoad / cpuPerCore;
+       int32 oldKey = CoreLoadHeap::GetKey(entry);
 
        ASSERT(oldKey >= 0 && oldKey <= kMaxLoad);
        ASSERT(newKey >= 0 && newKey <= kMaxLoad);
@@ -741,8 +737,6 @@ choose_core_power_saving(Thread* thread)
 {
        CoreEntry* entry;
 
-       int32 priority = get_effective_priority(thread);
-
        if (is_small_task_packing_enabled() && is_task_small(thread)
                && sCoreLoadHeap->PeekMaximum() != NULL) {
                // try to pack all threads on one core
@@ -892,7 +886,7 @@ should_rebalance(Thread* thread)
 
 
 static inline int
-compute_load(bigtime_t& measureTime, bigtime_t& measureActiveTime, int& load)
+compute_load(bigtime_t& measureTime, bigtime_t& measureActiveTime, int32& load)
 {
        const bigtime_t kLoadMeasureInterval = 50000;
        const bigtime_t kIntervalInaccuracy = kLoadMeasureInterval / 4;
@@ -948,7 +942,7 @@ compute_cpu_load(int32 cpu)
        if (oldLoad != sCPUEntries[cpu].fLoad) {
                int32 core = sCPUToCore[cpu];
 
-               int delta = sCPUEntries[cpu].fLoad - oldLoad;
+               int32 delta = sCPUEntries[cpu].fLoad - oldLoad;
                atomic_add(&sCoreEntries[core].fLoad, delta);
 
                update_load_heaps(core);
@@ -1116,8 +1110,6 @@ scheduler_enqueue_in_run_queue(Thread *thread)
 
        int32 core = schedulerThreadData->previous_core;
        if (core >= 0) {
-               int32 priority = get_effective_priority(thread);
-
                if (should_cancel_penalty(thread))
                        cancel_penalty(thread);
        }
@@ -1369,12 +1361,12 @@ track_cpu_activity(Thread* oldThread, Thread* 
nextThread, int32 thisCore)
 static inline void
 update_cpu_performance(Thread* thread, int32 thisCore)
 {
-       int load = max_c(thread->scheduler_data->load,
+       int32 load = max_c(thread->scheduler_data->load,
                        sCoreEntries[thisCore].fLoad);
        load = min_c(max_c(load, 0), kMaxLoad);
 
        if (load < kTargetLoad) {
-               int delta = kTargetLoad - load;
+               int32 delta = kTargetLoad - load;
 
                delta *= kTargetLoad;
                delta /= kCPUPerformanceScaleMax;
@@ -1384,7 +1376,7 @@ update_cpu_performance(Thread* thread, int32 thisCore)
                bool allowBoost = sSchedulerMode != SCHEDULER_MODE_POWER_SAVING;
                allowBoost = allowBoost || 
thread->scheduler_data->priority_penalty > 0;
 
-               int delta = load - kTargetLoad;
+               int32 delta = load - kTargetLoad;
                delta *= kMaxLoad - kTargetLoad;
                delta /= kCPUPerformanceScaleMax;
 

############################################################################

Commit:      288a2664a2de429f159d746beaab87373184cd3d
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Wed Nov 13 04:31:58 2013 UTC

scheduler: Remove sSchedulerInternalLock

 * pin idle threads to their specific CPUs
 * allow scheduler to implement SMP_MSG_RESCHEDULE handler
 * scheduler_set_thread_priority() reworked
 * at reschedule: enqueue old thread after dequeueing the new one

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/kscheduler.h 
b/headers/private/kernel/kscheduler.h
index bcb5fd6..addd458 100644
--- a/headers/private/kernel/kscheduler.h
+++ b/headers/private/kernel/kscheduler.h
@@ -34,6 +34,8 @@ extern "C" {
 */
 void scheduler_enqueue_in_run_queue(Thread* thread);
 
+void scheduler_reschedule_ici(void);
+
 /*!    Selects a thread from the ready-to-run queue and, if that's not the
        calling thread, switches the current CPU's context to run the selected
        thread.
@@ -59,7 +61,7 @@ status_t scheduler_on_thread_create(Thread* thread, bool 
idleThread);
        use.
        The per-thread housekeeping data structures are reset, if needed.
 */
-void  scheduler_on_thread_init(Thread* thread);
+void scheduler_on_thread_init(Thread* thread);
 
 /*!    Called when a Thread structure is freed.
        Frees up any per-thread resources allocated on the scheduler's part. The
diff --git a/src/system/kernel/scheduler/scheduler.cpp 
b/src/system/kernel/scheduler/scheduler.cpp
index 0320363..d8a3d98 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -51,7 +51,6 @@
 SchedulerListenerList gSchedulerListeners;
 spinlock gSchedulerListenersLock = B_SPINLOCK_INITIALIZER;
 
-static spinlock sSchedulerInternalLock;
 static bool sSchedulerEnabled;
 
 const bigtime_t kThreadQuantum = 1000;
@@ -73,6 +72,7 @@ static int32 sSmallTaskCore;
 static bool sSingleCore;
 
 static scheduler_mode sSchedulerMode;
+static rw_spinlock sSchedulerModeLock = B_RW_SPINLOCK_INITIALIZER;
 
 static int32 (*sChooseCore)(Thread* thread);
 static bool (*sShouldRebalance)(Thread* thread);
@@ -88,6 +88,8 @@ struct CPUEntry : public MinMaxHeapLinkImpl<CPUEntry, int32> {
 
        int32           fCPUNumber;
 
+       int32           fPriority;
+
        bigtime_t       fMeasureActiveTime;
        bigtime_t       fMeasureTime;
 
@@ -104,6 +106,8 @@ struct CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, 
int32>,
 
        int32           fCoreID;
 
+       spinlock        fLock;
+
        bigtime_t       fStartedBottom;
        bigtime_t       fReachedBottom;
        bigtime_t       fStartedIdle;
@@ -118,6 +122,7 @@ typedef MinMaxHeap<CoreEntry, int32> CoreLoadHeap;
 static CoreEntry* sCoreEntries;
 static CoreLoadHeap* sCoreLoadHeap;
 static CoreLoadHeap* sCoreHighLoadHeap;
+static spinlock sCoreHeapsLock = B_SPINLOCK_INITIALIZER;
 
 // sPackageUsageHeap is used to decide which core should be woken up from the
 // idle state. When aiming for performance we should use as many packages as
@@ -145,6 +150,7 @@ typedef DoublyLinkedList<PackageEntry> IdlePackageList;
 static PackageEntry* sPackageEntries;
 static PackageHeap* sPackageUsageHeap;
 static IdlePackageList* sIdlePackageList;
+static spinlock sIdlePackageLock = B_SPINLOCK_INITIALIZER;
 
 // The run queues. Holds the threads ready to run ordered by priority.
 // One queue per schedulable target per core. Additionally, each
@@ -184,11 +190,14 @@ struct scheduler_thread_data {
                        bigtime_t       went_sleep_active;
 
                        int32           previous_core;
+
+                       bool            enqueued;
 };
 
 
 CPUEntry::CPUEntry()
        :
+       fPriority(B_IDLE_PRIORITY),
        fMeasureActiveTime(0),
        fMeasureTime(0),
        fLoad(0)
@@ -201,6 +210,7 @@ CoreEntry::CoreEntry()
        fActiveTime(0),
        fLoad(0)
 {
+       B_INITIALIZE_SPINLOCK(&fLock);
 }
 
 
@@ -232,6 +242,7 @@ scheduler_thread_data::Init()
        cpu_bound = true;
 
        previous_core = -1;
+       enqueued = false;
 }
 
 
@@ -534,6 +545,8 @@ update_load_heaps(int32 core)
 
        CoreEntry* entry = &sCoreEntries[core];
 
+       SpinLocker coreLocker(sCoreHeapsLock);
+
        int32 cpuPerCore = smp_get_num_cpus() / sRunQueueCount;
        int32 newKey = entry->fLoad / cpuPerCore;
        int32 oldKey = CoreLoadHeap::GetKey(entry);
@@ -625,12 +638,13 @@ cancel_penalty(Thread* thread)
 
 
 static inline void
-update_priority_heaps(int32 cpu, int32 priority)
+update_cpu_priority(int32 cpu, int32 priority)
 {
        int32 core = sCPUToCore[cpu];
 
        int32 corePriority = 
CPUHeap::GetKey(sCPUPriorityHeaps[core].PeekMaximum());
 
+       sCPUEntries[cpu].fPriority = priority;
        sCPUPriorityHeaps[core].ModifyKey(&sCPUEntries[cpu], priority);
 
        if (sSingleCore)
@@ -645,6 +659,8 @@ update_priority_heaps(int32 cpu, int32 priority)
        int32 package = sCPUToPackage[cpu];
        PackageEntry* packageEntry = &sPackageEntries[package];
        if (maxPriority == B_IDLE_PRIORITY) {
+               SpinLocker _(sIdlePackageLock);
+
                // core goes idle
                ASSERT(packageEntry->fIdleCoreCount >= 0);
                ASSERT(packageEntry->fIdleCoreCount < packageEntry->fCoreCount);
@@ -672,6 +688,8 @@ update_priority_heaps(int32 cpu, int32 priority)
                                packageEntry->fIdleCoreCount);
                }
        } else if (corePriority == B_IDLE_PRIORITY) {
+               SpinLocker _(sIdlePackageLock);
+
                // core wakes up
                ASSERT(packageEntry->fIdleCoreCount > 0);
                ASSERT(packageEntry->fIdleCoreCount <= 
packageEntry->fCoreCount);
@@ -783,9 +801,11 @@ choose_cpu(int32 core)
 }
 
 
-static void
+static bool
 choose_core_and_cpu(Thread* thread, int32& targetCore, int32& targetCPU)
 {
+       SpinLocker coreLocker(sCoreHeapsLock);
+
        if (targetCore == -1 && targetCPU != -1)
                targetCore = sCPUToCore[targetCPU];
        else if (targetCore != -1 && targetCPU == -1)
@@ -797,6 +817,19 @@ choose_core_and_cpu(Thread* thread, int32& targetCore, 
int32& targetCPU)
 
        ASSERT(targetCore >= 0 && targetCore < sRunQueueCount);
        ASSERT(targetCPU >= 0 && targetCPU < smp_get_num_cpus());
+
+       int32 targetPriority = sCPUEntries[targetCPU].fPriority;
+       int32 threadPriority = get_effective_priority(thread);
+
+       if (threadPriority > targetPriority) {
+               // It is possible that another CPU schedules the thread before 
the
+               // target CPU. However, since the target CPU is sent an ICI it 
will
+               // reschedule anyway and update its heap key to the correct 
value.
+               update_cpu_priority(targetCPU, threadPriority);
+               return true;
+       }
+
+       return false;
 }
 
 
@@ -816,6 +849,8 @@ should_rebalance_low_latency(Thread* thread)
        // If there is high load on this core but this thread does not 
contribute
        // significantly consider giving it to someone less busy.
        if (coreEntry->fLoad > kHighLoad) {
+               SpinLocker coreLocker(sCoreHeapsLock);
+
                CoreEntry* other = sCoreLoadHeap->PeekMinimum();
                if (other != NULL && coreEntry->fLoad - other->fLoad >= 
kLoadDifference)
                        return true;
@@ -823,6 +858,8 @@ should_rebalance_low_latency(Thread* thread)
 
        // No cpu bound threads - the situation is quite good. Make sure it
        // won't get much worse...
+       SpinLocker coreLocker(sCoreHeapsLock);
+
        CoreEntry* other = sCoreLoadHeap->PeekMinimum();
        if (other == NULL)
                other = sCoreHighLoadHeap->PeekMinimum();
@@ -866,6 +903,8 @@ should_rebalance_power_saving(Thread* thread)
 
        // No cpu bound threads - the situation is quite good. Make sure it
        // won't get much worse...
+       SpinLocker coreLocker(sCoreHeapsLock);
+
        CoreEntry* other = sCoreLoadHeap->PeekMinimum();
        if (other == NULL)
                other = sCoreHighLoadHeap->PeekMinimum();
@@ -1031,7 +1070,8 @@ enqueue(Thread* thread, bool newOne)
        compute_thread_load(thread);
 
        scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
-
+       schedulerThreadData->cpu_bound = true;
+       schedulerThreadData->time_left = 0;
        int32 threadPriority = get_effective_priority(thread);
 
        T(EnqueueThread(thread, threadPriority));
@@ -1043,48 +1083,31 @@ enqueue(Thread* thread, bool newOne)
                targetCPU = thread->previous_cpu->cpu_num;
        else if (sSingleCore)
                targetCore = 0;
-       else if (schedulerThreadData->previous_core < 0
-               || (newOne && has_cache_expired(thread))
-               || should_rebalance(thread)) {
-
-               if (thread_is_idle_thread(thread))
-                       targetCPU = thread->previous_cpu->cpu_num;
-               
-       } else
+       else if (schedulerThreadData->previous_core >= 0
+               && (!newOne || !has_cache_expired(thread))
+               && !should_rebalance(thread)) {
                targetCore = schedulerThreadData->previous_core;
+       }
 
-       choose_core_and_cpu(thread, targetCore, targetCPU);
+       bool shouldReschedule = choose_core_and_cpu(thread, targetCore, 
targetCPU);
        schedulerThreadData->previous_core = targetCore;
 
-       TRACE("enqueueing thread %ld with priority %ld\n", thread->id,
-               threadPriority);
+       TRACE("enqueueing thread %ld with priority %ld on CPU %ld (core %ld)\n",
+               thread->id, threadPriority, targetCPU, targetCore);
+
+       SpinLocker runQueueLocker(sCoreEntries[targetCore].fLock);
+       thread->scheduler_data->enqueued = true;
        if (pinned)
                sPinnedRunQueues[targetCPU].PushBack(thread, threadPriority);
        else
                sRunQueues[targetCore].PushBack(thread, threadPriority);
-
-       schedulerThreadData->cpu_bound = true;
-       schedulerThreadData->time_left = 0;
-       schedulerThreadData->stolen_time = 0;
+       runQueueLocker.Unlock();
 
        // notify listeners
        NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
                thread);
 
-       Thread* targetThread = gCPU[targetCPU].running_thread;
-       int32 targetPriority = get_effective_priority(targetThread);
-
-       TRACE("choosing CPU %ld (core %ld) with current priority %ld\n", 
targetCPU,
-               targetCore, targetPriority);
-
-       if (threadPriority > targetPriority) {
-               targetThread->scheduler_data->lost_cpu = true;
-
-               // It is possible that another CPU schedules the thread before 
the
-               // target CPU. However, since the target CPU is sent an ICI it 
will
-               // reschedule anyway and update its heap key to the correct 
value.
-               update_priority_heaps(targetCPU, threadPriority);
-
+       if (shouldReschedule) {
                if (targetCPU == smp_get_current_cpu())
                        gCPU[targetCPU].invoke_scheduler = true;
                else {
@@ -1101,7 +1124,7 @@ enqueue(Thread* thread, bool newOne)
 void
 scheduler_enqueue_in_run_queue(Thread *thread)
 {
-       InterruptsSpinLocker _(sSchedulerInternalLock);
+       InterruptsReadSpinLocker modeLocker(sSchedulerModeLock);
 
        TRACE("enqueueing new thread %ld with static priority %ld\n", 
thread->id,
                thread->priority);
@@ -1123,13 +1146,21 @@ put_back(Thread* thread)
 {
        compute_thread_load(thread);
 
+       int32 core = sCPUToCore[smp_get_current_cpu()];
+
+       SpinLocker runQueueLocker(sCoreEntries[core].fLock);
+       thread->scheduler_data->enqueued = true;
        if (thread->pinned_to_cpu > 0) {
                int32 pinnedCPU = thread->previous_cpu->cpu_num;
+
+               ASSERT(pinnedCPU == smp_get_current_cpu());
                sPinnedRunQueues[pinnedCPU].PushFront(thread,
                        get_effective_priority(thread));
        } else {
                int32 previousCore = thread->scheduler_data->previous_core;
                ASSERT(previousCore >= 0);
+
+               ASSERT(previousCore == core);
                sRunQueues[previousCore].PushFront(thread,
                        get_effective_priority(thread));
        }
@@ -1141,58 +1172,88 @@ put_back(Thread* thread)
 int32
 scheduler_set_thread_priority(Thread *thread, int32 priority)
 {
-       InterruptsSpinLocker _(sSchedulerInternalLock);
-
-       if (priority == thread->priority)
-               return thread->priority;
+       InterruptsSpinLocker _(thread->scheduler_lock);
+       InterruptsReadSpinLocker modeLocker(sSchedulerModeLock);
 
        int32 oldPriority = thread->priority;
 
        TRACE("changing thread %ld priority to %ld (old: %ld, effective: %ld)\n",
                thread->id, priority, oldPriority, get_effective_priority(thread));
 
+       cancel_penalty(thread);
+
+       if (priority == thread->priority)
+               return thread->priority;
+
+       thread->priority = priority;
+
        if (thread->state != B_THREAD_READY) {
                cancel_penalty(thread);
                thread->priority = priority;
 
-               if (thread->state == B_THREAD_RUNNING)
-                       update_priority_heaps(thread->cpu->cpu_num, priority);
+               if (thread->state == B_THREAD_RUNNING) {
+                       SpinLocker coreLocker(sCoreHeapsLock);
+                       update_cpu_priority(thread->cpu->cpu_num, priority);
+               }
                return oldPriority;
        }
 
        // The thread is in the run queue. We need to remove it and re-insert it at
        // a new position.
 
-       T(RemoveThread(thread));
-
-       // notify listeners
-       NotifySchedulerListeners(&SchedulerListener::ThreadRemovedFromRunQueue,
-               thread);
-
-       // remove thread from run queue
+       bool pinned = thread->pinned_to_cpu > 0;
+       int32 previousCPU = thread->previous_cpu->cpu_num;
        int32 previousCore = thread->scheduler_data->previous_core;
        ASSERT(previousCore >= 0);
-       sRunQueues[previousCore].Remove(thread);
 
-       // set priority and re-insert
-       cancel_penalty(thread);
-       thread->priority = priority;
-       enqueue(thread, true);
+       SpinLocker runQueueLocker(sCoreEntries[previousCore].fLock);
+
+       // the thread might have been already dequeued and is about to start
+       // running once we release its scheduler_lock, in such case we can not
+       // attempt to dequeue it
+       if (thread->scheduler_data->enqueued) {
+               T(RemoveThread(thread));
+
+               // notify listeners
+               NotifySchedulerListeners(&SchedulerListener::ThreadRemovedFromRunQueue,
+                       thread);
+
+               thread->scheduler_data->enqueued = false;
+               if (pinned)
+                       sPinnedRunQueues[previousCPU].Remove(thread);
+               else
+                       sRunQueues[previousCore].Remove(thread);
+               runQueueLocker.Unlock();
+
+               enqueue(thread, true);
+       }
 
        return oldPriority;
 }
 
 
-static int32
-reschedule_event(timer *unused)
+static inline void
+reschedule_needed()
 {
-       // This function is called as a result of the timer event set by the
-       // scheduler. Make sure the reschedule() is invoked.
-       Thread* thread= thread_get_current_thread();
+       // This function is called as a result of either the timer event set by the
+       // scheduler or an incoming ICI. Make sure the reschedule() is invoked.
+       thread_get_current_thread()->scheduler_data->lost_cpu = true;
+       get_cpu_struct()->invoke_scheduler = true;
+}
+
+
+void
+scheduler_reschedule_ici()
+{
+       reschedule_needed();
+}
+
 
-       thread->scheduler_data->lost_cpu = true;
-       thread->cpu->invoke_scheduler = true;
-       thread->cpu->preempted = true;
+static int32
+reschedule_event(timer* /* unused */)
+{
+       reschedule_needed();
+       get_cpu_struct()->preempted = true;
        return B_HANDLED_INTERRUPT;
 }
 
@@ -1273,14 +1334,16 @@ compute_quantum(Thread* thread)
 
 
 static inline Thread*
-dequeue_thread(int32 thisCPU)
+choose_next_thread(int32 thisCPU, Thread* oldThread, bool putAtBack)
 {
        int32 thisCore = sCPUToCore[thisCPU];
 
+       SpinLocker runQueueLocker(sCoreEntries[thisCore].fLock);
+
        Thread* sharedThread = sRunQueues[thisCore].PeekMaximum();
        Thread* pinnedThread = sPinnedRunQueues[thisCPU].PeekMaximum();
 
-       ASSERT(sharedThread != NULL || pinnedThread != NULL);
+       ASSERT(sharedThread != NULL || pinnedThread != NULL || oldThread != NULL);
 
        int32 pinnedPriority = -1;
        if (pinnedThread != NULL)
@@ -1290,11 +1353,27 @@ dequeue_thread(int32 thisCPU)
        if (sharedThread != NULL)
                sharedPriority = get_effective_priority(sharedThread);
 
+       int32 oldPriority = -1;
+       if (oldThread != NULL)
+               oldPriority = get_effective_priority(oldThread);
+
+       int32 rest = max_c(pinnedPriority, sharedPriority);
+       if (oldPriority > rest || (!putAtBack && oldPriority == rest)) {
+               ASSERT(!oldThread->scheduler_data->enqueued);
+               return oldThread;
+       }
+
        if (sharedPriority > pinnedPriority) {
+               ASSERT(sharedThread->scheduler_data->enqueued);
+               sharedThread->scheduler_data->enqueued = false;
+
                sRunQueues[thisCore].Remove(sharedThread);
                return sharedThread;
        }
 
+       ASSERT(pinnedThread->scheduler_data->enqueued);
+       pinnedThread->scheduler_data->enqueued = false;
+
        sPinnedRunQueues[thisCPU].Remove(pinnedThread);
        return pinnedThread;
 }
@@ -1388,7 +1467,7 @@ update_cpu_performance(Thread* thread, int32 thisCore)
 static void
 _scheduler_reschedule(void)
 {
-       InterruptsSpinLocker internalLocker(sSchedulerInternalLock);
+       InterruptsReadSpinLocker modeLocker(sSchedulerModeLock);
 
        Thread* oldThread = thread_get_current_thread();
 
@@ -1401,14 +1480,13 @@ _scheduler_reschedule(void)
        oldThread->state = oldThread->next_state;
        scheduler_thread_data* schedulerOldThreadData = oldThread->scheduler_data;
 
-       // update CPU heap so that old thread would have CPU properly chosen
-       Thread* nextThread = sRunQueues[thisCore].PeekMaximum();
-       if (nextThread != NULL)
-               update_priority_heaps(thisCPU, get_effective_priority(nextThread));
-
+       bool enqueueOldThread = false;
+       bool putOldThreadAtBack = false;
        switch (oldThread->next_state) {
                case B_THREAD_RUNNING:
                case B_THREAD_READY:
+                       enqueueOldThread = true;
+
                        if (!schedulerOldThreadData->lost_cpu)
                                schedulerOldThreadData->cpu_bound = false;
 
@@ -1419,11 +1497,11 @@ _scheduler_reschedule(void)
 
                                TRACE("enqueueing thread %ld into run queue priority = %ld\n",
                                        oldThread->id, get_effective_priority(oldThread));
-                               enqueue(oldThread, false);
+                               putOldThreadAtBack = true;
                        } else {
                                TRACE("putting thread %ld back in run queue priority = %ld\n",
                                        oldThread->id, get_effective_priority(oldThread));
-                               put_back(oldThread);
+                               putOldThreadAtBack = false;
                        }
 
                        break;
@@ -1445,10 +1523,20 @@ _scheduler_reschedule(void)
        oldThread->has_yielded = false;
        schedulerOldThreadData->lost_cpu = false;
 
-       // select thread with the biggest priority
-       nextThread = dequeue_thread(thisCPU);
-       if (nextThread != oldThread)
+       // select thread with the biggest priority and enqueue back the old 
thread
+       Thread* nextThread
+               = choose_next_thread(thisCPU, enqueueOldThread ? oldThread : NULL,
+                       putOldThreadAtBack);
+       if (nextThread != oldThread) {
+               if (enqueueOldThread) {
+                       if (putOldThreadAtBack)
+                               enqueue(oldThread, false);
+                       else
+                               put_back(oldThread);
+               }
+
                acquire_spinlock(&nextThread->scheduler_lock);
+       }
 
        TRACE("reschedule(): cpu %ld, next thread = %ld\n", thisCPU,
                nextThread->id);
@@ -1460,12 +1548,16 @@ _scheduler_reschedule(void)
                oldThread, nextThread);
 
        // update CPU heap
-       update_priority_heaps(thisCPU,
-               get_effective_priority(nextThread));
+       {
+               SpinLocker coreLocker(sCoreHeapsLock);
+               update_cpu_priority(thisCPU, get_effective_priority(nextThread));
+       }
 
        nextThread->state = B_THREAD_RUNNING;
        nextThread->next_state = B_THREAD_READY;
+
        ASSERT(nextThread->scheduler_data->previous_core == thisCore);
+
        compute_thread_load(nextThread);
 
        // track kernel time (user time is tracked in thread_at_kernel_entry())
@@ -1489,7 +1581,7 @@ _scheduler_reschedule(void)
                } else
                        nextThread->scheduler_data->quantum_start = system_time();
 
-               internalLocker.Unlock();
+               modeLocker.Unlock();
                if (nextThread != oldThread)
                        scheduler_switch_thread(oldThread, nextThread);
        }
@@ -1527,6 +1619,16 @@ void
 scheduler_on_thread_init(Thread* thread)
 {
        thread->scheduler_data->Init();
+
+       if (thread_is_idle_thread(thread)) {
+               static int32 sIdleThreadsID;
+               int32 cpu = atomic_add(&sIdleThreadsID, 1);
+
+               thread->previous_cpu = &gCPU[cpu];
+               thread->pinned_to_cpu = 1;
+
+               thread->scheduler_data->previous_core = sCPUToCore[cpu];
+       }
 }
 
 
@@ -1560,7 +1662,7 @@ scheduler_set_operation_mode(scheduler_mode mode)
        const char* modeNames[] = { "low latency", "power saving" };
        dprintf("scheduler: switching to %s mode\n", modeNames[mode]);
 
-       InterruptsSpinLocker _(sSchedulerInternalLock);
+       InterruptsWriteSpinLocker _(sSchedulerModeLock);
 
        sSchedulerMode = mode;
        switch (mode) {
diff --git a/src/system/kernel/smp.cpp b/src/system/kernel/smp.cpp
index 950b499..d73f787 100644
--- a/src/system/kernel/smp.cpp
+++ b/src/system/kernel/smp.cpp
@@ -874,11 +874,8 @@ process_pending_ici(int32 currentCPU)
                        break;
                }
                case SMP_MSG_RESCHEDULE:
-               {
-                       cpu_ent* cpu = thread_get_current_thread()->cpu;
-                       cpu->invoke_scheduler = true;
+                       scheduler_reschedule_ici();
                        break;
-               }
 
                default:
                        dprintf("smp_intercpu_int_handler: got unknown message %" B_PRId32 "\n",
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
index a2cebdb..3aee21c 100644
--- a/src/system/kernel/thread.cpp
+++ b/src/system/kernel/thread.cpp
@@ -2681,7 +2681,6 @@ thread_init(kernel_args *args)
                }
 
                gCPU[i].running_thread = thread;
-               thread->previous_cpu = &gCPU[i];
 
                thread->team = team_get_kernel_team();
                thread->priority = B_IDLE_PRIORITY;
@@ -2698,6 +2697,8 @@ thread_init(kernel_args *args)
 
                thread->visible = true;
                insert_thread_into_team(thread->team, thread);
+
+               scheduler_on_thread_init(thread);
        }
        sUsedThreads = args->num_cpus;
 


Other related posts:

  • » [haiku-commits] BRANCH pdziepak-github.scheduler [288a266] src/system/kernel/scheduler - pdziepak-github . scheduler