[haiku-commits] BRANCH pdziepak-github.scheduler [bab69bd] src/system/kernel/scheduler

  • From: pdziepak-github.scheduler <community@xxxxxxxxxxxx>
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 8 Oct 2013 05:15:35 +0200 (CEST)

added 3 changesets to branch 'refs/remotes/pdziepak-github/scheduler'
old head: 4cba4ff1df8b1dd778bf092f9c1936ff8385ed88
new head: bab69bdb4707498efc97bb3bf3d58d58553b4dbc
overview: https://github.com/pdziepak/Haiku/compare/4cba4ff...bab69bd

----------------------------------------------------------------------------

21808e8: kernel: Limit maximum priority penalty
  
  The maximum penalty the thread can receive is now limited depending on
  the real thread priority. However, this makes it possible to starve
  threads with priority lower than that limit. To prevent that, threads
  that have already earned the maximum penalty are periodically forced
  to yield the CPU to all other threads.

547b8c7: kernel: Cancel penalty only if the thread actually waits
  
  Require the thread to give up CPU for at least one time slice before
  cancelling its penalty.

bab69bd: kernel: Force high priority threads to yield less often

                                    [ Pawel Dziepak <pdziepak@xxxxxxxxxxx> ]

----------------------------------------------------------------------------

1 file changed, 33 insertions(+), 5 deletions(-)
src/system/kernel/scheduler/scheduler_simple.cpp | 38 +++++++++++++++++---

############################################################################

Commit:      21808e8f0bf71d342e572e9980119c0a5dc24ec6
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Oct  8 00:54:58 2013 UTC

kernel: Limit maximum priority penalty

The maximum penalty the thread can receive is now limited depending on
the real thread priority. However, this makes it possible to starve
threads with priority lower than that limit. To prevent that, threads
that have already earned the maximum penalty are periodically forced
to yield the CPU to all other threads.

----------------------------------------------------------------------------

diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp 
b/src/system/kernel/scheduler/scheduler_simple.cpp
index 417e908..461c601 100644
--- a/src/system/kernel/scheduler/scheduler_simple.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple.cpp
@@ -51,6 +51,8 @@ struct scheduler_thread_data {
        void            Init();
 
        int32           priority_penalty;
+       int32           forced_yield_count;
+
        bool            lost_cpu;
        bool            cpu_bound;
 
@@ -64,6 +66,8 @@ void
 scheduler_thread_data::Init()
 {
        priority_penalty = 0;
+       forced_yield_count = 0;
+
        time_left = 0;
        stolen_time = 0;
 
@@ -107,8 +111,14 @@ simple_get_effective_priority(Thread *thread)
                = 
reinterpret_cast<scheduler_thread_data*>(thread->scheduler_data);
 
        int32 effectivePriority = thread->priority;
-       if (effectivePriority < B_FIRST_REAL_TIME_PRIORITY)
-               effectivePriority -= schedulerThreadData->priority_penalty;
+       if (effectivePriority < B_FIRST_REAL_TIME_PRIORITY) {
+               if (schedulerThreadData->forced_yield_count
+                       && schedulerThreadData->forced_yield_count % 16 == 0) {
+                       TRACE("forcing thread %ld to yield\n", thread->id);
+                       effectivePriority = B_LOWEST_ACTIVE_PRIORITY;
+               } else
+                       effectivePriority -= 
schedulerThreadData->priority_penalty;
+       }
 
        ASSERT(schedulerThreadData->priority_penalty >= 0);
        ASSERT(effectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
@@ -132,8 +142,12 @@ simple_increase_penalty(Thread *thread)
        int32 oldPenalty = schedulerThreadData->priority_penalty++;
 
        ASSERT(thread->priority - oldPenalty >= B_LOWEST_ACTIVE_PRIORITY);
-       if (thread->priority - oldPenalty <= B_LOWEST_ACTIVE_PRIORITY)
+       const int kMinimalPriority
+               = min_c(thread->priority, 25) / 5;
+       if (thread->priority - oldPenalty <= kMinimalPriority) {
                schedulerThreadData->priority_penalty = oldPenalty;
+               schedulerThreadData->forced_yield_count++;
+       }
 }
 
 

############################################################################

Commit:      547b8c76c718b46174dbac0ae37d3ad6b9d803b5
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Oct  8 02:50:23 2013 UTC

kernel: Cancel penalty only if the thread actually waits

Require the thread to give up CPU for at least one time slice before
cancelling its penalty.

----------------------------------------------------------------------------

diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp 
b/src/system/kernel/scheduler/scheduler_simple.cpp
index 461c601..c8517b6 100644
--- a/src/system/kernel/scheduler/scheduler_simple.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple.cpp
@@ -59,6 +59,8 @@ struct scheduler_thread_data {
        bigtime_t       time_left;
        bigtime_t       stolen_time;
        bigtime_t       quantum_start;
+
+       bigtime_t       went_sleep;
 };
 
 
@@ -71,6 +73,8 @@ scheduler_thread_data::Init()
        time_left = 0;
        stolen_time = 0;
 
+       went_sleep = 0;
+
        lost_cpu = false;
        cpu_bound = true;
 }
@@ -156,9 +160,14 @@ simple_cancel_penalty(Thread *thread)
 {
        scheduler_thread_data* schedulerThreadData
                = 
reinterpret_cast<scheduler_thread_data*>(thread->scheduler_data);
+
+       if (schedulerThreadData->went_sleep < 0
+               || system_time() - schedulerThreadData->went_sleep <= 
kThreadQuantum)
+               return;
        if (schedulerThreadData->priority_penalty != 0)
                TRACE("cancelling thread %ld penalty\n", thread->id);
        schedulerThreadData->priority_penalty = 0;
+       schedulerThreadData->forced_yield_count = 0;
 }
 
 
@@ -170,6 +179,8 @@ simple_enqueue_in_run_queue(Thread *thread)
 {
        thread->state = thread->next_state = B_THREAD_READY;
 
+       simple_cancel_penalty(thread);
+
        int32 threadPriority = simple_get_effective_priority(thread);
        T(EnqueueThread(thread, threadPriority));
 
@@ -339,6 +350,8 @@ simple_reschedule(void)
                                schedulerOldThreadData->cpu_bound = false;
 
                        if (simple_quantum_ended(oldThread, 
oldThread->cpu->preempted)) {
+                               schedulerOldThreadData->went_sleep = -1;
+
                                if (schedulerOldThreadData->cpu_bound)
                                        simple_increase_penalty(oldThread);
                                else
@@ -356,13 +369,13 @@ simple_reschedule(void)
 
                        break;
                case B_THREAD_SUSPENDED:
-                       simple_cancel_penalty(oldThread);
+                       schedulerOldThreadData->went_sleep = system_time();
                        TRACE("reschedule(): suspending thread %ld\n", 
oldThread->id);
                        break;
                case THREAD_STATE_FREE_ON_RESCHED:
                        break;
                default:
-                       simple_cancel_penalty(oldThread);
+                       schedulerOldThreadData->went_sleep = system_time();
                        TRACE("not enqueueing thread %ld into run queue 
next_state = %ld\n",
                                oldThread->id, oldThread->next_state);
                        break;

############################################################################

Commit:      bab69bdb4707498efc97bb3bf3d58d58553b4dbc
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Oct  8 02:53:30 2013 UTC

kernel: Force high priority threads to yield less often

----------------------------------------------------------------------------

diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp 
b/src/system/kernel/scheduler/scheduler_simple.cpp
index c8517b6..950da57 100644
--- a/src/system/kernel/scheduler/scheduler_simple.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple.cpp
@@ -116,8 +116,9 @@ simple_get_effective_priority(Thread *thread)
 
        int32 effectivePriority = thread->priority;
        if (effectivePriority < B_FIRST_REAL_TIME_PRIORITY) {
-               if (schedulerThreadData->forced_yield_count
-                       && schedulerThreadData->forced_yield_count % 16 == 0) {
+               const int kYieldFrequency = 1 << (min_c(thread->priority, 25) / 
5 + 1);
+               if (schedulerThreadData->forced_yield_count != 0
+                       && schedulerThreadData->forced_yield_count % 
kYieldFrequency == 0) {
                        TRACE("forcing thread %ld to yield\n", thread->id);
                        effectivePriority = B_LOWEST_ACTIVE_PRIORITY;
                } else


Other related posts:

  • » [haiku-commits] BRANCH pdziepak-github.scheduler [bab69bd] src/system/kernel/scheduler - pdziepak-github . scheduler