[PATCH] Mutex priorities

  • From: Yourself <user@shredder.(none)>
  • Date: Thu, 1 Mar 2012 17:40:01 +0100

---
 headers/private/kernel/lock.h         |   12 +-
 headers/private/kernel/thread.h       |    3 +
 headers/private/kernel/thread_types.h |    6 +
 src/system/kernel/locks/lock.cpp      |  201 ++++++++++++++++++++++++++++++---
 src/system/kernel/thread.cpp          |   30 +++++
 5 files changed, 232 insertions(+), 20 deletions(-)

diff --git a/headers/private/kernel/lock.h b/headers/private/kernel/lock.h
index 4381d21..e9cec2a 100644
--- a/headers/private/kernel/lock.h
+++ b/headers/private/kernel/lock.h
@@ -25,6 +25,8 @@ typedef struct mutex {
        uint16                                  ignore_unlock_count;
 #endif
        uint8                                   flags;
+
+       struct Thread          *holder_thread;
 } mutex;
 
 #define MUTEX_FLAG_CLONE_NAME  0x1
@@ -88,10 +90,10 @@ typedef struct rw_lock {
 
 // static initializers
 #if KDEBUG
-#      define MUTEX_INITIALIZER(name)                  { name, NULL, -1, 0 }
+#      define MUTEX_INITIALIZER(name)                  { name, NULL, -1, 0, NULL }
 #      define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 }
 #else
-#      define MUTEX_INITIALIZER(name)                  { name, NULL, 0, 0, 0 }
+#      define MUTEX_INITIALIZER(name)                  { name, NULL, 0, 0, 0, NULL }
 #      define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 }
 #endif
 
@@ -152,7 +154,7 @@ extern void _mutex_unlock(mutex* lock, bool schedulerLocked);
 extern status_t _mutex_trylock(mutex* lock);
 extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
        bigtime_t timeout);
-
+extern void _mutex_transfer_lock(mutex* lock, thread_id thread);
 
 static inline status_t
 rw_lock_read_lock(rw_lock* lock)
@@ -268,9 +270,7 @@ mutex_unlock(mutex* lock)
 static inline void
 mutex_transfer_lock(mutex* lock, thread_id thread)
 {
-#if KDEBUG
-       lock->holder = thread;
-#endif
+       _mutex_transfer_lock(lock, thread);
 }
 
 
diff --git a/headers/private/kernel/thread.h b/headers/private/kernel/thread.h
index 8d1287e..e7e58a8 100644
--- a/headers/private/kernel/thread.h
+++ b/headers/private/kernel/thread.h
@@ -89,6 +89,9 @@ status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
 void thread_yield(bool force);
 void thread_exit(void);
 
+void boost_thread_priority(Thread *thread, int32 boost_priority);
+void unboost_thread_priority(Thread *thread);
+
 int32 thread_max_threads(void);
 int32 thread_used_threads(void);
 
diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h
index 017da64..d023ddb 100644
--- a/headers/private/kernel/thread_types.h
+++ b/headers/private/kernel/thread_types.h
@@ -409,6 +409,7 @@ private:
                        vint32                          fUserDefinedTimerCount; // accessed atomically
 };
 
+#define HELD_LOCKS_ARRAY_SIZE 32
 
 struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
        int32                   flags;                  // summary of events relevant in interrupt
@@ -423,6 +424,11 @@ struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
        int32                   priority;               // protected by scheduler lock
        int32                   next_priority;  // protected by scheduler lock
        int32                   io_priority;    // protected by fLock
+
+       bool                    boosted;
+       int32                   pre_boost_priority;
+       mutex*                  held_locks[HELD_LOCKS_ARRAY_SIZE];
+
        int32                   state;                  // protected by scheduler lock
        int32                   next_state;             // protected by scheduler lock
        struct cpu_ent  *cpu;                   // protected by scheduler lock
diff --git a/src/system/kernel/locks/lock.cpp b/src/system/kernel/locks/lock.cpp
index 1cc5c35..61d4943 100644
--- a/src/system/kernel/locks/lock.cpp
+++ b/src/system/kernel/locks/lock.cpp
@@ -556,6 +556,69 @@ dump_rw_lock_info(int argc, char** argv)
 
 // #pragma mark -
 
+static inline void
+add_lock_to_held_locks(mutex *lock, Thread *thread)
+{
+       ASSERT(thread);
+       ASSERT(lock);
+
+       // Search for first free entry in the array and use it
+       for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) {
+               if (thread->held_locks[i] == NULL) {
+                       thread->held_locks[i] = lock;
+                       return;
+               }
+       }
+}
+
+static inline void
+remove_lock_from_held_locks(mutex *lock, Thread *thread)
+{
+       for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) {
+               if (thread->held_locks[i] == lock) {
+                       thread->held_locks[i] = NULL;
+                       return;
+               }
+       }
+}
+
+static inline void
+add_to_mutex_waiters_list(mutex *lock, mutex_waiter *waiter)
+{
+       if (lock->waiters != NULL) {
+               if (waiter->thread->priority > lock->waiters->thread->priority) {
+                       // We have the highest priority of all
+                       // threads currently in the queue, prepend ourselves.
+                       waiter->next = lock->waiters;
+                       waiter->last = lock->waiters->last;
+                       lock->waiters = waiter;
+               } else {
+                       // Search for the first waiter with lower (or equal) priority than ours.
+                       mutex_waiter *waiter_iterator = lock->waiters;
+                       mutex_waiter *previous_waiter = NULL;
+
+                       while (waiter_iterator->thread->priority >= waiter->thread->priority
+                               && waiter_iterator->next != NULL) {
+                               previous_waiter = waiter_iterator;
+                               waiter_iterator = waiter_iterator->next;
+                       }
+
+                       if (waiter_iterator->thread->priority >= waiter->thread->priority) {
+                               // We are now the last in the queue, append and set 'last' pointer.
+                               waiter_iterator->next = waiter;
+                               lock->waiters->last = waiter;
+                       } else {
+                               // We belong right before 'waiter_iterator', which has lower priority.
+                               waiter->next = waiter_iterator;
+                               previous_waiter->next = waiter;
+                       }
+               }
+       } else {
+               // Nobody else waiting yet, set ourselves as first (and last) waiter.
+               lock->waiters = waiter;
+               lock->waiters->last = waiter;
+       }
+}
 
 void
 mutex_init(mutex* lock, const char *name)
@@ -569,6 +632,7 @@ mutex_init(mutex* lock, const char *name)
        lock->ignore_unlock_count = 0;
 #endif
        lock->flags = 0;
+       lock->holder_thread = NULL;
 
        T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
        NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
@@ -587,6 +651,7 @@ mutex_init_etc(mutex* lock, const char *name, uint32 flags)
        lock->ignore_unlock_count = 0;
 #endif
        lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
+       lock->holder_thread = NULL;
 
        T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
        NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
@@ -612,6 +677,11 @@ mutex_destroy(mutex* lock)
        }
 #endif
 
+       // Remove the destroyed lock from held locks array of its holder.
+       Thread *holder = lock->holder_thread;
+       if (holder != NULL)
+               remove_lock_from_held_locks(lock, holder);
+
        while (mutex_waiter* waiter = lock->waiters) {
                // dequeue
                lock->waiters = waiter->next;
@@ -677,6 +747,15 @@ _mutex_lock(mutex* lock, bool schedulerLocked)
 #if KDEBUG
        if (lock->holder < 0) {
                lock->holder = thread_get_current_thread_id();
+               lock->holder_thread = thread_get_current_thread();
+
+               ASSERT(lock->holder >= 0);
+               ASSERT(lock->holder_thread);
+
+               // Add the lock we just acquired to the array of held locks for
+               // this thread.
+               add_lock_to_held_locks(lock, lock->holder_thread);
+
                return B_OK;
        } else if (lock->holder == thread_get_current_thread_id()) {
                panic("_mutex_lock(): double lock of %p by thread %ld", lock,
@@ -686,6 +765,11 @@ _mutex_lock(mutex* lock, bool schedulerLocked)
 #else
        if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
                lock->flags &= ~MUTEX_FLAG_RELEASED;
+
+               lock->holder_thread = thread_get_current_thread();
+               // Add the lock we just acquired to the array of held locks for
+               // this thread.
+               add_lock_to_held_locks(lock, thread_get_current_thread());
                return B_OK;
        }
 #endif
@@ -695,21 +779,31 @@ _mutex_lock(mutex* lock, bool schedulerLocked)
        waiter.thread = thread_get_current_thread();
        waiter.next = NULL;
 
-       if (lock->waiters != NULL) {
-               lock->waiters->last->next = &waiter;
-       } else
-               lock->waiters = &waiter;
+       add_to_mutex_waiters_list(lock, &waiter);
 
-       lock->waiters->last = &waiter;
+       // The lock is already held by another thread. If this other thread has
+       // a lower priority than ours, boost it so it can release the lock for
+       // us more quickly.
+       Thread *holder_thread = lock->holder_thread;
+       if (holder_thread != NULL) {
+               if (waiter.thread->priority > holder_thread->priority)
+                       boost_thread_priority(holder_thread, waiter.thread->priority + 1);
+       }
 
        // block
        thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, 
lock);
        status_t error = thread_block_locked(waiter.thread);
 
+       if (error == B_OK) {
 #if KDEBUG
-       if (error == B_OK)
                lock->holder = waiter.thread->id;
 #endif
+               lock->holder_thread = waiter.thread;
+
+               // Add the lock we just acquired to the array of held locks for
+               // this thread.
+               add_lock_to_held_locks(lock, waiter.thread);
+       }
 
        return error;
 }
@@ -751,18 +845,66 @@ _mutex_unlock(mutex* lock, bool schedulerLocked)
                // cause a race condition, since another locker could think the 
lock
                // is not held by anyone.
                lock->holder = waiter->thread->id;
+               lock->holder_thread = waiter->thread;
 #endif
        } else {
                // We've acquired the spinlock before the locker that is going 
to wait.
                // Just mark the lock as released.
 #if KDEBUG
                lock->holder = -1;
+               lock->holder_thread = NULL;
 #else
                lock->flags |= MUTEX_FLAG_RELEASED;
 #endif
        }
-}
 
+       // Remove the lock we just unlocked from the held locks array and also search
+       // for the maximum priority of all threads which are waiting for locks
+       // we hold.
+       Thread *current_thread = thread_get_current_thread();
+       int32 maximum_priority = 0;
+       for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) {
+               mutex *held_lock = current_thread->held_locks[i];
+
+               if (held_lock == lock) {
+                       // Remove from held locks array
+                       current_thread->held_locks[i] = NULL;
+               } else if (held_lock != NULL && held_lock->waiters != NULL) {
+                       // The first thread in the waiters list has the highest priority
+                       int32 priority = held_lock->waiters->thread->priority;
+                       if (priority > maximum_priority)
+                               maximum_priority = priority;
+               }
+       }
+
+       // Find out whether we have to boost or unboost our priority.
+       if (waiter == NULL) {
+               // No waiters, do nothing
+               return;
+       }
+
+       if (current_thread->boosted) {
+               if (maximum_priority > current_thread->pre_boost_priority) {
+                       // We are holding another lock which has a waiter with higher
+                       // priority than ours. Boost ourselves again so we can release
+                       // that lock as well quickly.
+                       boost_thread_priority(current_thread, maximum_priority);
+               } else {
+                       // We are boosted but don't hold any other lock with waiters
+                       // with higher priority than ours. Restore our old pre-boost
+                       // priority.
+                       unboost_thread_priority(current_thread);
+               }
+       } else if (maximum_priority > current_thread->priority) {
+               // We are not boosted anymore because our priority was changed
+               // by someone else while we were boosted (doing that overwrites
+               // the boosted priority and clears the boosted flag). However,
+               // with our current priority, there is another thread with higher
+               // priority than ours waiting for a lock we hold, so boost again.
+               boost_thread_priority(current_thread, maximum_priority);
+       }
+}
 
 status_t
 _mutex_trylock(mutex* lock)
@@ -772,6 +914,12 @@ _mutex_trylock(mutex* lock)
 
        if (lock->holder <= 0) {
                lock->holder = thread_get_current_thread_id();
+               lock->holder_thread = thread_get_current_thread();
+
+               // Add the lock we just acquired to the array of held locks for
+               // this thread.
+               add_lock_to_held_locks(lock, lock->holder_thread);
+
                return B_OK;
        }
 #endif
@@ -796,6 +944,12 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
 #if KDEBUG
        if (lock->holder < 0) {
                lock->holder = thread_get_current_thread_id();
+               lock->holder_thread = thread_get_current_thread();
+
+               // Add the lock we just acquired to the array of held locks for
+               // this thread.
+               add_lock_to_held_locks(lock, lock->holder_thread);
+
                return B_OK;
        } else if (lock->holder == thread_get_current_thread_id()) {
                panic("_mutex_lock(): double lock of %p by thread %ld", lock,
@@ -805,6 +959,11 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
 #else
        if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
                lock->flags &= ~MUTEX_FLAG_RELEASED;
+
+               lock->holder_thread = thread_get_current_thread();
+               // Add the lock we just acquired to the array of held locks for
+               // this thread.
+               add_lock_to_held_locks(lock, thread_get_current_thread());
                return B_OK;
        }
 #endif
@@ -814,12 +973,10 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
        waiter.thread = thread_get_current_thread();
        waiter.next = NULL;
 
-       if (lock->waiters != NULL) {
-               lock->waiters->last->next = &waiter;
-       } else
-               lock->waiters = &waiter;
+       add_to_mutex_waiters_list(lock, &waiter);
 
-       lock->waiters->last = &waiter;
+       // TODO: We could also do priority boosting here, but then
+       //       have to unboost again on timeout.
 
        // block
        thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, 
lock);
@@ -828,7 +985,12 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
        if (error == B_OK) {
 #if KDEBUG
                lock->holder = waiter.thread->id;
+               lock->holder_thread = waiter.thread;
 #endif
+               // Add the lock we just acquired to the array of held locks for
+               // this thread.
+               add_lock_to_held_locks(lock, waiter.thread);
+
        } else {
                // If the timeout occurred, we must remove our waiter structure 
from
                // the queue.
@@ -869,6 +1031,16 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
        return error;
 }
 
+void
+_mutex_transfer_lock(mutex* lock, thread_id thread)
+{
+#if KDEBUG
+       remove_lock_from_held_locks(lock, lock->holder_thread);
+       lock->holder = thread;
+       lock->holder_thread = Thread::Get(thread);
+       add_lock_to_held_locks(lock, lock->holder_thread);
+#endif
+}
 
 static int
 dump_mutex_info(int argc, char** argv)
@@ -893,11 +1065,12 @@ dump_mutex_info(int argc, char** argv)
 #else
        kprintf("  count:           %ld\n", lock->count);
 #endif
+       kprintf("  holder_thread:   %p\n", lock->holder_thread);
 
-       kprintf("  waiting threads:");
+       kprintf("  waiting threads [priority]:");
        mutex_waiter* waiter = lock->waiters;
        while (waiter != NULL) {
-               kprintf(" %ld", waiter->thread->id);
+               kprintf(" %ld [%ld]", waiter->thread->id, waiter->thread->priority);
                waiter = waiter->next;
        }
        kputs("\n");
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
index 6c95268..3cb0311 100644
--- a/src/system/kernel/thread.cpp
+++ b/src/system/kernel/thread.cpp
@@ -169,6 +169,8 @@ Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
        priority(-1),
        next_priority(-1),
        io_priority(-1),
+       boosted(false),
+       pre_boost_priority(-1),
        cpu(cpu),
        previous_cpu(NULL),
        pinned_to_cpu(0),
@@ -198,6 +200,9 @@ Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
        post_interrupt_callback(NULL),
        post_interrupt_data(NULL)
 {
+       for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++)
+               held_locks[i] = NULL;
+
        id = threadID >= 0 ? threadID : allocate_thread_id();
        visible = false;
 
@@ -1441,6 +1446,7 @@ set_thread_prio(int argc, char **argv)
                if (thread->id != id)
                        continue;
                thread->priority = thread->next_priority = prio;
+               thread->boosted = false;
                kprintf("thread %ld set to priority %ld\n", id, prio);
                found = true;
                break;
@@ -2816,6 +2822,29 @@ thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
        return B_OK;
 }
 
+void
+boost_thread_priority(Thread *thread, int32 boost_priority)
+{
+       ASSERT(!are_interrupts_enabled());
+       ASSERT(thread);
+       if (!thread->boosted)
+               thread->pre_boost_priority = thread->priority;
+       scheduler_set_thread_priority(thread, boost_priority);
+       thread->boosted = true;
+}
+
+void
+unboost_thread_priority(Thread *thread)
+{
+       ASSERT(!are_interrupts_enabled());
+       ASSERT(thread);
+
+       if (!thread->boosted)
+               return;
+
+       thread->boosted = false;
+       scheduler_set_thread_priority(thread, thread->pre_boost_priority);
+}
 
 //     #pragma mark - thread blocking API
 
@@ -3186,6 +3215,7 @@ set_thread_priority(thread_id id, int32 priority)
 
        InterruptsSpinLocker schedulerLocker(gSchedulerLock);
 
+       thread->boosted = false;
        if (thread == thread_get_current_thread()) {
                // It's ourself, so we know we aren't in the run queue, and we can
                // manipulate our structure directly.
-- 
1.7.7.2


--Boundary_(ID_J/Ly8tV226VVIZRhe8wW/A)
Content-type: application/x-be_attribute; name="BeOS Attributes"
Content-transfer-encoding: base64
Content-disposition: BMailAttachment; filename="BeOS Attributes"

QkVPUzpUWVBFAE1JTVMAAAAAAAAAC3RleHQvcGxhaW4AYmU6ZW5jb2RpbmcATE9ORwAAAAAAAAAE
AAD//3dyYXAATE9ORwAAAAAAAAAEAAAAAWFsaWdubWVudABMT05HAAAAAAAAAAQAAAAAc3R5bGVz
AFJBV1QAAAAAAAAAoEFsaSEAAAAAAAAAAQAAAABEZWphVnUgU2FucwBHTk9MAQAAAAQAAAANAAAA
AwAAAAMACgBHTk9MAQAAAAQAAAAVAAAABAAAAAMABwBUWUJVQm9vawAAAAAjAAAA/////wMABQBF
VFlCAQAAAAEAAAA6AAAA/////wMABgBSVFNDAQAAAAIAAABAAAAA/////0FAAABCtAAAAEAAAAD/
AABTdHlsZWRFZGl0LWluZm8AUkVDVAAAAAAAAAAQAADgQAAA0EEAgAJEAIDcQ3BlLWluZm8AaW5m
bwAAAAAAAAIRSE1GMQAAAAABAAAA////////////////////////////////rQAAAAwAAAAFAAAA
BgAAAAEAAAAAAAAABQAAAAIAAAADAA8AVENFUgEAAAAQAAAAAAAAAAkAAAADAAoATE9PQgEAAAAB
AAAAHwAAAAMAAAADAAkAR05PTAEAAAAEAAAAKgAAAAQAAAADAAcAR05PTAEAAAAEAAAANwAAAAcA
AAADAAYAR05PTAEAAAAEAAAAQgAAAP////8DAAgAVE9MRgEAAAAEAAAATAAAAP////8DAAgAVE9M
RgEAAAAEAAAAWAAAAAgAAAADAA8ATE9PQgEAAAABAAAAZAAAAP////8DAAkAR05PTAEAAAAEAAAA
dAAAAAoAAAADAAwAR05PTAEAAAAEAAAAgQAAAAsAAAADAAkATE9PQgEAAAABAAAAkQAAAP////8B
AAkAUlRTQwEAAAAJAAAAmwAAAP////93aW5kb3dwb3NpdGlvbgAAACJDAACcQwDAPUQAQF1Ec2hv
dyB0YWJzAABmb250a2luZAAAAAAAYW5jaG9yAAAAAABjYXJldAAAAAAAdnNjcm9sbAAAiMdFaHNj
cm9sbAAAAAAAc3ludGF4Y29sb3JpbmcAAWVuY29kaW5nAAAAAABsaW5lIGJyZWFrcwAAAAAAc29m
dHdyYXAAAGxhbmd1YWdlAAUAAABEaWZmAA==

--Boundary_(ID_J/Ly8tV226VVIZRhe8wW/A)--

--Boundary_(ID_QD2+CJvx21T8a8wn07ynLA)--

Other related posts:

  • » [PATCH] Mutex priorities - Yourself