[haiku-commits] r34403 - in haiku/trunk: headers/private/kernel src/system/kernel

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 1 Dec 2009 10:38:34 +0100 (CET)

Author: bonefish
Date: 2009-12-01 10:38:34 +0100 (Tue, 01 Dec 2009)
New Revision: 34403
Changeset: http://dev.haiku-os.org/changeset/34403/haiku

Modified:
   haiku/trunk/headers/private/kernel/lock.h
   haiku/trunk/src/system/kernel/lock.cpp
Log:
Since there were no further complaints: Added mutex_lock_with_timeout().


Modified: haiku/trunk/headers/private/kernel/lock.h
===================================================================
--- haiku/trunk/headers/private/kernel/lock.h   2009-12-01 09:36:23 UTC (rev 34402)
+++ haiku/trunk/headers/private/kernel/lock.h   2009-12-01 09:38:34 UTC (rev 34403)
@@ -22,6 +22,7 @@
        thread_id                               holder;
 #else
        int32                                   count;
+       uint16                                  ignore_unlock_count;
 #endif
        uint8                                   flags;
 } mutex;
@@ -81,7 +82,7 @@
 #      define MUTEX_INITIALIZER(name)                  { name, NULL, -1, 0 }
 #      define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 }
 #else
-#      define MUTEX_INITIALIZER(name)                  { name, NULL, 0, 0 }
+#      define MUTEX_INITIALIZER(name)                  { name, NULL, 0, 0, 0 }
 #      define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 }
 #endif
 
@@ -132,6 +133,8 @@
 extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
 extern void _mutex_unlock(mutex* lock, bool threadsLocked);
 extern status_t _mutex_trylock(mutex* lock);
+extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
+       bigtime_t timeout);
 
 
 static inline status_t
@@ -173,6 +176,19 @@
 }
 
 
+static inline status_t
+mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
+{
+#if KDEBUG
+       return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
+#else
+       if (atomic_add(&lock->count, -1) < 0)
+               return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
+       return B_OK;
+#endif
+}
+
+
 static inline void
 mutex_unlock(mutex* lock)
 {

Modified: haiku/trunk/src/system/kernel/lock.cpp
===================================================================
--- haiku/trunk/src/system/kernel/lock.cpp      2009-12-01 09:36:23 UTC (rev 34402)
+++ haiku/trunk/src/system/kernel/lock.cpp      2009-12-01 09:38:34 UTC (rev 34403)
@@ -431,6 +431,7 @@
        lock->holder = -1;
 #else
        lock->count = 0;
+       lock->ignore_unlock_count = 0;
 #endif
        lock->flags = 0;
 
@@ -448,6 +449,7 @@
        lock->holder = -1;
 #else
        lock->count = 0;
+       lock->ignore_unlock_count = 0;
 #endif
        lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
 
@@ -574,6 +576,11 @@
                        lock, lock->holder);
                return;
        }
+#else
+       if (lock->ignore_unlock_count > 0) {
+               lock->ignore_unlock_count--;
+               return;
+       }
 #endif
 
        mutex_waiter* waiter = lock->waiters;
@@ -620,6 +627,97 @@
 }
 
 
+status_t
+_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
+{
+#if KDEBUG
+       if (!gKernelStartup && !are_interrupts_enabled()) {
+               panic("_mutex_lock(): called with interrupts disabled for lock %p",
+                       lock);
+       }
+#endif
+
+       InterruptsSpinLocker locker(gThreadSpinlock);
+
+       // Might have been released after we decremented the count, but before
+       // we acquired the spinlock.
+#if KDEBUG
+       if (lock->holder < 0) {
+               lock->holder = thread_get_current_thread_id();
+               return B_OK;
+       } else if (lock->holder == thread_get_current_thread_id()) {
+               panic("_mutex_lock(): double lock of %p by thread %ld", lock,
+                       lock->holder);
+       } else if (lock->holder == 0)
+               panic("_mutex_lock(): using unitialized lock %p", lock);
+#else
+       if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
+               lock->flags &= ~MUTEX_FLAG_RELEASED;
+               return B_OK;
+       }
+#endif
+
+       // enqueue in waiter list
+       mutex_waiter waiter;
+       waiter.thread = thread_get_current_thread();
+       waiter.next = NULL;
+
+       if (lock->waiters != NULL) {
+               lock->waiters->last->next = &waiter;
+       } else
+               lock->waiters = &waiter;
+
+       lock->waiters->last = &waiter;
+
+       // block
+       thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
+       status_t error = thread_block_with_timeout_locked(timeoutFlags, timeout);
+
+       if (error == B_OK) {
+#if KDEBUG
+               lock->holder = waiter.thread->id;
+#endif
+       } else {
+               // If the timeout occurred, we must remove our waiter structure from
+               // the queue.
+               mutex_waiter* previousWaiter = NULL;
+               mutex_waiter* otherWaiter = lock->waiters;
+               while (otherWaiter != NULL && otherWaiter != &waiter) {
+                       previousWaiter = otherWaiter;
+                       otherWaiter = otherWaiter->next;
+               }
+               if (otherWaiter == &waiter) {
+                       // the structure is still in the list -- dequeue
+                       if (&waiter == lock->waiters) {
+                               if (waiter.next != NULL)
+                                       waiter.next->last = waiter.last;
+                               lock->waiters = waiter.next;
+                       } else {
+                               if (waiter.next == NULL)
+                                       lock->waiters->last = previousWaiter;
+                               previousWaiter->next = waiter.next;
+                       }
+
+#if !KDEBUG
+                       // we need to fix the lock count
+                       if (atomic_add(&lock->count, 1) == -1) {
+                               // This means we were the only thread waiting for the lock and
+                               // the lock owner has already called atomic_add() in
+                               // mutex_unlock(). That is we probably would get the lock very
+                               // soon (if the lock holder has a low priority, that might
+                               // actually take rather long, though), but the timeout already
+                               // occurred, so we don't try to wait. Just increment the ignore
+                               // unlock count.
+                               lock->ignore_unlock_count++;
+                       }
+#endif
+               }
+       }
+
+       return error;
+}
+
+
 static int
 dump_mutex_info(int argc, char** argv)
 {


Other related posts: