Author: bonefish
Date: 2010-01-07 16:31:29 +0100 (Thu, 07 Jan 2010)
New Revision: 34935
Changeset: http://dev.haiku-os.org/changeset/34935/haiku

Modified:
   haiku/trunk/headers/private/kernel/lock.h
   haiku/trunk/src/system/kernel/lock.cpp
   haiku/trunk/src/tests/add-ons/kernel/kernelland_emu/lock.cpp
Log:
Added new mutex_switch_from_read_lock() for unlocking a read lock and
starting to lock a mutex in an atomic operation.

Modified: haiku/trunk/headers/private/kernel/lock.h
===================================================================
--- haiku/trunk/headers/private/kernel/lock.h	2010-01-07 14:09:56 UTC (rev 34934)
+++ haiku/trunk/headers/private/kernel/lock.h	2010-01-07 15:31:29 UTC (rev 34935)
@@ -124,7 +124,6 @@
 extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
 extern void rw_lock_destroy(rw_lock* lock);
 extern status_t rw_lock_write_lock(rw_lock* lock);
-extern void rw_lock_write_unlock(rw_lock* lock);
 
 extern void mutex_init(mutex* lock, const char* name);
 	// name is *not* cloned nor freed in mutex_destroy()
@@ -135,12 +134,16 @@
 	// for the lock is atomically. I.e. if "from" guards the object "to" belongs
 	// to, the operation is safe as long as "from" is held while destroying
 	// "to".
+extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
+	// Like mutex_switch_lock(), just for switching from a read-locked
+	// rw_lock.
 
 
 // implementation private:
 
 extern status_t _rw_lock_read_lock(rw_lock* lock);
-extern void _rw_lock_read_unlock(rw_lock* lock);
+extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
+extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);
 
 extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
 extern void _mutex_unlock(mutex* lock, bool threadsLocked);
@@ -171,11 +174,18 @@
 #else
 	int32 oldCount = atomic_add(&lock->count, -1);
 	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
-		_rw_lock_read_unlock(lock);
+		_rw_lock_read_unlock(lock, false);
 #endif
 }
 
 
+static inline void
+rw_lock_write_unlock(rw_lock* lock)
+{
+	_rw_lock_write_unlock(lock, false);
+}
+
+
 static inline status_t
 mutex_lock(mutex* lock)
 {

Modified: haiku/trunk/src/system/kernel/lock.cpp
===================================================================
--- haiku/trunk/src/system/kernel/lock.cpp	2010-01-07 14:09:56 UTC (rev 34934)
+++ haiku/trunk/src/system/kernel/lock.cpp	2010-01-07 15:31:29 UTC (rev 34935)
@@ -323,9 +323,9 @@
 
 
 void
-_rw_lock_read_unlock(rw_lock* lock)
+_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
 {
-	InterruptsSpinLocker locker(gThreadSpinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
 
 	// If we're still holding the write lock or if there are other readers,
 	// no-one can be woken up.
@@ -389,9 +389,9 @@
 
 
 void
-rw_lock_write_unlock(rw_lock* lock)
+_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
 {
-	InterruptsSpinLocker locker(gThreadSpinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
 
 	if (thread_get_current_thread_id() != lock->holder) {
 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
@@ -559,6 +559,23 @@
 
 
 status_t
+mutex_switch_from_read_lock(rw_lock* from, mutex* to)
+{
+	InterruptsSpinLocker locker(gThreadSpinlock);
+
+#if KDEBUG_RW_LOCK_DEBUG
+	_rw_lock_write_unlock(from, true);
+#else
+	int32 oldCount = atomic_add(&from->count, -1);
+	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
+		_rw_lock_read_unlock(from, true);
+#endif
+
+	return mutex_lock_threads_locked(to);
+}
+
+
+status_t
 _mutex_lock(mutex* lock, bool threadsLocked)
 {
 #if KDEBUG

Modified: haiku/trunk/src/tests/add-ons/kernel/kernelland_emu/lock.cpp
===================================================================
--- haiku/trunk/src/tests/add-ons/kernel/kernelland_emu/lock.cpp	2010-01-07 14:09:56 UTC (rev 34934)
+++ haiku/trunk/src/tests/add-ons/kernel/kernelland_emu/lock.cpp	2010-01-07 15:31:29 UTC (rev 34935)
@@ -360,9 +360,9 @@
 
 
 void
-_rw_lock_read_unlock(rw_lock* lock)
+_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
 {
-	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
+	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);
 
 	// If we're still holding the write lock or if there are other readers,
 	// no-one can be woken up.
@@ -425,9 +425,9 @@
 
 
 void
-rw_lock_write_unlock(rw_lock* lock)
+_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
 {
-	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
+	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);
 
 	if (find_thread(NULL) != lock->holder) {
 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
@@ -549,6 +549,23 @@
 
 
 status_t
+mutex_switch_from_read_lock(rw_lock* from, mutex* to)
+{
+	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
+
+#if KDEBUG_RW_LOCK_DEBUG
+	_rw_lock_write_unlock(from, true);
+#else
+	int32 oldCount = atomic_add(&from->count, -1);
+	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
+		_rw_lock_read_unlock(from, true);
+#endif
+
+	return mutex_lock_threads_locked(to);
+}
+
+
+status_t
 _mutex_lock(mutex* lock, bool threadsLocked)
 {
 	// lock only, if !threadsLocked