[haiku-commits] r41470 - in haiku/branches/developer/bonefish/signals: headers/private/kernel src/system/kernel src/system/kernel/arch/m68k src/system/kernel/arch/ppc src/system/kernel/arch/x86 ...

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Fri, 13 May 2011 15:36:22 +0200 (CEST)

Author: bonefish
Date: 2011-05-13 15:36:22 +0200 (Fri, 13 May 2011)
New Revision: 41470
Changeset: https://dev.haiku-os.org/changeset/41470

Modified:
   haiku/branches/developer/bonefish/signals/headers/private/kernel/thread.h
   haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h
   haiku/branches/developer/bonefish/signals/src/system/kernel/arch/m68k/arch_debug.cpp
   haiku/branches/developer/bonefish/signals/src/system/kernel/arch/ppc/arch_debug.cpp
   haiku/branches/developer/bonefish/signals/src/system/kernel/arch/x86/arch_debug.cpp
   haiku/branches/developer/bonefish/signals/src/system/kernel/device_manager/IOSchedulerSimple.cpp
   haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp
Log:
* Removed thread_get_thread_struct[_locked]() and introduced Thread::GetDebug()
  and Thread::IsAlive() instead.
* Removed thread_unblock(), which wasn't used anymore.
* Introduced static sThreadHashLock, which now protects sThreadHash instead of
  gThreadSpinlock.
* Thread::GetAndLock() now rechecks the hash table after locking the thread,
  and therefore, until the caller drops the lock, the thread won't go away.
* Revised the locking in thread.cpp and documented locking semantics of more
  functions and Thread members.
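
For illustration, a caller of the strengthened Thread::GetAndLock() might
look like this (a minimal sketch; the caller and its error handling are
hypothetical, the locker idiom matches the one used in the diff below):

    // The thread cannot be removed while we hold its lock.
    Thread* thread = Thread::GetAndLock(id);
    if (thread == NULL)
        return B_BAD_THREAD_ID;    // already gone or never existed
    // ... access fields protected by the thread's lock (fLock) ...
    thread->UnlockAndReleaseReference();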


Modified: haiku/branches/developer/bonefish/signals/headers/private/kernel/thread.h
===================================================================
--- haiku/branches/developer/bonefish/signals/headers/private/kernel/thread.h	2011-05-13 07:06:34 UTC (rev 41469)
+++ haiku/branches/developer/bonefish/signals/headers/private/kernel/thread.h	2011-05-13 13:36:22 UTC (rev 41470)
@@ -61,9 +61,6 @@
 
 #define thread_get_current_thread arch_thread_get_current_thread
 
-Thread *thread_get_thread_struct(thread_id id);
-Thread *thread_get_thread_struct_locked(thread_id id);
-
 static thread_id thread_get_current_thread_id(void);
 static inline thread_id
 thread_get_current_thread_id(void)
@@ -99,7 +96,6 @@
 status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
 status_t thread_block_with_timeout_locked(uint32 timeoutFlags,
                        bigtime_t timeout);
-void thread_unblock(status_t threadID, status_t status);
 
 // used in syscalls.c
 status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
@@ -199,7 +195,7 @@
        for this thread shall be used.
 
        When the caller determines that the condition for unblocking the thread
-       occurred, it calls thread_unblock[_locked]() to unblock the thread. At that
+       occurred, it calls thread_unblock_locked() to unblock the thread. At that
        time one of the locks that are held when calling thread_prepare_to_block() must
        be held. Usually that would be the client lock. In two cases it generally
        isn't, however, since the unblocking code doesn't know about the client
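
A minimal sketch of the protocol described above (the client lock, the
condition flag, and the waiting-thread variable are illustrative; the
functions are the ones declared in this header):

    // waiting side
    mutex_lock(&sClientLock);
    while (!sConditionHolds) {
        thread_prepare_to_block(thread_get_current_thread(), 0,
            THREAD_BLOCK_TYPE_OTHER, "example condition");
        mutex_unlock(&sClientLock);
        thread_block();
        mutex_lock(&sClientLock);
    }
    mutex_unlock(&sClientLock);

    // unblocking side -- the client lock is one of the locks that were
    // held at thread_prepare_to_block() time, as required above; the
    // spinlock is what the *_locked() variant expects
    mutex_lock(&sClientLock);
    sConditionHolds = true;
    {
        InterruptsSpinLocker schedulerLocker(gSchedulerLock);
        thread_unblock_locked(sWaitingThread, B_OK);
    }
    mutex_unlock(&sClientLock);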

Modified: haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h
===================================================================
--- haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h	2011-05-13 07:06:34 UTC (rev 41469)
+++ haiku/branches/developer/bonefish/signals/headers/private/kernel/thread_types.h	2011-05-13 13:36:22 UTC (rev 41470)
@@ -334,15 +334,15 @@
        int32                   flags;                  // summary of events relevant in interrupt
                                                                        // handlers (signals pending, user debugging
                                                                        // enabled, etc.)
-       Thread                  *all_next;
-       Thread                  *team_next;
+       Thread                  *all_next;              // protected by thread hash lock
+       Thread                  *team_next;             // protected by team lock and fLock
        Thread                  *queue_next;    // protected by scheduler lock
-       timer                   alarm;
+       timer                   alarm;                  // protected by scheduler lock
        thread_id               id;
-       char                    name[B_OS_NAME_LENGTH];
+       char                    name[B_OS_NAME_LENGTH]; // protected by fLock
        int32                   priority;               // protected by scheduler lock
        int32                   next_priority;  // protected by scheduler lock
-       int32                   io_priority;
+       int32                   io_priority;    // protected by fLock
        int32                   state;                  // protected by scheduler lock
        int32                   next_state;             // protected by scheduler lock
        struct cpu_ent  *cpu;                   // protected by scheduler lock
@@ -382,13 +382,15 @@
        struct PrivateConditionVariableEntry *condition_variable_entry;
 
        struct {
-               sem_id          write_sem;
-               sem_id          read_sem;
+               sem_id          write_sem;      // acquired by writers before writing
+               sem_id          read_sem;       // released by writers after writing, acquired
+                                                               // by this thread when reading
                thread_id       sender;
                int32           code;
                size_t          size;
                void*           buffer;
-       } msg;
+       } msg;  // write_sem/read_sem are protected by fLock when accessed by
+                       // others; the other fields are protected by write_sem/read_sem
 
        union {
                addr_t          fault_handler;
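
The send/receive protocol these comments describe, sketched end to end
(cf. send_data_etc() further down; the variable names are illustrative):

    // sender: serialize writers on write_sem, publish the message, then
    // wake the receiver
    acquire_sem(target->msg.write_sem);
    target->msg.sender = thread_get_current_thread_id();
    target->msg.code = code;
    target->msg.size = size;
    target->msg.buffer = data;
    release_sem(target->msg.read_sem);

    // receiver (the thread itself): wait for a message, consume it, then
    // let the next writer in
    acquire_sem(thread->msg.read_sem);
    // ... copy out sender/code/size/buffer ...
    release_sem(thread->msg.write_sem);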
@@ -406,25 +408,24 @@
                                                        // lock
 
        struct {
-               sem_id          sem;
-               status_t        status;
-               uint16          reason;
-               uint16          signal;
-               struct list     waiters;
-       } exit; // protected by fLock
+               sem_id          sem;            // immutable after thread creation
+               status_t        status;         // accessed only by this thread
+               uint16          reason;         // accessed only by this thread
+               uint16          signal;         // accessed only by this thread
+               struct list     waiters;        // protected by fLock
+       } exit;
 
-       // protected by the global threads spinlock
-       struct select_info *select_infos;
+       struct select_info *select_infos;       // protected by fLock
 
        struct thread_debug_info debug_info;
 
        // stack
-       area_id                 kernel_stack_area;
-       addr_t                  kernel_stack_base;
-       addr_t                  kernel_stack_top;
-       area_id                 user_stack_area;
-       addr_t                  user_stack_base;
-       size_t                  user_stack_size;
+       area_id                 kernel_stack_area;      // immutable after thread creation
+       addr_t                  kernel_stack_base;      // immutable after thread creation
+       addr_t                  kernel_stack_top;       // immutable after thread creation
+       area_id                 user_stack_area;        // protected by thread lock
+       addr_t                  user_stack_base;        // protected by thread lock
+       size_t                  user_stack_size;        // protected by thread lock
 
        addr_t                  user_local_storage;
                // usually allocated at the safe side of the stack
@@ -453,7 +454,11 @@
 
        static  Thread*                         Get(thread_id id);
        static  Thread*                         GetAndLock(thread_id id);
+       static  Thread*                         GetDebug(thread_id id);
+                                                                       // in kernel debugger only
 
+       static  bool                            IsAlive(thread_id id);
+
                        void*                           operator new(size_t size);
                        void*                           operator new(size_t, void* pointer);
                        void                            operator delete(void* pointer, size_t size);
@@ -472,6 +477,8 @@
                        void                            UnlockAndReleaseReference()
                                                                        { Unlock(); ReleaseReference(); }
 
+                       bool                            IsAlive() const;
+
 private:
                        mutex                           fLock;
 };

Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/arch/m68k/arch_debug.cpp
===================================================================
--- haiku/branches/developer/bonefish/signals/src/system/kernel/arch/m68k/arch_debug.cpp	2011-05-13 07:06:34 UTC (rev 41469)
+++ haiku/branches/developer/bonefish/signals/src/system/kernel/arch/m68k/arch_debug.cpp	2011-05-13 13:36:22 UTC (rev 41470)
@@ -129,7 +129,7 @@
        } else {
 // TODO: Add support for stack traces of other threads.
 /*             thread_id id = strtoul(argv[1], NULL, 0);
-               thread = thread_get_thread_struct_locked(id);
+               thread = Thread::GetDebug(id);
                if (thread == NULL) {
                        kprintf("could not find thread %ld\n", id);
                        return 0;

Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/arch/ppc/arch_debug.cpp
===================================================================
--- haiku/branches/developer/bonefish/signals/src/system/kernel/arch/ppc/arch_debug.cpp	2011-05-13 07:06:34 UTC (rev 41469)
+++ haiku/branches/developer/bonefish/signals/src/system/kernel/arch/ppc/arch_debug.cpp	2011-05-13 13:36:22 UTC (rev 41470)
@@ -129,7 +129,7 @@
        } else {
 // TODO: Add support for stack traces of other threads.
 /*             thread_id id = strtoul(argv[1], NULL, 0);
-               thread = thread_get_thread_struct_locked(id);
+               thread = Thread::GetDebug(id);
                if (thread == NULL) {
                        kprintf("could not find thread %ld\n", id);
                        return 0;

Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/arch/x86/arch_debug.cpp
===================================================================
--- haiku/branches/developer/bonefish/signals/src/system/kernel/arch/x86/arch_debug.cpp	2011-05-13 07:06:34 UTC (rev 41469)
+++ haiku/branches/developer/bonefish/signals/src/system/kernel/arch/x86/arch_debug.cpp	2011-05-13 13:36:22 UTC (rev 41470)
@@ -387,7 +387,7 @@
 
        if (arg != NULL) {
                thread_id id = strtoul(arg, NULL, 0);
-               thread = thread_get_thread_struct_locked(id);
+               thread = Thread::GetDebug(id);
                if (thread == NULL) {
                        kprintf("could not find thread %ld\n", id);
                        return false;
@@ -854,7 +854,7 @@
                thread = thread_get_current_thread();
        } else if (argc == 2) {
                thread_id id = strtoul(argv[1], NULL, 0);
-               thread = thread_get_thread_struct_locked(id);
+               thread = Thread::GetDebug(id);
                if (thread == NULL) {
                        kprintf("could not find thread %ld\n", id);
                        return 0;
@@ -924,7 +924,7 @@
                return 0;
 
        // get the thread
-       Thread* thread = thread_get_thread_struct_locked(threadID);
+       Thread* thread = Thread::GetDebug(threadID);
        if (thread == NULL) {
                kprintf("Could not find thread with ID \"%s\".\n", 
threadIDString);
                return 0;

Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/device_manager/IOSchedulerSimple.cpp
===================================================================
--- haiku/branches/developer/bonefish/signals/src/system/kernel/device_manager/IOSchedulerSimple.cpp	2011-05-13 07:06:34 UTC (rev 41469)
+++ haiku/branches/developer/bonefish/signals/src/system/kernel/device_manager/IOSchedulerSimple.cpp	2011-05-13 13:36:22 UTC (rev 41470)
@@ -810,8 +810,7 @@
        RequestOwnerList existingOwners;
 
        while ((owner = fUnusedRequestOwners.RemoveHead()) != NULL) {
-               if (owner->thread < 0
-                       || thread_get_thread_struct(owner->thread) == NULL) {
+               if (owner->thread < 0 || !Thread::IsAlive(owner->thread)) {
                        if (owner->thread >= 0)
                                fRequestOwners->RemoveUnchecked(owner);
                        owner->team = team;

Modified: haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp
===================================================================
--- haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp	2011-05-13 07:06:34 UTC (rev 41469)
+++ haiku/branches/developer/bonefish/signals/src/system/kernel/thread.cpp	2011-05-13 13:36:22 UTC (rev 41470)
@@ -96,6 +96,7 @@
 // thread list
 static Thread sIdleThreads[B_MAX_CPU_COUNT];
 static ThreadHashTable sThreadHash;
+static spinlock sThreadHashLock = B_SPINLOCK_INITIALIZER;
 static thread_id sNextThreadID = 2;
        // ID 1 is allocated for the kernel by Team::Team() behind our back
 
@@ -255,7 +256,7 @@
 /*static*/ Thread*
 Thread::Get(thread_id id)
 {
-       InterruptsSpinLocker threadHashLocker(gThreadSpinlock);
+       InterruptsSpinLocker threadHashLocker(sThreadHashLock);
        Thread* thread = sThreadHash.Lookup(id);
        if (thread != NULL)
                thread->AcquireReference();
@@ -266,7 +267,8 @@
 /*static*/ Thread*
 Thread::GetAndLock(thread_id id)
 {
-       InterruptsSpinLocker threadHashLocker(gThreadSpinlock);
+       // look it up and acquire a reference
+       InterruptsSpinLocker threadHashLocker(sThreadHashLock);
        Thread* thread = sThreadHash.Lookup(id);
        if (thread == NULL)
                return NULL;
@@ -274,11 +276,37 @@
        thread->AcquireReference();
        threadHashLocker.Unlock();
 
+       // lock and check whether it is still in the hash table
        thread->Lock();
-       return thread;
+       threadHashLocker.Lock();
+
+       if (sThreadHash.Lookup(id) == thread)
+               return thread;
+
+       threadHashLocker.Unlock();
+
+       // nope, the thread is no longer in the hash table
+       thread->UnlockAndReleaseReference();
+
+       return NULL;
 }
 
 
+/*static*/ Thread*
+Thread::GetDebug(thread_id id)
+{
+       return sThreadHash.Lookup(id);
+}
+
+
+/*static*/ bool
+Thread::IsAlive(thread_id id)
+{
+       InterruptsSpinLocker threadHashLocker(sThreadHashLock);
+       return sThreadHash.Lookup(id) != NULL;
+}
+
+
 void*
 Thread::operator new(size_t size)
 {
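
GetDebug() deliberately takes no lock: it is meant for kernel debugger
commands, where the other CPUs are halted and the hash table cannot change
underneath it. A sketch of such a command (hypothetical, modeled on the
arch_debug.cpp hunks above):

    static int
    dump_example(int argc, char** argv)
    {
        // kernel debugger context: no locking needed (or allowed)
        thread_id id = strtoul(argv[1], NULL, 0);
        Thread* thread = Thread::GetDebug(id);
        if (thread == NULL) {
            kprintf("could not find thread %ld\n", id);
            return 0;
        }
        // ... dump the thread's state ...
        return 0;
    }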
@@ -344,11 +372,23 @@
 }
 
 
+/*!    Checks whether the thread is still in the thread hash table.
+*/
+bool
+Thread::IsAlive() const
+{
+       InterruptsSpinLocker threadHashLocker(sThreadHashLock);
+
+       return sThreadHash.Lookup(id) != NULL;
+}
+
+
 // #pragma mark - private functions
 
 
 /*!    Inserts a thread into a team.
-       The caller must hold the team's lock and the global threads spinlock.
+       The caller must hold the team's lock, the thread's lock, and the scheduler
+       lock.
 */
 static void
 insert_thread_into_team(Team *team, Thread *thread)
@@ -366,7 +406,8 @@
 
 
 /*!    Removes a thread from a team.
-       The caller must hold the team's lock and the global threads spinlock.
+       The caller must hold the team's lock, the thread's lock, and the scheduler
+       lock.
 */
 static void
 remove_thread_from_team(Team *team, Thread *thread)
@@ -388,7 +429,9 @@
 }
 
 
-/*! This function gets run by a new thread before anything else */
+/*! This function gets run by a new thread before anything else.
+       Called with interrupts disabled and the scheduler lock held.
+*/
 static void
 thread_kthread_entry(void)
 {
@@ -399,17 +442,24 @@
        if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
                user_debug_thread_scheduled(thread);
 
-       // simulates the thread spinlock release that would occur if the thread had been
-       // rescheded from. The resched didn't happen because the thread is new.
-       RELEASE_THREAD_LOCK();
-
        // start tracking time
        thread->last_time = system_time();
 
-       enable_interrupts(); // this essentially simulates a return-from-interrupt
+       // unlock the scheduler lock and enable interrupts
+       release_spinlock(&gSchedulerLock);
+       enable_interrupts();
+
+       // Return to the actual thread function (the one passed to
+       // spawn_[kernel_]thread()), which will eventually return to
+       // thread_kthread_exit(). That happens due to the way the thread's initial
+       // stack has been set up -- as three nested function calls
+       // (inner/topmost: thread_kthread_entry() <- thread function
+       // <- thread_kthread_exit).
 }
 
 
+/*!    This function is called when a thread function returns.
+*/
 static void
 thread_kthread_exit(void)
 {
@@ -460,15 +510,13 @@
 static thread_id
 create_thread(KernelThreadCreationAttributes& attributes, bool kernel)
 {
-       Thread *thread, *currentThread;
        char stack_name[B_OS_NAME_LENGTH];
        status_t status;
-       bool debugNewThread = false;
 
        TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
                attributes.thread, kernel ? "kernel" : "user"));
 
-       thread = new Thread(attributes.name, attributes.thread, NULL);
+       Thread* thread = new Thread(attributes.name, attributes.thread, NULL);
        if (thread == NULL)
                return B_NO_MEMORY;
        status = thread->Init(false);
@@ -527,14 +575,13 @@
                return B_BAD_TEAM_ID;
        }
 
-       InterruptsSpinLocker threadsLocker(gThreadSpinlock);
-
        // If the new thread belongs to the same team as the current thread,
        // it may inherit some of the thread debug flags.
-       currentThread = thread_get_current_thread();
-       if (currentThread && currentThread->team->id == attributes.team) {
+       bool debugNewThread = false;
+       Thread* currentThread = thread_get_current_thread();
+       if (currentThread != NULL && currentThread->team->id == attributes.team) {
                // inherit all user flags...
-               int32 debugFlags = currentThread->debug_info.flags
+               int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
                        & B_THREAD_DEBUG_USER_FLAG_MASK;
 
                // ... save the syscall tracing flags, unless explicitly specified
@@ -549,6 +596,10 @@
                debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
        }
 
+       ThreadLocker threadLocker(thread);
+       InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+       SpinLocker threadHashLocker(sThreadHashLock);
+
        // insert into global list
        sThreadHash.InsertUnchecked(thread);
        sUsedThreads++;
@@ -557,18 +608,19 @@
        // Debug the new thread, if the parent thread required that (see above),
        // or the respective global team debug flag is set. But only, if a
        // debugger is installed for the team.
-       debugNewThread |= (atomic_get(&team->debug_info.flags)
-               & B_TEAM_DEBUG_STOP_NEW_THREADS);
+       int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
+       debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
        if (debugNewThread
-               && (atomic_get(&team->debug_info.flags)
-                       & B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
+               && (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
                thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
        }
 
        // insert thread into team
        insert_thread_into_team(team, thread);
 
-       threadsLocker.Unlock();
+       threadHashLocker.Unlock();
+       schedulerLocker.Unlock();
+       threadLocker.Unlock();
        teamLocker.Unlock();
 
        thread->args1 = attributes.args1;
@@ -592,45 +644,71 @@
                // the stack will be between USER_STACK_REGION and the main thread stack
                // area (the user stack of the main thread is created in
                // team_create_team())
-               if (attributes.stack_address == NULL) {
-                       thread->user_stack_base = USER_STACK_REGION;
-                       if (attributes.stack_size <= 0)
-                               thread->user_stack_size = USER_STACK_SIZE;
+               area_id stackArea = -1;
+               uint8* stackBase = (uint8*)attributes.stack_address;
+               size_t stackSize = attributes.stack_size;
+
+               if (stackBase != NULL) {
+                       // A stack has been specified. It must be large enough to also hold
+                       // the TLS space. If not, just allocate the standard stack.
+                       if (stackSize <= TLS_SIZE) {
+                               stackBase = NULL;
+                               stackSize = 0;
+                       } else
+                               stackBase -= TLS_SIZE;
+               }
+
+               if (stackBase == NULL) {
+                       // no user-defined stack -- allocate one
+                       stackBase = (uint8*)(addr_t)USER_STACK_REGION;
+
+                       if (stackSize == 0)
+                               stackSize = USER_STACK_SIZE;
                        else
-                               thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
-                       thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
+                               stackSize = PAGE_ALIGN(stackSize);
+                       stackSize += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
 
                        snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
                                attributes.name, thread->id);
                        virtual_address_restrictions virtualRestrictions = {};
-                       virtualRestrictions.address = (void*)thread->user_stack_base;
+                       virtualRestrictions.address = (void*)stackBase;
                        virtualRestrictions.address_specification = B_BASE_ADDRESS;
                        physical_address_restrictions physicalRestrictions = {};
-                       thread->user_stack_area = create_area_etc(team->id, stack_name,
-                               thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
+                       stackArea = create_area_etc(team->id, stack_name,
+                               stackSize + TLS_SIZE, B_NO_LOCK,
                                B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
                                &virtualRestrictions, &physicalRestrictions,
-                               (void**)&thread->user_stack_base);
-                       if (thread->user_stack_area < B_OK
-                               || arch_thread_init_tls(thread) < B_OK) {
+                               (void**)&stackBase);
+                       if (stackArea < 0) {
                                // great, we have a fully running thread without a (usable)
                                // stack
-                               dprintf("create_thread: unable to create proper user stack!\n");
-                               status = thread->user_stack_area;
-                               kill_thread(thread->id);
+                               dprintf("create_thread: unable to create user stack!\n");
+                               status = stackArea;
                        }
-               } else {
-                       thread->user_stack_base = (addr_t)attributes.stack_address;
-                       thread->user_stack_size = attributes.stack_size;
                }
 
+               // set the stack and init the TLS space
+               if (status == B_OK) {
+                       threadLocker.Lock();
+                       thread->user_stack_base = (addr_t)stackBase;
+                       thread->user_stack_size = stackSize;
+                       thread->user_stack_area = stackArea;
+                       threadLocker.Unlock();
+
+                       status = arch_thread_init_tls(thread);
+               }
+
                user_debug_update_new_thread_flags(thread->id);
 
-               // copy the user entry over to the args field in the thread struct
-               // the function this will call will immediately switch the thread into
-               // user space.
+               // Copy the user entry over to the args field in the thread struct.
+               // The function this will call will immediately switch the thread
+               // into user space.
                arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
                        &thread_kthread_entry, &thread_kthread_exit);
+
+               // If something went wrong, kill the thread.
+               if (status != B_OK)
+                       kill_thread(thread->id);
        }
 
        return status;
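
Restating the stack sizing rule implemented above in isolation (a sketch;
create_area_etc() and the constants are the kernel's, the rest is
illustrative):

    size_t stackSize = attributes.stack_size;
    uint8* stackBase = (uint8*)attributes.stack_address;

    // a caller-provided stack must be large enough to also hold the TLS
    // block; otherwise fall back to a kernel-allocated stack
    if (stackBase != NULL && stackSize <= TLS_SIZE)
        stackBase = NULL;

    if (stackBase == NULL) {
        // requested (or default) size, page-aligned, plus guard pages;
        // the area itself is created with stackSize + TLS_SIZE bytes
        stackSize = stackSize == 0
            ? USER_STACK_SIZE : PAGE_ALIGN(stackSize);
        stackSize += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
    }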
@@ -642,20 +720,20 @@
 {
        while (true) {
                // wait for a thread to bury
-               InterruptsSpinLocker locker(gThreadSpinlock);
+               InterruptsSpinLocker schedulerLocker(gSchedulerLock);
 
                while (sUndertakerEntries.IsEmpty()) {
                        ConditionVariableEntry conditionEntry;
                        sUndertakerCondition.Add(&conditionEntry);
-                       locker.Unlock();
+                       schedulerLocker.Unlock();
 
                        conditionEntry.Wait();
 
-                       locker.Lock();
+                       schedulerLocker.Lock();
                }
 
                UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
-               locker.Unlock();
+               schedulerLocker.Unlock();
 
                UndertakerEntry entry = *_entry;
                        // we need a copy, since the original entry is on the thread's stack
@@ -670,20 +748,32 @@
                // unaccessible
                Team* kernelTeam = team_get_kernel_team();
                TeamLocker kernelTeamLocker(kernelTeam);
+               thread->Lock();
+               schedulerLocker.Lock();
 
                remove_thread_from_team(kernelTeam, thread);
 
+               schedulerLocker.Unlock();
                kernelTeamLocker.Unlock();
 
                // free the thread structure
-               thread->ReleaseReference();
+               thread->UnlockAndReleaseReference();
        }
 
-       // never can get here
+       // can never get here
        return B_OK;
 }
 
 
+/*!    Returns the semaphore the thread is currently waiting on.
+
+       The return value is purely informative.
+       The caller must hold the scheduler lock.
+
+       \param thread The thread.
+       \return The ID of the semaphore the thread is currently waiting on or \c -1,
+               if it isn't waiting on a semaphore.
+*/
 static sem_id
 get_thread_wait_sem(Thread* thread)
 {
@@ -696,7 +786,7 @@
 
 
 /*!    Fills the thread_info structure with information from the specified thread.
-       The caller must hold the global threads spinlock.
+       The caller must hold the thread's lock and the scheduler lock.
 */
 static void
 fill_thread_info(Thread *thread, thread_info *info, size_t size)
@@ -706,6 +796,8 @@
 
        strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
 
+       info->sem = -1;
+
        if (thread->state == B_THREAD_WAITING) {
                info->state = B_THREAD_WAITING;
 
@@ -719,6 +811,8 @@
                                sem_id sem = (sem_id)(addr_t)thread->wait.object;
                                if (sem == thread->msg.read_sem)
                                        info->state = B_THREAD_RECEIVING;
+                               else
+                                       info->sem = sem;
                                break;
                        }
 
@@ -735,7 +829,6 @@
        info->stack_base = (void *)thread->user_stack_base;
        info->stack_end = (void *)(thread->user_stack_base
                + thread->user_stack_size);
-       info->sem = get_thread_wait_sem(thread);
 }
 
 
@@ -743,29 +836,23 @@
 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
        int32 flags)
 {
-       Thread *target;
-       sem_id cachedSem;
-       cpu_status state;
-       status_t status;
-
-       state = disable_interrupts();
-       GRAB_THREAD_LOCK();
-       target = thread_get_thread_struct_locked(id);
-       if (!target) {
-               RELEASE_THREAD_LOCK();
-               restore_interrupts(state);
+       // get the thread
+       Thread *target = Thread::Get(id);
+       if (target == NULL)
                return B_BAD_THREAD_ID;
-       }
-       cachedSem = target->msg.write_sem;
-       RELEASE_THREAD_LOCK();
-       restore_interrupts(state);
+       BReference<Thread> targetReference(target, true);
 
+       // get the write semaphore
+       ThreadLocker targetLocker(target);
+       sem_id cachedSem = target->msg.write_sem;
+       targetLocker.Unlock();
+
        if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
                return B_NO_MEMORY;
 
-       status = acquire_sem_etc(cachedSem, 1, flags, 0);
+       status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
        if (status == B_INTERRUPTED) {
-               // We got interrupted by a signal
+               // we got interrupted by a signal
                return status;
        }
        if (status != B_OK) {
@@ -785,14 +872,11 @@
        } else
                data = NULL;
 
-       state = disable_interrupts();
-       GRAB_THREAD_LOCK();
+       targetLocker.Lock();
 
-       // The target thread could have been deleted at this point
-       target = thread_get_thread_struct_locked(id);
-       if (target == NULL) {
-               RELEASE_THREAD_LOCK();
-               restore_interrupts(state);
+       // The target thread could have been deleted at this point.
+       if (!target->IsAlive()) {
+               targetLocker.Unlock();
                free(data);
                return B_BAD_THREAD_ID;
        }
@@ -804,8 +888,7 @@
        target->msg.buffer = data;
        cachedSem = target->msg.read_sem;
 
-       RELEASE_THREAD_LOCK();
-       restore_interrupts(state);
+       targetLocker.Unlock();
 
        release_sem(cachedSem);
        return B_OK;
@@ -817,11 +900,10 @@
        int32 flags)
 {
        Thread *thread = thread_get_current_thread();
-       status_t status;
        size_t size;
        int32 code;
 
-       status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
+       status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
        if (status != B_OK) {
                // Actually, we're not supposed to return error codes
                // but since the only reason this can fail is that we
@@ -1420,10 +1502,11 @@
 
        // Cancel previously installed alarm timer, if any. Hold the scheduler lock
        // to make sure that when cancel_timer() returns, the alarm timer hook will
-       // not be invoked (since B_TIMER_ACQUIRE_SCHEDULER_LOCK is used).
-       InterruptsSpinLocker schedulerLocker(gSchedulerLock);
-       cancel_timer(&thread->alarm);
-       schedulerLocker.Unlock();
+       // not be invoked anymore (since B_TIMER_ACQUIRE_SCHEDULER_LOCK is used).
+       {
+               InterruptsSpinLocker schedulerLocker(gSchedulerLock);
+               cancel_timer(&thread->alarm);
+       }
 
        // remember the user stack area -- we will delete it below
        area_id userStackArea = -1;
@@ -1463,12 +1546,14 @@
                        team->Lock();
                }
 
+               ThreadLocker threadLocker(thread);
+
                state = disable_interrupts();
 
                // swap address spaces, to make sure we're running on the kernel's pgdir
                vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
 
-               GRAB_THREAD_LOCK();
+               SpinLocker schedulerLocker(gSchedulerLock);
                        // removing the thread and putting its death entry to the parent
                        // team needs to be an atomic operation
 
@@ -1510,9 +1595,11 @@
                                        death = NULL;
                        }
 
-                       RELEASE_THREAD_LOCK();
+                       schedulerLocker.Unlock();
                        restore_interrupts(state);
 
+                       threadLocker.Unlock();
+
                        // Get a temporary reference to the team's process group
                        // -- team_remove_team() removes the team from the group, which
                        // might destroy it otherwise and we wouldn't be able to unlock it.
@@ -1564,9 +1651,10 @@
                                }
                        }
 
-                       RELEASE_THREAD_LOCK();
+                       schedulerLocker.Unlock();
                        restore_interrupts(state);
 
+                       threadLocker.Unlock();
                        team->Unlock();
                        kernelTeam->Unlock();
                }
@@ -1584,12 +1672,16 @@
                delete death;
        }
 
+       ThreadLocker threadLocker(thread);
+
        state = disable_interrupts();
-       GRAB_THREAD_LOCK();
+       SpinLocker schedulerLocker(gSchedulerLock);
 
        // remove thread from hash, so it's no longer accessible
+       SpinLocker threadHashLocker(sThreadHashLock);
        sThreadHash.RemoveUnchecked(thread);
        sUsedThreads--;
+       threadHashLocker.Unlock();
 
        // Stop debugging for this thread
        SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
@@ -1601,9 +1693,11 @@
        select_info* selectInfos = thread->select_infos;
        thread->select_infos = NULL;
 
-       RELEASE_THREAD_LOCK();
+       schedulerLocker.Unlock();
        restore_interrupts(state);
 
+       threadLocker.Unlock();
+
        destroy_thread_debug_info(&debugInfo);
 
        // notify select infos
@@ -1640,10 +1734,8 @@
        // for us
        {
                sem_id cachedExitSem = thread->exit.sem;
-               cpu_status state;
 
-               state = disable_interrupts();
-               GRAB_THREAD_LOCK();
+               ThreadLocker threadLocker(thread);
 
                // make sure no one will grab this semaphore again
                thread->exit.sem = -1;
@@ -1657,8 +1749,7 @@
                        entry->signal = thread->exit.signal;
                }
 
-               RELEASE_THREAD_LOCK();
-               restore_interrupts(state);
+               threadLocker.Unlock();
 
                delete_sem(cachedExitSem);
        }
@@ -1685,7 +1776,7 @@
        UndertakerEntry undertakerEntry(thread, teamID);
 
        disable_interrupts();
-       GRAB_THREAD_LOCK();
+       schedulerLocker.Lock();
 
        sUndertakerEntries.Add(&undertakerEntry);
        sUndertakerCondition.NotifyOne(true);
@@ -1697,31 +1788,6 @@
 }
 
 
-Thread *
-thread_get_thread_struct(thread_id id)
-{
-       Thread *thread;
-       cpu_status state;
-
-       state = disable_interrupts();
-       GRAB_THREAD_LOCK();
-
-       thread = thread_get_thread_struct_locked(id);
-
-       RELEASE_THREAD_LOCK();
-       restore_interrupts(state);
-
-       return thread;
-}
-
-
-Thread *
-thread_get_thread_struct_locked(thread_id id)
-{
-       return sThreadHash.Lookup(id);
-}
-
-
 /*!    Called in the interrupt handler code when a thread enters
        the kernel for any reason.
        Only tracks time for now.
@@ -1881,6 +1947,7 @@
 Thread*
 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
 {
+// TODO: Solve this differently!
        for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
                        Thread* thread = it.Next();) {
                if (callback(thread, cookie))
@@ -1894,7 +1961,7 @@
 thread_id
 allocate_thread_id()
 {
-       InterruptsSpinLocker locker(gThreadSpinlock);
+       InterruptsSpinLocker threadHashLocker(sThreadHashLock);
 
        // find the next unused ID
        thread_id id;
@@ -1913,7 +1980,7 @@
                // just been removed (then it doesn't really exist anymore -- other
                // threads starting to wait for it (a bit late) might get confused,
                // though).
-       } while (thread_get_thread_struct_locked(id) != NULL);
+       } while (sThreadHash.Lookup(id) != NULL);
 
        return id;
 }
@@ -1922,7 +1989,7 @@
 thread_id
 peek_next_thread_id()
 {
-       InterruptsSpinLocker locker(gThreadSpinlock);
+       InterruptsSpinLocker threadHashLocker(sThreadHashLock);
        return sNextThreadID;
 }
 
@@ -1946,16 +2013,12 @@
                if (thread == NULL)
                        return;
 
-               state = disable_interrupts();
-               GRAB_THREAD_LOCK();
+               InterruptsSpinLocker _(gSchedulerLock);
 
                // mark the thread as yielded, so it will not be scheduled next
                //thread->was_yielded = true;
                thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
                scheduler_reschedule();
-
-               RELEASE_THREAD_LOCK();
-               restore_interrupts(state);
 #endif
        } else {
                Thread *thread = thread_get_current_thread();
@@ -1963,7 +2026,7 @@
                        return;
 
                // Don't force the thread off the CPU, just reschedule.
-               InterruptsSpinLocker _(gThreadSpinlock);
+               InterruptsSpinLocker _(gSchedulerLock);
                scheduler_reschedule();
        }
 }
@@ -1997,33 +2060,30 @@
 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
        status_t *_returnCode)
 {
-       sem_id exitSem = B_BAD_THREAD_ID;
-       struct death_entry death;
        job_control_entry* freeDeath = NULL;
-       Thread *thread;
-       cpu_status state;
        status_t status = B_OK;
 
        if (id < B_OK)
                return B_BAD_THREAD_ID;
 
-       // we need to resume the thread we're waiting for first
+       // get the thread, queue our death entry, and fetch the semaphore we have to
+       // wait on
+       sem_id exitSem = B_BAD_THREAD_ID;
+       struct death_entry death;
 
-       state = disable_interrupts();
-       GRAB_THREAD_LOCK();
-
-       thread = thread_get_thread_struct_locked(id);
+       Thread* thread = Thread::GetAndLock(id);
        if (thread != NULL) {
                // remember the semaphore we have to wait on and place our death entry
                exitSem = thread->exit.sem;
                list_add_link_to_head(&thread->exit.waiters, &death);
+
+               thread->UnlockAndReleaseReference();
+                       // Note: We mustn't dereference the pointer afterwards, only check
+                       // it.
        }
 
        death_entry* threadDeathEntry = NULL;
 
-       RELEASE_THREAD_LOCK();
-       restore_interrupts(state);
-
        if (thread == NULL) {
                // we couldn't find this thread -- maybe it's already gone, and we'll
                // find its death entry in our team
@@ -2067,7 +2127,7 @@
 
        // we need to wait for the death of the thread
 
-       if (exitSem < B_OK)
+       if (exitSem < 0)
                return B_BAD_THREAD_ID;
 
        resume_thread(id);
@@ -2085,22 +2145,20 @@
                if (_returnCode)
                        *_returnCode = death.status;
        } else {
-               // We were probably interrupted; we need to remove our death entry now.
-               state = disable_interrupts();
-               GRAB_THREAD_LOCK();
-
-               thread = thread_get_thread_struct_locked(id);
-               if (thread != NULL)
+               // We were probably interrupted or the timeout occurred; we need to
+               // remove our death entry now.
+               thread = Thread::GetAndLock(id);
+               if (thread != NULL) {
                        list_remove_link(&death);
-
-               RELEASE_THREAD_LOCK();
-               restore_interrupts(state);
-
-               // If the thread is already gone, we need to wait uninterruptibly for
-               // its exit semaphore to make sure our death entry stays valid -- it
-               // won't take long.
-               if (thread == NULL)
+                       thread->UnlockAndReleaseReference();
+               } else {
+                       // The thread is already gone, so we need to wait uninterruptibly
+                       // for its exit semaphore to make sure our death entry stays valid.
+                       // It won't take long, since the thread is apparently already in the
+                       // middle of the cleanup.
                        acquire_sem(exitSem);
+                       status = B_OK;
+               }
        }
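
For reference, a typical call into the rewritten function (the timeout and
the dprintf() output are illustrative):

    status_t returnCode;
    status_t error = wait_for_thread_etc(id, B_RELATIVE_TIMEOUT,
        1000000 /* 1 s */, &returnCode);
    if (error == B_OK)
        dprintf("thread %ld exited with 0x%lx\n", id, returnCode);
    else if (error == B_TIMED_OUT)
        dprintf("thread %ld is still running\n", id);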
 

[... truncated: 278 lines follow ...]
