[haiku-commits] haiku: hrev46755 - src/system/kernel headers/private/kernel src/system/kernel/arch/x86

  • From: pdziepak@xxxxxxxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Fri, 24 Jan 2014 14:02:34 +0100 (CET)

hrev46755 adds 2 changesets to branch 'master'
old head: b9401835293b0d14f6b50a6458ddb054714fc98f
new head: 82bcd89b92f9c7934845782a1e34f433d51d2f9c
overview: http://cgit.haiku-os.org/haiku/log/?qt=range&q=82bcd89+%5Eb940183

----------------------------------------------------------------------------

03451e4: kernel: Fix deadlock with thread sending signal to itself
  
  UserEvent can be fired from scheduler_reschedule(), i.e. while holding the current
  thread's scheduler_lock. If the current thread goes to sleep and, during reschedule,
  one of its timers sends a signal to it, then scheduler_enqueue_in_run_queue()
  attempts to acquire its scheduler_lock again, resulting in a deadlock.
  
  There was also a minor issue with both scheduler_reschedule() and
  scheduler_enqueue_in_run_queue() acquiring current CPU scheduler mode lock.

82bcd89: kernel: Add CPUSet::{Clear, Set}BitAtomic() functions

                                    [ Pawel Dziepak <pdziepak@xxxxxxxxxxx> ]

----------------------------------------------------------------------------

5 files changed, 73 insertions(+), 36 deletions(-)
headers/private/kernel/UserEvent.h         | 11 +++++--
headers/private/kernel/smp.h               | 21 ++++++++++++-
src/system/kernel/UserEvent.cpp            | 41 ++++++++++++++++++--------
src/system/kernel/arch/x86/arch_thread.cpp |  4 +--
src/system/kernel/smp.cpp                  | 32 ++++++++++----------

############################################################################

Commit:      03451e4cc166588ed1b399bb97acaf3a043283ff
URL:         http://cgit.haiku-os.org/haiku/commit/?id=03451e4
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Thu Jan 23 22:53:35 2014 UTC

kernel: Fix deadlock with thread sending signal to itself

UserEvent can be fired from scheduler_reschedule(), i.e. while holding the current
thread's scheduler_lock. If the current thread goes to sleep and, during reschedule,
one of its timers sends a signal to it, then scheduler_enqueue_in_run_queue()
attempts to acquire its scheduler_lock again, resulting in a deadlock.

There was also a minor issue with both scheduler_reschedule() and
scheduler_enqueue_in_run_queue() acquiring current CPU scheduler mode lock.

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/UserEvent.h 
b/headers/private/kernel/UserEvent.h
index 34086bc..8e0a931 100644
--- a/headers/private/kernel/UserEvent.h
+++ b/headers/private/kernel/UserEvent.h
@@ -28,11 +28,13 @@ struct UserEvent {
 };
 
 
-struct SignalEvent : UserEvent {
+struct SignalEvent : UserEvent, private DPCCallback {
        virtual                                         ~SignalEvent();
 
                        void                            SetUserValue(union 
sigval userValue);
 
+       virtual status_t                        Fire();
+
 protected:
                        struct EventSignal;
 
@@ -41,6 +43,7 @@ protected:
 
 protected:
                        EventSignal*            fSignal;
+                       int32                           fPendingDPC;
 };
 
 
@@ -48,7 +51,8 @@ struct TeamSignalEvent : SignalEvent {
        static  TeamSignalEvent*        Create(Team* team, uint32 signalNumber,
                                                                        int32 
signalCode, int32 errorCode);
 
-       virtual status_t                        Fire();
+protected:
+       virtual void                            DoDPC(DPCQueue* queue);
 
 private:
                                                                
TeamSignalEvent(Team* team,
@@ -64,7 +68,8 @@ struct ThreadSignalEvent : SignalEvent {
                                                                        int32 
signalCode, int32 errorCode,
                                                                        pid_t 
sendingTeam);
 
-       virtual status_t                        Fire();
+protected:
+       virtual void                            DoDPC(DPCQueue* queue);
 
 private:
                                                                
ThreadSignalEvent(Thread* thread,
diff --git a/src/system/kernel/UserEvent.cpp b/src/system/kernel/UserEvent.cpp
index f0c49b6..46acc63 100644
--- a/src/system/kernel/UserEvent.cpp
+++ b/src/system/kernel/UserEvent.cpp
@@ -56,7 +56,8 @@ private:
 
 SignalEvent::SignalEvent(EventSignal* signal)
        :
-       fSignal(signal)
+       fSignal(signal),
+       fPendingDPC(0)
 {
 }
 
@@ -74,6 +75,24 @@ SignalEvent::SetUserValue(union sigval userValue)
 }
 
 
+status_t
+SignalEvent::Fire()
+{
+       bool wasPending = atomic_get_and_set(&fPendingDPC, 1) != 0;
+       if (wasPending)
+               return B_BUSY;
+
+       if (fSignal->MarkUsed()) {
+               atomic_set(&fPendingDPC, 0);
+               return B_BUSY;
+       }
+
+       DPCQueue::DefaultQueue(B_NORMAL_PRIORITY)->Add(this);
+
+       return B_OK;
+}
+
+
 // #pragma mark - TeamSignalEvent
 
 
@@ -106,12 +125,9 @@ TeamSignalEvent::Create(Team* team, uint32 signalNumber, 
int32 signalCode,
 }
 
 
-status_t
-TeamSignalEvent::Fire()
+void
+TeamSignalEvent::DoDPC(DPCQueue* queue)
 {
-       if (fSignal->MarkUsed())
-               return B_BUSY;
-
        fSignal->AcquireReference();
                // one reference is transferred to send_signal_to_team_locked
 
@@ -125,7 +141,8 @@ TeamSignalEvent::Fire()
        if (error != B_OK || !fSignal->IsPending())
                fSignal->SetUnused();
 
-       return error;
+       // We're no longer queued in the DPC queue, so we can be reused.
+       atomic_set(&fPendingDPC, 0);
 }
 
 
@@ -161,12 +178,9 @@ ThreadSignalEvent::Create(Thread* thread, uint32 
signalNumber, int32 signalCode,
 }
 
 
-status_t
-ThreadSignalEvent::Fire()
+void
+ThreadSignalEvent::DoDPC(DPCQueue* queue)
 {
-       if (fSignal->MarkUsed())
-               return B_BUSY;
-
        fSignal->AcquireReference();
                // one reference is transferred to send_signal_to_team_locked
        InterruptsReadSpinLocker teamLocker(fThread->team_lock);
@@ -181,7 +195,8 @@ ThreadSignalEvent::Fire()
        if (error != B_OK || !fSignal->IsPending())
                fSignal->SetUnused();
 
-       return error;
+       // We're no longer queued in the DPC queue, so we can be reused.
+       atomic_set(&fPendingDPC, 0);
 }
 
 

############################################################################

Revision:    hrev46755
Commit:      82bcd89b92f9c7934845782a1e34f433d51d2f9c
URL:         http://cgit.haiku-os.org/haiku/commit/?id=82bcd89
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Fri Jan 24 00:40:06 2014 UTC

kernel: Add CPUSet::{Clear, Set}BitAtomic() functions

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/smp.h b/headers/private/kernel/smp.h
index 43126eb..0da58a0 100644
--- a/headers/private/kernel/smp.h
+++ b/headers/private/kernel/smp.h
@@ -50,6 +50,9 @@ public:
        inline  void            SetBit(int32 cpu);
        inline  void            ClearBit(int32 cpu);
 
+       inline  void            SetBitAtomic(int32 cpu);
+       inline  void            ClearBitAtomic(int32 cpu);
+
        inline  bool            GetBit(int32 cpu) const;
 
        inline  bool            IsEmpty() const;
@@ -118,7 +121,7 @@ inline void
 CPUSet::SetBit(int32 cpu)
 {
        int32* element = (int32*)&fBitmap[cpu % kArraySize];
-       atomic_or(element, 1u << (cpu / kArraySize));
+       *element |= 1u << (cpu / kArraySize);
 }
 
 
@@ -126,6 +129,22 @@ inline void
 CPUSet::ClearBit(int32 cpu)
 {
        int32* element = (int32*)&fBitmap[cpu % kArraySize];
+       *element &= ~uint32(1u << (cpu / kArraySize));
+}
+
+
+inline void
+CPUSet::SetBitAtomic(int32 cpu)
+{
+       int32* element = (int32*)&fBitmap[cpu % kArraySize];
+       atomic_or(element, 1u << (cpu / kArraySize));
+}
+
+
+inline void
+CPUSet::ClearBitAtomic(int32 cpu)
+{
+       int32* element = (int32*)&fBitmap[cpu % kArraySize];
        atomic_and(element, ~uint32(1u << (cpu / kArraySize)));
 }
 
diff --git a/src/system/kernel/arch/x86/arch_thread.cpp 
b/src/system/kernel/arch/x86/arch_thread.cpp
index 9fbbaa3..11031b5 100644
--- a/src/system/kernel/arch/x86/arch_thread.cpp
+++ b/src/system/kernel/arch/x86/arch_thread.cpp
@@ -229,8 +229,8 @@ arch_thread_context_switch(Thread* from, Thread* to)
                                        != activePagingStructures) {
                // update on which CPUs the address space is used
                int cpu = cpuData->cpu_num;
-               activePagingStructures->active_on_cpus.ClearBit(cpu);
-               toPagingStructures->active_on_cpus.SetBit(cpu);
+               activePagingStructures->active_on_cpus.ClearBitAtomic(cpu);
+               toPagingStructures->active_on_cpus.SetBitAtomic(cpu);
 
                // assign the new paging structures to the CPU
                toPagingStructures->AddReference();
diff --git a/src/system/kernel/smp.cpp b/src/system/kernel/smp.cpp
index 325fa72..4c4b357 100644
--- a/src/system/kernel/smp.cpp
+++ b/src/system/kernel/smp.cpp
@@ -822,7 +822,7 @@ check_for_message(int currentCPU, mailbox_source& 
sourceMailbox)
                        }
 
                        // mark it so we wont try to process this one again
-                       msg->proc_bitmap.ClearBit(currentCPU);
+                       msg->proc_bitmap.ClearBitAtomic(currentCPU);
                        atomic_add(&gCPU[currentCPU].ici_counter, 1);
 
                        sourceMailbox = MAILBOX_BCAST;
@@ -1000,7 +1000,7 @@ static void
 process_early_cpu_call(int32 cpu)
 {
        sEarlyCPUCallFunction(sEarlyCPUCallCookie, cpu);
-       sEarlyCPUCallSet.ClearBit(cpu);
+       sEarlyCPUCallSet.ClearBitAtomic(cpu);
        atomic_add(&sEarlyCPUCallCount, 1);
 }
 
@@ -1112,20 +1112,24 @@ smp_send_multicast_ici(CPUSet& cpuMask, int32 message, 
addr_t data,
                return;
 
        int currentCPU = smp_get_current_cpu();
-       bool broadcast = true;
 
-       // count target CPUs
+       // find_free_message leaves interrupts disabled
+       struct smp_msg *msg;
+       int state = find_free_message(&msg);
+
+       msg->proc_bitmap = cpuMask;
+       msg->proc_bitmap.ClearBit(currentCPU);
+
        int32 targetCPUs = 0;
        for (int32 i = 0; i < sNumCPUs; i++) {
-               if (cpuMask.GetBit(i))
+               if (msg->proc_bitmap.GetBit(i))
                        targetCPUs++;
-               else if (i != currentCPU)
-                       broadcast = false;
        }
 
-       // find_free_message leaves interrupts disabled
-       struct smp_msg *msg;
-       int state = find_free_message(&msg);
+       if (targetCPUs == 0) {
+               panic("smp_send_multicast_ici(): 0 CPU mask");
+               return;
+       }
 
        msg->message = message;
        msg->data = data;
@@ -1136,13 +1140,7 @@ smp_send_multicast_ici(CPUSet& cpuMask, int32 message, 
addr_t data,
        msg->flags = flags;
        msg->done = 0;
 
-       msg->proc_bitmap = cpuMask;
-       msg->proc_bitmap.ClearBit(currentCPU);
-       if (msg->proc_bitmap.IsEmpty()) {
-               panic("smp_send_multicast_ici(): 0 CPU mask");
-               return;
-       }
-
+       bool broadcast = targetCPUs == sNumCPUs - 1;
 
        // stick it in the broadcast mailbox
        acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);


Other related posts:

  • » [haiku-commits] haiku: hrev46755 - src/system/kernel headers/private/kernel src/system/kernel/arch/x86 - pdziepak