[haiku-commits] BRANCH pdziepak-github.scheduler [d929eb2] src/system/kernel src/system/libroot/os/arch/x86 src/tools/fs_shell docs/user/support headers/os/support

  • From: pdziepak-github.scheduler <community@xxxxxxxxxxxx>
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 5 Nov 2013 23:00:35 +0100 (CET)

added 4 changesets to branch 'refs/remotes/pdziepak-github/scheduler'
old head: f4b088a992fefefc87aae57643aca05d81826cca
new head: d929eb20ba2f9f884a9013b14b3fd85cbad4ba12
overview: https://github.com/pdziepak/Haiku/compare/f4b088a...d929eb2

----------------------------------------------------------------------------

e7dba86: kernel: User{Event, Timer}: Use atomic access where necessary

077c84e: kernel: atomic_*() functions rework
  
   * No need for the atomically changed variables to be declared as
     volatile.
   * Drop support for atomically getting and setting unaligned data.
   * Introduce atomic_get_and_set[64]() which works the same as
     atomic_set[64]() used to. atomic_set[64]() does not return the
     previous value anymore.

273f2f3: kernel: Improve spinlock implementation
  
  atomic_or() and atomic_and() are not supported by x86 and need to be
  emulated using CAS. Use atomic_get_and_set() and atomic_set() instead.

d929eb2: docs: Update atomic_*() functions documentation

                                    [ Pawel Dziepak <pdziepak@xxxxxxxxxxx> ]

----------------------------------------------------------------------------

49 files changed, 450 insertions(+), 294 deletions(-)
docs/user/support/SupportDefs.dox                |  82 ++++++++---
headers/build/HaikuBuildCompatibility.h          |  20 +--
headers/os/drivers/KernelExport.h                |   8 +-
headers/os/support/String.h                      |   4 +-
headers/os/support/SupportDefs.h                 |  37 ++---
headers/private/firewire/fwglue.h                |   2 +-
headers/private/fs_shell/fssh_api_wrapper.h      |   1 +
headers/private/fs_shell/fssh_atomic.h           |  28 ++--
headers/private/kernel/smp.h                     |  16 +--
headers/private/kernel/util/atomic.h             |  25 +++-
headers/private/libroot/pthread_private.h        |   4 +-
headers/private/package/TempfileManager.h        |   2 +-
headers/private/shared/Referenceable.h           |   2 +-
headers/private/shared/WeakReferenceable.h       |   2 +-
headers/private/shared/locks.h                   |   2 +-
headers/private/support/StringPrivate.h          |   4 +-
src/add-ons/kernel/bus_managers/ps2/ps2_dev.h    |   2 +-
.../kernel/drivers/input/usb_hid/HIDDevice.cpp   |   2 +-
.../drivers/power/acpi_battery/acpi_battery.h    |   2 +-
.../enhanced_speedstep/enhanced_speedstep.cpp    |   2 +-
.../enhanced_speedstep/enhanced_speedstep.h      |   2 +-
.../kernel/file_systems/nfs4/FileSystem.h        |   2 +-
.../network/datalink_protocols/arp/arp.cpp       |   2 +-
.../ipv6_datagram/ipv6_datagram.cpp              |   2 +-
src/add-ons/kernel/network/stack/net_buffer.cpp  |  12 +-
.../imap/imap_lib/IMAPMailbox.h                  |   2 +-
.../imap/imap_lib/IMAPProtocol.h                 |   2 +-
src/apps/activitymonitor/ActivityView.h          |   2 +-
src/apps/powerstatus/DriverInterface.h           |   2 +-
src/build/libroot/atomic.cpp                     |  38 +++--
src/kits/support/String.cpp                      |   4 +-
src/system/kernel/UserEvent.cpp                  |  23 ++-
src/system/kernel/UserTimer.cpp                  |   4 +-
src/system/kernel/arch/x86/arch_cpu.cpp          |   6 +-
.../arch/x86/paging/32bit/X86PagingMethod32Bit.h |   2 +-
.../arch/x86/paging/64bit/X86PagingMethod64Bit.h |   2 +-
.../arch/x86/paging/pae/X86PagingMethodPAE.h     |   2 +-
src/system/kernel/fs/EntryCache.cpp              |   3 +-
src/system/kernel/fs/EntryCache.h                |   6 +-
src/system/kernel/fs/unused_vnodes.h             |  12 +-
src/system/kernel/posix/xsi_message_queue.cpp    |   6 +-
src/system/kernel/smp.cpp                        |  57 ++++----
src/system/libroot/os/arch/x86/atomic.S          | 144 ++++++++++++-------
src/system/libroot/os/arch/x86_64/atomic.S       |  76 ++++++----
src/system/libroot/os/locks/init_once.cpp        |   4 +-
src/system/libroot/posix/pthread/pthread_key.cpp |   2 +-
.../libroot/posix/pthread/pthread_once.cpp       |   8 +-
src/tools/fs_shell/atomic.cpp                    |  62 ++++----
src/tools/fs_shell/vfs.cpp                       |   8 +-

############################################################################

Commit:      e7dba861fd89792538442cc1b01584f7dabbd4a9
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Nov  5 19:28:25 2013 UTC

kernel: User{Event, Timer}: Use atomic access where necessary

----------------------------------------------------------------------------

diff --git a/src/system/kernel/UserEvent.cpp b/src/system/kernel/UserEvent.cpp
index 16f5aec..097ea2b 100644
--- a/src/system/kernel/UserEvent.cpp
+++ b/src/system/kernel/UserEvent.cpp
@@ -38,16 +38,13 @@ struct SignalEvent::EventSignal : Signal {
 
        void SetUnused()
        {
-               fInUse = 0;
+               // mark not-in-use
+               atomic_set(&fInUse, 0);
        }
 
        virtual void Handled()
        {
-               // mark not-in-use
-               {
-                       InterruptsSpinLocker _(gSchedulerLock);
-                       fInUse = 0;
-               }
+               SetUnused();
 
                Signal::Handled();
        }
@@ -121,11 +118,12 @@ TeamSignalEvent::Fire()
        InterruptsSpinLocker locker(gSchedulerLock);
        status_t error = send_signal_to_team_locked(fTeam, fSignal->Number(),
                fSignal, B_DO_NOT_RESCHEDULE);
+       locker.Unlock();
+
        // There are situations (for certain signals), in which
        // send_signal_to_team_locked() succeeds without queuing the signal.
        if (error != B_OK || !fSignal->IsPending())
                fSignal->SetUnused();
-       locker.Unlock();
 
        return error;
 }
@@ -166,8 +164,6 @@ ThreadSignalEvent::Create(Thread* thread, uint32 
signalNumber, int32 signalCode,
 status_t
 ThreadSignalEvent::Fire()
 {
-       dprintf("THREAD\n");
-
        if (fSignal->MarkUsed())
                return B_BUSY;
 
@@ -177,11 +173,12 @@ ThreadSignalEvent::Fire()
        InterruptsSpinLocker locker(gSchedulerLock);
        status_t error = send_signal_to_thread_locked(fThread, 
fSignal->Number(),
                fSignal, B_DO_NOT_RESCHEDULE);
+       locker.Unlock();
+
        // There are situations (for certain signals), in which
        // send_signal_to_team_locked() succeeds without queuing the signal.
        if (error != B_OK || !fSignal->IsPending())
                fSignal->SetUnused();
-       locker.Unlock();
 
        return error;
 }
@@ -233,7 +230,7 @@ void
 CreateThreadEvent::DoDPC(DPCQueue* queue)
 {
        // We're no longer queued in the DPC queue, so we can be reused.
-       fPendingDPC = 0;
+       atomic_set(&fPendingDPC, 0);
 
        // create the thread
        thread_id threadID = thread_create_thread(fCreationAttributes, false);
diff --git a/src/system/kernel/UserTimer.cpp b/src/system/kernel/UserTimer.cpp
index 05208cf..6c04125 100644
--- a/src/system/kernel/UserTimer.cpp
+++ b/src/system/kernel/UserTimer.cpp
@@ -287,9 +287,9 @@ UserTimer::CancelTimer()
 {
        ASSERT(fScheduled);
 
-       fSkip = 1;
+       atomic_set(&fSkip, 1);
        cancel_timer(&fTimer);
-       fSkip = 0;
+       atomic_set(&fSkip, 0);
 }
 
 

############################################################################

Commit:      077c84eb27b25430428d356f3d13afabc0cc0d13
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Nov  5 21:32:59 2013 UTC

kernel: atomic_*() functions rework

 * No need for the atomically changed variables to be declared as
   volatile.
 * Drop support for atomically getting and setting unaligned data.
 * Introduce atomic_get_and_set[64]() which works the same as
   atomic_set[64]() used to. atomic_set[64]() does not return the
   previous value anymore.

----------------------------------------------------------------------------

diff --git a/headers/build/HaikuBuildCompatibility.h 
b/headers/build/HaikuBuildCompatibility.h
index 009c77f..38b5197 100644
--- a/headers/build/HaikuBuildCompatibility.h
+++ b/headers/build/HaikuBuildCompatibility.h
@@ -118,17 +118,19 @@ struct media_node;
 extern "C" {
 #endif
 
-extern int32   atomic_set(vint32 *value, int32 newValue);
-extern int32   atomic_test_and_set(vint32 *value, int32 newValue,
+extern void            atomic_set(int32* value, int32 newValue);
+extern int32   atomic_get_and_set(int32* value, int32 newValue);
+extern int32   atomic_test_and_set(int32 *value, int32 newValue,
                                        int32 testAgainst);
-extern int32   atomic_get(vint32 *value);
-extern int64   atomic_set64(vint64 *value, int64 newValue);
-extern int64   atomic_test_and_set64(vint64 *value, int64 newValue,
+extern int32   atomic_get(int32 *value);
+extern void            atomic_set64(int64* value, int64 newValue);
+extern int64   atomic_get_and_set64(int64* value, int64 newValue);
+extern int64   atomic_test_and_set64(int64 *value, int64 newValue,
                                        int64 testAgainst);
-extern int64   atomic_get64(vint64 *value);
-extern int64   atomic_add64(vint64 *value, int64 addValue);
-extern int64   atomic_and64(vint64 *value, int64 andValue);
-extern int64   atomic_or64(vint64 *value, int64 orValue);
+extern int64   atomic_get64(int64 *value);
+extern int64   atomic_add64(int64 *value, int64 addValue);
+extern int64   atomic_and64(int64 *value, int64 andValue);
+extern int64   atomic_or64(int64 *value, int64 orValue);
 
 extern size_t  strnlen(const char *string, size_t count);
 
diff --git a/headers/os/drivers/KernelExport.h 
b/headers/os/drivers/KernelExport.h
index be17861..d416855 100644
--- a/headers/os/drivers/KernelExport.h
+++ b/headers/os/drivers/KernelExport.h
@@ -20,9 +20,9 @@ typedef ulong cpu_status;
 
 #if B_DEBUG_SPINLOCK_CONTENTION
        typedef struct {
-               vint32  lock;
-               vint32  count_low;
-               vint32  count_high;
+               int32   lock;
+               int32   count_low;
+               int32   count_high;
        } spinlock;
 
 #      define B_SPINLOCK_INITIALIZER { 0, 0, 0 }
@@ -33,7 +33,7 @@ typedef ulong cpu_status;
                } while (false)
 #      define B_SPINLOCK_IS_LOCKED(spinlock)   ((spinlock)->lock > 0)
 #else
-       typedef vint32 spinlock;
+       typedef int32 spinlock;
 
 #      define B_SPINLOCK_INITIALIZER 0
 #      define B_INITIALIZE_SPINLOCK(lock)      do { *(lock) = 0; } while 
(false)
diff --git a/headers/os/support/String.h b/headers/os/support/String.h
index e33fbc0..7ca3b64 100644
--- a/headers/os/support/String.h
+++ b/headers/os/support/String.h
@@ -396,8 +396,8 @@ private:
                                                                int32 
withLength);
 
 private:
-                       vint32&                 _ReferenceCount();
-                       const vint32&   _ReferenceCount() const;
+                       int32&                  _ReferenceCount();
+                       const int32&    _ReferenceCount() const;
                        bool                    _IsShareable() const;
                        void                    _FreePrivateData();
 
diff --git a/headers/os/support/SupportDefs.h b/headers/os/support/SupportDefs.h
index 7cc5aaf..aae8002 100644
--- a/headers/os/support/SupportDefs.h
+++ b/headers/os/support/SupportDefs.h
@@ -196,19 +196,21 @@ extern "C" {
 #endif
 
 /* Atomic functions; previous value is returned */
-extern int32   atomic_set(vint32 *value, int32 newValue);
-extern int32   atomic_test_and_set(vint32 *value, int32 newValue, int32 
testAgainst);
-extern int32   atomic_add(vint32 *value, int32 addValue);
-extern int32   atomic_and(vint32 *value, int32 andValue);
-extern int32   atomic_or(vint32 *value, int32 orValue);
-extern int32   atomic_get(vint32 *value);
-
-extern int64   atomic_set64(vint64 *value, int64 newValue);
-extern int64   atomic_test_and_set64(vint64 *value, int64 newValue, int64 
testAgainst);
-extern int64   atomic_add64(vint64 *value, int64 addValue);
-extern int64   atomic_and64(vint64 *value, int64 andValue);
-extern int64   atomic_or64(vint64 *value, int64 orValue);
-extern int64   atomic_get64(vint64 *value);
+extern void            atomic_set(int32* value, int32 newValue);
+extern int32   atomic_get_and_set(int32* value, int32 newValue);
+extern int32   atomic_test_and_set(int32 *value, int32 newValue, int32 
testAgainst);
+extern int32   atomic_add(int32 *value, int32 addValue);
+extern int32   atomic_and(int32 *value, int32 andValue);
+extern int32   atomic_or(int32 *value, int32 orValue);
+extern int32   atomic_get(int32 *value);
+
+extern void            atomic_set64(int64* value, int64 newValue);
+extern int64   atomic_get_and_set64(int64* value, int64 newValue);
+extern int64   atomic_test_and_set64(int64 *value, int64 newValue, int64 
testAgainst);
+extern int64   atomic_add64(int64 *value, int64 addValue);
+extern int64   atomic_and64(int64 *value, int64 andValue);
+extern int64   atomic_or64(int64 *value, int64 orValue);
+extern int64   atomic_get64(int64 *value);
 
 /* Other stuff */
 extern void*   get_stack_frame(void);
@@ -240,15 +242,6 @@ extern void*       get_stack_frame(void);
        __sync_fetch_and_and(valuePointer, andValue)
 #define atomic_or(valuePointer, orValue)       \
        __sync_fetch_and_or(valuePointer, orValue)
-#define atomic_get(valuePointer)       \
-       __sync_fetch_and_or(valuePointer, 0)
-       // No equivalent to atomic_get(). We simulate it via atomic or. On most
-       // (all?) 32+ bit architectures aligned 32 bit reads will be atomic 
anyway,
-       // though.
-
-// Note: No equivalent for atomic_set(). It could be simulated by a
-// get + atomic test and set loop, but calling the atomic_set() implementation
-// might be faster.
 
 #endif // B_USE_BUILTIN_ATOMIC_FUNCTIONS && __GNUC__ >= 4
 
diff --git a/headers/private/firewire/fwglue.h 
b/headers/private/firewire/fwglue.h
index 8962240..453ad32 100644
--- a/headers/private/firewire/fwglue.h
+++ b/headers/private/firewire/fwglue.h
@@ -30,7 +30,7 @@
 typedef uint32_t bus_addr_t;
 typedef uint32_t bus_size_t;
 
-#define atomic_readandclear_int(ptr) atomic_set((int32 *)(ptr), 0)
+#define atomic_readandclear_int(ptr) atomic_get_and_set((int32*)(ptr), 0)
 #define atomic_set_int(ptr, value) atomic_or((int32 *)(ptr), value)
 
 #define mtx_lock mutex_lock
diff --git a/headers/private/fs_shell/fssh_api_wrapper.h 
b/headers/private/fs_shell/fssh_api_wrapper.h
index edb40a9..679e360 100644
--- a/headers/private/fs_shell/fssh_api_wrapper.h
+++ b/headers/private/fs_shell/fssh_api_wrapper.h
@@ -50,6 +50,7 @@
 // #pragma mark - fssh_atomic.h
 
 #define atomic_set                     fssh_atomic_set
+#define atomic_get_and_set     fssh_atomic_get_and_set
 #define atomic_test_and_set    fssh_atomic_test_and_set
 #define atomic_add                     fssh_atomic_add
 #define atomic_and                     fssh_atomic_and
diff --git a/headers/private/fs_shell/fssh_atomic.h 
b/headers/private/fs_shell/fssh_atomic.h
index df307a4..90fba9c 100644
--- a/headers/private/fs_shell/fssh_atomic.h
+++ b/headers/private/fs_shell/fssh_atomic.h
@@ -15,21 +15,23 @@ extern "C" {
 #endif
 
 
-int32_t        fssh_atomic_set(vint32_t *value, int32_t newValue);
-int32_t        fssh_atomic_test_and_set(vint32_t *value, int32_t newValue,
+void   fssh_atomic_set(int32_t* value, int32_t newValue);
+int32_t        fssh_atomic_get_and_set(int32_t* value, int32_t newValue);
+int32_t        fssh_atomic_test_and_set(int32_t *value, int32_t newValue,
                        int32_t testAgainst);
-int32_t        fssh_atomic_add(vint32_t *value, int32_t addValue);
-int32_t        fssh_atomic_and(vint32_t *value, int32_t andValue);
-int32_t        fssh_atomic_or(vint32_t *value, int32_t orValue);       
-int32_t        fssh_atomic_get(vint32_t *value);
-
-int64_t        fssh_atomic_set64(vint64_t *value, int64_t newValue);
-int64_t        fssh_atomic_test_and_set64(vint64_t *value, int64_t newValue,
+int32_t        fssh_atomic_add(int32_t *value, int32_t addValue);
+int32_t        fssh_atomic_and(int32_t *value, int32_t andValue);
+int32_t        fssh_atomic_or(int32_t *value, int32_t orValue);        
+int32_t        fssh_atomic_get(int32_t *value);
+
+void   fssh_atomic_set64(int64_t* value, int64_t newValue);
+int64_t        fssh_atomic_get_and_set64(int64_t* value, int64_t newValue);
+int64_t        fssh_atomic_test_and_set64(int64_t *value, int64_t newValue,
                        int64_t testAgainst);
-int64_t        fssh_atomic_add64(vint64_t *value, int64_t addValue);
-int64_t        fssh_atomic_and64(vint64_t *value, int64_t andValue);
-int64_t        fssh_atomic_or64(vint64_t *value, int64_t orValue);     
-int64_t        fssh_atomic_get64(vint64_t *value);
+int64_t        fssh_atomic_add64(int64_t *value, int64_t addValue);
+int64_t        fssh_atomic_and64(int64_t *value, int64_t andValue);
+int64_t        fssh_atomic_or64(int64_t *value, int64_t orValue);      
+int64_t        fssh_atomic_get64(int64_t *value);
 
 #ifdef __cplusplus
 }
diff --git a/headers/private/kernel/smp.h b/headers/private/kernel/smp.h
index 50bdcbe..d20ee28 100644
--- a/headers/private/kernel/smp.h
+++ b/headers/private/kernel/smp.h
@@ -47,7 +47,7 @@ status_t smp_per_cpu_init(struct kernel_args *args, int32 
cpu);
 status_t smp_init_post_generic_syscalls(void);
 bool smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous);
 void smp_wake_up_non_boot_cpus(void);
-void smp_cpu_rendezvous(volatile uint32 *var, int current_cpu);
+void smp_cpu_rendezvous(uint32 *var, int current_cpu);
 void smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2, 
addr_t data3,
                void *data_ptr, uint32 flags);
 void smp_send_multicast_ici(cpu_mask_t cpuMask, int32 message, addr_t data,
@@ -107,7 +107,7 @@ static inline bool
 try_acquire_write_seqlock_inline(seqlock* lock) {
        bool succeed = try_acquire_spinlock(&lock->lock);
        if (succeed)
-               atomic_add(&lock->count, 1);
+               atomic_add((int32*)&lock->count, 1);
        return succeed;
 }
 
@@ -115,26 +115,26 @@ try_acquire_write_seqlock_inline(seqlock* lock) {
 static inline void
 acquire_write_seqlock_inline(seqlock* lock) {
        acquire_spinlock(&lock->lock);
-       atomic_add(&lock->count, 1);
+       atomic_add((int32*)&lock->count, 1);
 }
 
 
 static inline void
 release_write_seqlock_inline(seqlock* lock) {
-       atomic_add(&lock->count, 1);
+       atomic_add((int32*)&lock->count, 1);
        release_spinlock(&lock->lock);
 }
 
 
 static inline uint32
 acquire_read_seqlock_inline(seqlock* lock) {
-       return atomic_get(&lock->count);
+       return atomic_get((int32*)&lock->count);
 }
 
 
 static inline bool
 release_read_seqlock_inline(seqlock* lock, uint32 count) {
-       uint32 current = atomic_get(&lock->count);
+       uint32 current = atomic_get((int32*)&lock->count);
 
        return count % 2 == 0 && current == count;
 }
diff --git a/headers/private/kernel/util/atomic.h 
b/headers/private/kernel/util/atomic.h
index 510df65..1472549 100644
--- a/headers/private/kernel/util/atomic.h
+++ b/headers/private/kernel/util/atomic.h
@@ -18,22 +18,34 @@ atomic_pointer_test_and_set(PointerType** _pointer, const 
PointerType* set,
        const PointerType* test)
 {
 #if LONG_MAX == INT_MAX
-       return (PointerType*)atomic_test_and_set((vint32*)_pointer, (int32)set,
+       return (PointerType*)atomic_test_and_set((int32*)_pointer, (int32)set,
                (int32)test);
 #else
-       return (PointerType*)atomic_test_and_set64((vint64*)_pointer, 
(int64)set,
+       return (PointerType*)atomic_test_and_set64((int64*)_pointer, (int64)set,
                (int64)test);
 #endif
 }
 
 
 template<typename PointerType> PointerType*
+atomic_pointer_get_and_set(PointerType** _pointer, const PointerType* set)
+{
+#if LONG_MAX == INT_MAX
+       return (PointerType*)atomic_get_and_set((int32*)_pointer, (int32)set);
+#else
+       return (PointerType*)atomic_get_and_set64((int64*)_pointer, (int64)set);
+#endif
+}
+
+
+template<typename PointerType> void
 atomic_pointer_set(PointerType** _pointer, const PointerType* set)
 {
+       ASSERT((addr_t(_pointer) & (sizeof(PointerType*) - 1)) == 0);
 #if LONG_MAX == INT_MAX
-       return (PointerType*)atomic_set((vint32*)_pointer, (int32)set);
+       atomic_set((int32*)_pointer, (int32)set);
 #else
-       return (PointerType*)atomic_set64((vint64*)_pointer, (int64)set);
+       atomic_set64((int64*)_pointer, (int64)set);
 #endif
 }
 
@@ -41,10 +53,11 @@ atomic_pointer_set(PointerType** _pointer, const 
PointerType* set)
 template<typename PointerType> PointerType*
 atomic_pointer_get(PointerType** _pointer)
 {
+       ASSERT((addr_t(_pointer) & (sizeof(PointerType*) - 1)) == 0);
 #if LONG_MAX == INT_MAX
-       return (PointerType*)atomic_get((vint32*)_pointer);
+       return (PointerType*)atomic_get((int32*)_pointer);
 #else
-       return (PointerType*)atomic_get64((vint64*)_pointer);
+       return (PointerType*)atomic_get64((int64*)_pointer);
 #endif
 }
 
diff --git a/headers/private/libroot/pthread_private.h 
b/headers/private/libroot/pthread_private.h
index 1ddc00c..659ac7b 100644
--- a/headers/private/libroot/pthread_private.h
+++ b/headers/private/libroot/pthread_private.h
@@ -50,12 +50,12 @@ typedef struct _pthread_rwlockattr {
 typedef void (*pthread_key_destructor)(void *data);
 
 struct pthread_key {
-       vint32          sequence;
+       int32           sequence;
        pthread_key_destructor destructor;
 };
 
 struct pthread_key_data {
-       vint32          sequence;
+       int32           sequence;
        void            *value;
 };
 
diff --git a/headers/private/package/TempfileManager.h 
b/headers/private/package/TempfileManager.h
index e53ebad..3d118b2 100644
--- a/headers/private/package/TempfileManager.h
+++ b/headers/private/package/TempfileManager.h
@@ -31,7 +31,7 @@ private:
 
 private:
                        BDirectory                      fBaseDirectory;
-                       vint32                          fNextNumber;
+                       int32                           fNextNumber;
 };
 
 
diff --git a/headers/private/shared/Referenceable.h 
b/headers/private/shared/Referenceable.h
index ed9eff8..0c7b298 100644
--- a/headers/private/shared/Referenceable.h
+++ b/headers/private/shared/Referenceable.h
@@ -30,7 +30,7 @@ protected:
        virtual void                            LastReferenceReleased();
 
 protected:
-                       vint32                          fReferenceCount;
+                       int32                           fReferenceCount;
 };
 
 
diff --git a/headers/private/shared/WeakReferenceable.h 
b/headers/private/shared/WeakReferenceable.h
index c09bb0c..7d1774f 100644
--- a/headers/private/shared/WeakReferenceable.h
+++ b/headers/private/shared/WeakReferenceable.h
@@ -30,7 +30,7 @@ public:
                        void                            GetUnchecked();
 
 private:
-                       vint32                          fUseCount;
+                       int32                           fUseCount;
                        BWeakReferenceable*     fObject;
 };
 
diff --git a/headers/private/shared/locks.h b/headers/private/shared/locks.h
index 052f24b..23b227d 100644
--- a/headers/private/shared/locks.h
+++ b/headers/private/shared/locks.h
@@ -71,7 +71,7 @@ int32         recursive_lock_get_recursion(recursive_lock 
*lock);
 #define                INIT_ONCE_UNINITIALIZED -1
 #define                INIT_ONCE_INITIALIZED   -4
 
-status_t       __init_once(vint32* control, status_t (*initRoutine)(void*),
+status_t       __init_once(int32* control, status_t (*initRoutine)(void*),
                                void* data);
 
 #ifdef __cplusplus
diff --git a/headers/private/support/StringPrivate.h 
b/headers/private/support/StringPrivate.h
index 799151b..0ad274c 100644
--- a/headers/private/support/StringPrivate.h
+++ b/headers/private/support/StringPrivate.h
@@ -32,12 +32,12 @@ public:
                return fString._IsShareable();
        }
 
-       static vint32& DataRefCount(char* data)
+       static int32& DataRefCount(char* data)
        {
                return *(((int32 *)data) - 2);
        }
 
-       vint32& DataRefCount()
+       int32& DataRefCount()
        {
                return DataRefCount(Data());
        }
diff --git a/src/add-ons/kernel/bus_managers/ps2/ps2_dev.h 
b/src/add-ons/kernel/bus_managers/ps2/ps2_dev.h
index 229dbe0..98c8c45 100644
--- a/src/add-ons/kernel/bus_managers/ps2/ps2_dev.h
+++ b/src/add-ons/kernel/bus_managers/ps2/ps2_dev.h
@@ -28,7 +28,7 @@ struct ps2_dev {
        bool                    active;
        uint8           idx;
        sem_id                  result_sem;
-       vint32                  flags;
+       int32                   flags;
        uint8 *                 result_buf;
        int                             result_buf_idx;
        int                             result_buf_cnt;
diff --git a/src/add-ons/kernel/drivers/input/usb_hid/HIDDevice.cpp 
b/src/add-ons/kernel/drivers/input/usb_hid/HIDDevice.cpp
index c1e15e1..9348370 100644
--- a/src/add-ons/kernel/drivers/input/usb_hid/HIDDevice.cpp
+++ b/src/add-ons/kernel/drivers/input/usb_hid/HIDDevice.cpp
@@ -264,7 +264,7 @@ HIDDevice::MaybeScheduleTransfer()
        if (fRemoved)
                return B_ERROR;
 
-       if (atomic_set(&fTransferScheduled, 1) != 0) {
+       if (atomic_get_and_set(&fTransferScheduled, 1) != 0) {
                // someone else already caused a transfer to be scheduled
                return B_OK;
        }
diff --git a/src/add-ons/kernel/drivers/power/acpi_battery/acpi_battery.h 
b/src/add-ons/kernel/drivers/power/acpi_battery/acpi_battery.h
index d712a4f..ffd11dd 100644
--- a/src/add-ons/kernel/drivers/power/acpi_battery/acpi_battery.h
+++ b/src/add-ons/kernel/drivers/power/acpi_battery/acpi_battery.h
@@ -33,7 +33,7 @@ struct battery_driver_cookie {
 
 struct battery_device_cookie {
        battery_driver_cookie*          driver_cookie;
-       vint32                                          stop_watching;
+       int32                                           stop_watching;
 };
 
 
diff --git 
a/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.cpp 
b/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.cpp
index b332c49..eaf29c2 100644
--- a/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.cpp
+++ b/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.cpp
@@ -35,7 +35,7 @@
 
 static device_manager_info *sDeviceManager;
 static ConditionVariable sFrequencyCondition;
-static vint32 sCurrentID;
+static int32 sCurrentID;
 
 
 static status_t
diff --git 
a/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.h 
b/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.h
index d511f38..b51fa91 100644
--- a/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.h
+++ b/src/add-ons/kernel/drivers/power/enhanced_speedstep/enhanced_speedstep.h
@@ -22,7 +22,7 @@ struct est_cookie {
        freq_info*                              available_states;
        uint8                                   number_states;
        
-       vint32                                  stop_watching;
+       int32                                   stop_watching;
 };
 
 
diff --git a/src/add-ons/kernel/file_systems/nfs4/FileSystem.h 
b/src/add-ons/kernel/file_systems/nfs4/FileSystem.h
index 5ca172d..5fdca26 100644
--- a/src/add-ons/kernel/file_systems/nfs4/FileSystem.h
+++ b/src/add-ons/kernel/file_systems/nfs4/FileSystem.h
@@ -109,7 +109,7 @@ private:
 
                        RPC::Server*            fServer;
 
-                       vint64                          fId;
+                       int64                           fId;
                        dev_t                           fDevId;
 
                        InodeIdMap                      fInoIdMap;
diff --git a/src/add-ons/kernel/network/datalink_protocols/arp/arp.cpp 
b/src/add-ons/kernel/network/datalink_protocols/arp/arp.cpp
index 23a7eaa..d2ee6eb 100644
--- a/src/add-ons/kernel/network/datalink_protocols/arp/arp.cpp
+++ b/src/add-ons/kernel/network/datalink_protocols/arp/arp.cpp
@@ -187,7 +187,7 @@ put_request_buffer(arp_entry* entry, net_buffer* buffer)
 static void
 delete_request_buffer(arp_entry* entry)
 {
-       net_buffer* buffer = atomic_pointer_set(&entry->request_buffer,
+       net_buffer* buffer = atomic_pointer_get_and_set(&entry->request_buffer,
                kDeletedBuffer);
        if (buffer != NULL && buffer != kDeletedBuffer)
                gBufferModule->free(buffer);
diff --git 
a/src/add-ons/kernel/network/datalink_protocols/ipv6_datagram/ipv6_datagram.cpp 
b/src/add-ons/kernel/network/datalink_protocols/ipv6_datagram/ipv6_datagram.cpp
index e38c433..e510e53 100644
--- 
a/src/add-ons/kernel/network/datalink_protocols/ipv6_datagram/ipv6_datagram.cpp
+++ 
b/src/add-ons/kernel/network/datalink_protocols/ipv6_datagram/ipv6_datagram.cpp
@@ -251,7 +251,7 @@ put_request_buffer(ndp_entry* entry, net_buffer* buffer)
 static void
 delete_request_buffer(ndp_entry* entry)
 {
-       net_buffer* buffer = atomic_pointer_set(&entry->request_buffer,
+       net_buffer* buffer = atomic_pointer_get_and_set(&entry->request_buffer,
                kDeletedBuffer);
        if (buffer != NULL && buffer != kDeletedBuffer)
                gBufferModule->free(buffer);
diff --git a/src/add-ons/kernel/network/stack/net_buffer.cpp 
b/src/add-ons/kernel/network/stack/net_buffer.cpp
index d302c6a..8265d92 100644
--- a/src/add-ons/kernel/network/stack/net_buffer.cpp
+++ b/src/add-ons/kernel/network/stack/net_buffer.cpp
@@ -166,12 +166,12 @@ static status_t read_data(net_buffer* _buffer, size_t 
offset, void* data,
 
 
 #if ENABLE_STATS
-static vint32 sAllocatedDataHeaderCount = 0;
-static vint32 sAllocatedNetBufferCount = 0;
-static vint32 sEverAllocatedDataHeaderCount = 0;
-static vint32 sEverAllocatedNetBufferCount = 0;
-static vint32 sMaxAllocatedDataHeaderCount = 0;
-static vint32 sMaxAllocatedNetBufferCount = 0;
+static int32 sAllocatedDataHeaderCount = 0;
+static int32 sAllocatedNetBufferCount = 0;
+static int32 sEverAllocatedDataHeaderCount = 0;
+static int32 sEverAllocatedNetBufferCount = 0;
+static int32 sMaxAllocatedDataHeaderCount = 0;
+static int32 sMaxAllocatedNetBufferCount = 0;
 #endif
 
 
diff --git 
a/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPMailbox.h 
b/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPMailbox.h
index b814919..b8c53dc 100644
--- a/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPMailbox.h
+++ b/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPMailbox.h
@@ -90,7 +90,7 @@ private:
                        ExpungeHandler          fExpungeHandler;
                        FlagsHandler            fFlagsHandler;
 
-                       vint32                          fWatching;
+                       int32                           fWatching;
 
                        BString                         fSelectedMailbox;
 
diff --git 
a/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPProtocol.h 
b/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPProtocol.h
index 833e9b7..bd236fa 100644
--- a/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPProtocol.h
+++ b/src/add-ons/mail_daemon/inbound_protocols/imap/imap_lib/IMAPProtocol.h
@@ -120,7 +120,7 @@ private:
 
                        BString                         fCommandError;
 
-                       vint32                          fStopNow;
+                       int32                           fStopNow;
 
                        bool                            fIsConnected;
 };
diff --git a/src/apps/activitymonitor/ActivityView.h 
b/src/apps/activitymonitor/ActivityView.h
index 17ac4f8..4c9488e 100644
--- a/src/apps/activitymonitor/ActivityView.h
+++ b/src/apps/activitymonitor/ActivityView.h
@@ -76,7 +76,7 @@ public:
                        void            RemoveAllDataSources();
 
                        bigtime_t       RefreshInterval() const
-                                                       { return 
atomic_get64((vint64*)&fRefreshInterval); }
+                                                       { return 
atomic_get64((int64*)&fRefreshInterval); }
 
 protected:
        virtual void            AttachedToWindow();
diff --git a/src/apps/powerstatus/DriverInterface.h 
b/src/apps/powerstatus/DriverInterface.h
index 52194b6..fd196bc 100644
--- a/src/apps/powerstatus/DriverInterface.h
+++ b/src/apps/powerstatus/DriverInterface.h
@@ -67,7 +67,7 @@ public:
 protected:
        virtual void            _WatchPowerStatus() = 0;
 
-       vint32                          fIsWatching;
+       int32                           fIsWatching;
        sem_id                          fWaitSem;
 
 private:
diff --git a/src/build/libroot/atomic.cpp b/src/build/libroot/atomic.cpp
index f7a7269..38aed02 100644
--- a/src/build/libroot/atomic.cpp
+++ b/src/build/libroot/atomic.cpp
@@ -6,8 +6,15 @@
 #include <SupportDefs.h>
 
 
+void
+atomic_set(int32 *value, int32 newValue)
+{
+       *value = newValue;
+}
+
+
 int32
-atomic_set(vint32 *value, int32 newValue)
+atomic_get_and_set(int32 *value, int32 newValue)
 {
        int32 oldValue = *value;
        *value = newValue;
@@ -16,7 +23,7 @@ atomic_set(vint32 *value, int32 newValue)
 
 
 int32
-atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst)
+atomic_test_and_set(int32 *value, int32 newValue, int32 testAgainst)
 {
        int32 oldValue = *value;
        if (oldValue == testAgainst)
@@ -26,7 +33,7 @@ atomic_test_and_set(vint32 *value, int32 newValue, int32 
testAgainst)
 
 
 int32
-atomic_add(vint32 *value, int32 addValue)
+atomic_add(int32 *value, int32 addValue)
 {
        int32 oldValue = *value;
        *value += addValue;
@@ -35,7 +42,7 @@ atomic_add(vint32 *value, int32 addValue)
 
 
 int32
-atomic_and(vint32 *value, int32 andValue)
+atomic_and(int32 *value, int32 andValue)
 {
        int32 oldValue = *value;
        *value &= andValue;
@@ -44,7 +51,7 @@ atomic_and(vint32 *value, int32 andValue)
 
 
 int32
-atomic_or(vint32 *value, int32 orValue)        
+atomic_or(int32 *value, int32 orValue) 
 {
        int32 oldValue = *value;
        *value |= orValue;
@@ -53,14 +60,21 @@ atomic_or(vint32 *value, int32 orValue)
 
 
 int32
-atomic_get(vint32 *value)
+atomic_get(int32 *value)
 {
        return *value;
 }
 
 
+void
+atomic_set64(int64 *value, int64 newValue)
+{
+       *value = newValue;
+}
+
+
 int64
-atomic_set64(vint64 *value, int64 newValue)
+atomic_get_and_set64(int64 *value, int64 newValue)
 {
        int64 oldValue = *value;
        *value = newValue;
@@ -68,7 +82,7 @@ atomic_set64(vint64 *value, int64 newValue)
 }
 
 int64
-atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
+atomic_test_and_set64(int64 *value, int64 newValue, int64 testAgainst)
 {
        int64 oldValue = *value;
        if (oldValue == testAgainst)
@@ -77,7 +91,7 @@ atomic_test_and_set64(vint64 *value, int64 newValue, int64 
testAgainst)
 }
 
 int64
-atomic_add64(vint64 *value, int64 addValue)
+atomic_add64(int64 *value, int64 addValue)
 {
        int64 oldValue = *value;
        *value += addValue;
@@ -85,7 +99,7 @@ atomic_add64(vint64 *value, int64 addValue)
 }
 
 int64
-atomic_and64(vint64 *value, int64 andValue)
+atomic_and64(int64 *value, int64 andValue)
 {
        int64 oldValue = *value;
        *value &= andValue;
@@ -93,7 +107,7 @@ atomic_and64(vint64 *value, int64 andValue)
 }
 
 int64
-atomic_or64(vint64 *value, int64 orValue)
+atomic_or64(int64 *value, int64 orValue)
 {
        int64 oldValue = *value;
        *value |= orValue;
@@ -101,7 +115,7 @@ atomic_or64(vint64 *value, int64 orValue)
 }
 
 int64
-atomic_get64(vint64 *value)
+atomic_get64(int64 *value)
 {
        return *value;
 }
diff --git a/src/kits/support/String.cpp b/src/kits/support/String.cpp
index 56b6084..b5cfaaf 100644
--- a/src/kits/support/String.cpp
+++ b/src/kits/support/String.cpp
@@ -183,14 +183,14 @@ BStringRef::operator&()
 //     #pragma mark - BString
 
 
-inline vint32&
+inline int32&
 BString::_ReferenceCount()
 {
        return Private::DataRefCount(fPrivateData);
 }
 
 
-inline const vint32&
+inline const int32&
 BString::_ReferenceCount() const
 {
        return Private::DataRefCount(fPrivateData);
diff --git a/src/system/kernel/UserEvent.cpp b/src/system/kernel/UserEvent.cpp
index 097ea2b..9e460a5 100644
--- a/src/system/kernel/UserEvent.cpp
+++ b/src/system/kernel/UserEvent.cpp
@@ -33,7 +33,7 @@ struct SignalEvent::EventSignal : Signal {
 
        bool MarkUsed()
        {
-               return atomic_set(&fInUse, 1) != 0;
+               return atomic_get_and_set(&fInUse, 1) != 0;
        }
 
        void SetUnused()
@@ -216,7 +216,7 @@ CreateThreadEvent::Create(const ThreadCreationAttributes& 
attributes)
 status_t
 CreateThreadEvent::Fire()
 {
-       bool wasPending = atomic_set(&fPendingDPC, 1) != 0;
+       bool wasPending = atomic_get_and_set(&fPendingDPC, 1) != 0;
        if (wasPending)
                return B_BUSY;
 
diff --git a/src/system/kernel/arch/x86/arch_cpu.cpp 
b/src/system/kernel/arch/x86/arch_cpu.cpp
index 91cc6bf..be1afda 100644
--- a/src/system/kernel/arch/x86/arch_cpu.cpp
+++ b/src/system/kernel/arch/x86/arch_cpu.cpp
@@ -205,7 +205,7 @@ set_mtrr(void* _parameter, int cpu)
        // sCpuRendezvous2 before the last CPU has actually left the loop in
        // smp_cpu_rendezvous();
        if (cpu == 0)
-               atomic_set((vint32*)&sCpuRendezvous3, 0);
+               atomic_set((int32*)&sCpuRendezvous3, 0);
 
        disable_caches();
 
@@ -233,7 +233,7 @@ set_mtrrs(void* _parameter, int cpu)
        // sCpuRendezvous2 before the last CPU has actually left the loop in
        // smp_cpu_rendezvous();
        if (cpu == 0)
-               atomic_set((vint32*)&sCpuRendezvous3, 0);
+               atomic_set((int32*)&sCpuRendezvous3, 0);
 
        disable_caches();
 
@@ -259,7 +259,7 @@ init_mtrrs(void* _unused, int cpu)
        // sCpuRendezvous2 before the last CPU has actually left the loop in
        // smp_cpu_rendezvous();
        if (cpu == 0)
-               atomic_set((vint32*)&sCpuRendezvous3, 0);
+               atomic_set((int32*)&sCpuRendezvous3, 0);
 
        disable_caches();
 
diff --git a/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.h 
b/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.h
index f781f7d..8df3181 100644
--- a/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.h
+++ b/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.h
@@ -108,7 +108,7 @@ X86PagingMethod32Bit::Method()
 X86PagingMethod32Bit::SetPageTableEntry(page_table_entry* entry,
        page_table_entry newEntry)
 {
-       return atomic_set((int32*)entry, newEntry);
+       return atomic_get_and_set((int32*)entry, newEntry);
 }
 
 
diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h 
b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h
index e834434..ac1bfeb 100644
--- a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h
+++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h
@@ -116,7 +116,7 @@ X86PagingMethod64Bit::Method()
 /*static*/ inline uint64
 X86PagingMethod64Bit::SetTableEntry(uint64* entry, uint64 newEntry)
 {
-       return atomic_set64((int64*)entry, newEntry);
+       return atomic_get_and_set64((int64*)entry, newEntry);
 }
 
 
diff --git a/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h 
b/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h
index 6d6b1ed..c787bdc 100644
--- a/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h
+++ b/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h
@@ -158,7 +158,7 @@ X86PagingMethodPAE::PageDirEntryForAddress(
 X86PagingMethodPAE::SetPageTableEntry(pae_page_table_entry* entry,
        pae_page_table_entry newEntry)
 {
-       return atomic_set64((int64*)entry, newEntry);
+       return atomic_get_and_set64((int64*)entry, newEntry);
 }
 
 
diff --git a/src/system/kernel/fs/EntryCache.cpp 
b/src/system/kernel/fs/EntryCache.cpp
index 2c44d60..e9a6a14 100644
--- a/src/system/kernel/fs/EntryCache.cpp
+++ b/src/system/kernel/fs/EntryCache.cpp
@@ -165,7 +165,8 @@ EntryCache::Lookup(ino_t dirID, const char* name, ino_t& 
_nodeID)
        if (entry == NULL)
                return false;
 
-       int32 oldGeneration = atomic_set(&entry->generation, 
fCurrentGeneration);
+       int32 oldGeneration = atomic_get_and_set(&entry->generation,
+                       fCurrentGeneration);
        if (oldGeneration == fCurrentGeneration || entry->index < 0) {
                // The entry is already in the current generation or is being 
moved to
                // it by another thread.
diff --git a/src/system/kernel/fs/EntryCache.h 
b/src/system/kernel/fs/EntryCache.h
index 91d7404..e197426 100644
--- a/src/system/kernel/fs/EntryCache.h
+++ b/src/system/kernel/fs/EntryCache.h
@@ -34,14 +34,14 @@ struct EntryCacheEntry {
                        EntryCacheEntry*        hash_link;
                        ino_t                           node_id;
                        ino_t                           dir_id;
-                       vint32                          generation;
-                       vint32                          index;
+                       int32                           generation;
+                       int32                           index;
                        char                            name[1];
 };
 
 
 struct EntryCacheGeneration {
-                       vint32                          next_index;
+                       int32                           next_index;
                        EntryCacheEntry**       entries;
 
                                                                
EntryCacheGeneration();
diff --git a/src/system/kernel/fs/unused_vnodes.h 
b/src/system/kernel/fs/unused_vnodes.h
index ca866de..5f5bb49 100644
--- a/src/system/kernel/fs/unused_vnodes.h
+++ b/src/system/kernel/fs/unused_vnodes.h
@@ -30,15 +30,15 @@ const static uint32 kMaxUnusedVnodes = 8192;
 */
 static mutex sUnusedVnodesLock = MUTEX_INITIALIZER("unused vnodes");
 static list sUnusedVnodeList;
-static vuint32 sUnusedVnodes = 0;
+static uint32 sUnusedVnodes = 0;
 
 static const int32 kMaxHotVnodes = 1024;
 static rw_lock sHotVnodesLock = RW_LOCK_INITIALIZER("hot vnodes");
 static Vnode* sHotVnodes[kMaxHotVnodes];
-static vint32 sNextHotVnodeIndex = 0;
+static int32 sNextHotVnodeIndex = 0;
 
 static const int32 kUnusedVnodesCheckInterval = 64;
-static vint32 sUnusedVnodesCheckCount = 0;
+static int32 sUnusedVnodesCheckCount = 0;
 
 
 /*!    Must be called with sHotVnodesLock write-locked.
@@ -48,7 +48,7 @@ flush_hot_vnodes_locked()
 {
        MutexLocker unusedLocker(sUnusedVnodesLock);
 
-       int32 count = std::min((int32)sNextHotVnodeIndex, kMaxHotVnodes);
+       int32 count = std::min(sNextHotVnodeIndex, kMaxHotVnodes);
        for (int32 i = 0; i < count; i++) {
                Vnode* vnode = sHotVnodes[i];
                if (vnode == NULL)
@@ -87,7 +87,7 @@ vnode_unused(Vnode* vnode)
        bool result = false;
        int32 checkCount = atomic_add(&sUnusedVnodesCheckCount, 1);
        if (checkCount == kUnusedVnodesCheckInterval) {
-               uint32 unusedCount = sUnusedVnodes;
+               uint32 unusedCount = atomic_get((int32*)&sUnusedVnodes);
                if (unusedCount > kMaxUnusedVnodes
                        && low_resource_state(
                                B_KERNEL_RESOURCE_PAGES | 
B_KERNEL_RESOURCE_MEMORY)
@@ -164,7 +164,7 @@ vnode_to_be_freed(Vnode* vnode)
        if (vnode->IsHot()) {
                // node is hot -- remove it from the array
 // TODO: Maybe better completely flush the array while at it?
-               int32 count = sNextHotVnodeIndex;
+               int32 count = atomic_get(&sNextHotVnodeIndex);
                count = std::min(count, kMaxHotVnodes);
                for (int32 i = 0; i < count; i++) {
                        if (sHotVnodes[i] == vnode) {
diff --git a/src/system/kernel/posix/xsi_message_queue.cpp 
b/src/system/kernel/posix/xsi_message_queue.cpp
index f0141b1..247a1ae 100644
--- a/src/system/kernel/posix/xsi_message_queue.cpp
+++ b/src/system/kernel/posix/xsi_message_queue.cpp
@@ -385,8 +385,8 @@ static mutex sIpcLock;
 static mutex sXsiMessageQueueLock;
 
 static uint32 sGlobalSequenceNumber = 1;
-static vint32 sXsiMessageCount = 0;
-static vint32 sXsiMessageQueueCount = 0;
+static int32 sXsiMessageCount = 0;
+static int32 sXsiMessageQueueCount = 0;
 
 
 //     #pragma mark -
@@ -690,7 +690,7 @@ _user_xsi_msgget(key_t key, int flags)
 
        if (create) {
                // Create a new message queue for this key
-               if (sXsiMessageQueueCount >= MAX_XSI_MESSAGE_QUEUE) {
+               if (atomic_get(&sXsiMessageQueueCount) >= 
MAX_XSI_MESSAGE_QUEUE) {
                        TRACE_ERROR(("xsi_msgget: reached limit of maximun 
number of "
                                "message queues\n"));
                        return ENOSPC;
diff --git a/src/system/kernel/smp.cpp b/src/system/kernel/smp.cpp
index 93be92f..1b2f886 100644
--- a/src/system/kernel/smp.cpp
+++ b/src/system/kernel/smp.cpp
@@ -79,9 +79,9 @@ enum mailbox_source {
        MAILBOX_BCAST,
 };
 
-static vint32 sBootCPUSpin = 0;
+static int32 sBootCPUSpin = 0;
 
-static vint32 sEarlyCPUCall = 0;
+static int32 sEarlyCPUCall = 0;
 static void (*sEarlyCPUCallFunction)(void*, int);
 void* sEarlyCPUCallCookie;
 
@@ -109,7 +109,7 @@ static struct {
        spinlock        *lock;
 } sLastCaller[NUM_LAST_CALLERS];
 
-static vint32 sLastIndex = 0;
+static int32 sLastIndex = 0;
        // Is incremented atomically. Must be % NUM_LAST_CALLERS before being 
used
        // as index into sLastCaller. Note, that it has to be casted to uint32
        // before applying the modulo operation, since otherwise after 
overflowing
@@ -129,7 +129,7 @@ push_lock_caller(void* caller, spinlock* lock)
 static void*
 find_lock_caller(spinlock* lock)
 {
-       int32 lastIndex = (uint32)sLastIndex % NUM_LAST_CALLERS;
+       int32 lastIndex = (uint32)atomic_get(&sLastIndex) % NUM_LAST_CALLERS;
 
        for (int32 i = 0; i < NUM_LAST_CALLERS; i++) {
                int32 index = (NUM_LAST_CALLERS + lastIndex - 1 - i) % 
NUM_LAST_CALLERS;
@@ -522,7 +522,7 @@ bool
 try_acquire_write_seqlock(seqlock* lock) {
        bool succeed = try_acquire_spinlock(&lock->lock);
        if (succeed)
-               atomic_add(&lock->count, 1);
+               atomic_add((int32*)&lock->count, 1);
        return succeed;
 }
 
@@ -530,26 +530,26 @@ try_acquire_write_seqlock(seqlock* lock) {
 void
 acquire_write_seqlock(seqlock* lock) {
        acquire_spinlock(&lock->lock);
-       atomic_add(&lock->count, 1);
+       atomic_add((int32*)&lock->count, 1);
 }
 
 
 void
 release_write_seqlock(seqlock* lock) {
-       atomic_add(&lock->count, 1);
+       atomic_add((int32*)&lock->count, 1);
        release_spinlock(&lock->lock);
 }
 
 
 uint32
 acquire_read_seqlock(seqlock* lock) {
-       return atomic_get(&lock->count);
+       return atomic_get((int32*)&lock->count);
 }
 
 
 bool
 release_read_seqlock(seqlock* lock, uint32 count) {
-       uint32 current = atomic_get(&lock->count);
+       uint32 current = atomic_get((int32*)&lock->count);
 
        if (count % 2 == 1 || current != count) {
                PAUSE();
@@ -870,10 +870,10 @@ call_all_cpus_early(void (*function)(void*, int), void* 
cookie)
                uint32 cpuMask = (1 << sNumCPUs) - 2;
                        // all CPUs but the boot cpu
 
-               sEarlyCPUCall = cpuMask;
+               atomic_set(&sEarlyCPUCall, cpuMask);
 
                // wait for all CPUs to finish
-               while ((sEarlyCPUCall & cpuMask) != 0)
+               while ((atomic_get(&sEarlyCPUCall) & cpuMask) != 0)
                        PAUSE();
        }
 
@@ -1166,8 +1166,8 @@ smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous)
 
        smp_cpu_rendezvous(rendezVous, cpu);
 
-       while (sBootCPUSpin == 0) {
-               if ((sEarlyCPUCall & (1 << cpu)) != 0)
+       while (atomic_get(&sBootCPUSpin) == 0) {
+               if ((atomic_get(&sEarlyCPUCall) & (1 << cpu)) != 0)
                        process_early_cpu_call(cpu);
 
                PAUSE();
@@ -1185,7 +1185,7 @@ smp_wake_up_non_boot_cpus()
                sICIEnabled = true;
 
        // resume non boot CPUs
-       sBootCPUSpin = 1;
+       atomic_set(&sBootCPUSpin, 1);
 }
 
 
@@ -1200,11 +1200,12 @@ smp_wake_up_non_boot_cpus()
        ensured via another rendez-vous) the variable can be reset.
 */
 void
-smp_cpu_rendezvous(volatile uint32* var, int current_cpu)
+smp_cpu_rendezvous(uint32* var, int current_cpu)
 {
-       atomic_or((vint32*)var, 1 << current_cpu);
+       atomic_or((int32*)var, 1 << current_cpu);
 
-       while (*var != (((uint32)1 << sNumCPUs) - 1))
+       uint32 allReady = ((uint32)1 << sNumCPUs) - 1;
+       while ((uint32)atomic_get((int32*)var) != allReady)
                PAUSE();
 }
 
diff --git a/src/system/libroot/os/arch/x86/atomic.S 
b/src/system/libroot/os/arch/x86/atomic.S
index 31b46f1..9e26dfd 100644
--- a/src/system/libroot/os/arch/x86/atomic.S
+++ b/src/system/libroot/os/arch/x86/atomic.S
@@ -1,4 +1,7 @@
 /*
+ * Copyright 2013, Paweł Dziepak, pdziepak@xxxxxxxxxxx.
+ * Distributed under the terms of the MIT License.
+ *
 ** Copyright 2003, Marcus Overhagen. All rights reserved.
 ** Distributed under the terms of the OpenBeOS license.
 **
@@ -6,96 +9,133 @@
 ** Distributed under the terms of the NewOS License.
 */
 
+
 #include <asm_defs.h>
 
 
 .text
 
-/* int32       atomic_set(vint32 *value, int32 newValue) */
+
+/* void                atomic_set(int32* value, int32 newValue) */
 FUNCTION(atomic_set):
-       movl            4(%esp),%edx
-       movl            8(%esp),%eax
+       movl            4(%esp), %edx
+       movl            8(%esp), %eax
        lock
-       xchg            %eax,(%edx)
+       addl            $0, (%esp)
+       movl            %eax, (%edx)
        ret
 FUNCTION_END(atomic_set)
 
-/* int32       atomic_test_and_set(vint32 *value, int32 newValue, int32 
testAgainst) */
+
+/* int32       atomic_get_and_set(int32* value, int32 newValue) */
+FUNCTION(atomic_get_and_set):
+       movl            4(%esp), %edx
+       movl            8(%esp), %eax
+       xchg            %eax, (%edx)
+       ret
+FUNCTION_END(atomic_get_and_set)
+
+
+/* int32       atomic_test_and_set(int32* value, int32 newValue,
+       int32 testAgainst) */
 FUNCTION(atomic_test_and_set):
-       movl            4(%esp),%edx
-       movl            8(%esp),%ecx
-       movl            12(%esp),%eax
+       movl            4(%esp), %edx
+       movl            8(%esp), %ecx
+       movl            12(%esp), %eax
        lock
-       cmpxchgl        %ecx,(%edx)
+       cmpxchgl        %ecx, (%edx)
        ret
 FUNCTION_END(atomic_test_and_set)
 
-/* int32       atomic_add(vint32 *value, int32 addValue) */
+
+/* int32       atomic_add(int32* value, int32 addValue) */
 FUNCTION(atomic_add):
-       movl            4(%esp),%edx
-       movl            8(%esp),%eax
+       movl            4(%esp), %edx
+       movl            8(%esp), %eax
        lock
-       xaddl           %eax,(%edx)
+       xaddl           %eax, (%edx)
        ret
 FUNCTION_END(atomic_add)
 
-/* int32       atomic_and(vint32 *value, int32 andValue) */
+
+/* int32       atomic_and(int32* value, int32 andValue) */
 FUNCTION(atomic_and):
-       movl            4(%esp),%edx
-_atomic_and1:
-       movl            8(%esp),%ecx
-       movl            (%edx),%eax
-       andl            %eax,%ecx
+       movl            4(%esp), %edx
+1:
+       movl            8(%esp), %ecx
+       movl            (%edx), %eax
+       andl            %eax, %ecx
        lock
-       cmpxchgl        %ecx,(%edx)
-       jnz                     _atomic_and1
+       cmpxchgl        %ecx, (%edx)
+       jnz                     1b
        ret
 FUNCTION_END(atomic_and)
 
-/* int32       atomic_or(vint32 *value, int32 orValue) */
+
+/* int32       atomic_or(int32* value, int32 orValue) */
 FUNCTION(atomic_or):
-       movl            4(%esp),%edx
-_atomic_or1:
-       movl            8(%esp),%ecx
-       movl            (%edx),%eax
-       orl                     %eax,%ecx
+       movl            4(%esp), %edx
+1:
+       movl            8(%esp), %ecx
+       movl            (%edx), %eax
+       orl                     %eax, %ecx
        lock
-       cmpxchgl        %ecx,(%edx)
-       jnz                     _atomic_or1
+       cmpxchgl        %ecx, (%edx)
+       jnz                     1b
        ret
 FUNCTION_END(atomic_or)
 
-/* int32       atomic_get(vint32 *value) */
+
+/* int32       atomic_get(int32* value) */
 FUNCTION(atomic_get):
        movl            4(%esp), %edx
-_atomic_get1:
        movl            (%edx), %eax
-       movl            %eax, %ecx
        lock
-       cmpxchgl        %ecx, (%edx)
-       jnz                     _atomic_get1
+       addl            $0, (%esp)
        ret
 FUNCTION_END(atomic_get)
 
-/* int64       atomic_set64(vint64 *value, int64 newValue) */
+
+/* void                atomic_set64(int64* value, int64 newValue) */
 FUNCTION(atomic_set64):
        push            %esi
        push            %ebx
        movl            12(%esp), %esi  /* value */
        movl            16(%esp), %ebx  /* newValue low */
        movl            20(%esp), %ecx  /* newValue high */
-_atomic_set64_1:
+1:
        movl            (%esi), %eax    /* testAgainst low */
        movl            4(%esi), %edx   /* testAgainst high */
        lock
        cmpxchg8b       (%esi)
-       jnz                     _atomic_set64_1
+       jnz                     1b
        pop                     %ebx
        pop                     %esi
        ret
 FUNCTION_END(atomic_set64)
 
-/* int64       atomic_test_and_set64(vint64 *value, int64 newValue, int64 
testAgainst) */
+
+/* void                atomic_get_and_set64(int64* value, int64 newValue) */
+FUNCTION(atomic_get_and_set64):
+       push            %esi
+       push            %ebx
+       movl            12(%esp), %esi  /* value */
+       movl            16(%esp), %ebx  /* newValue low */
+       movl            20(%esp), %ecx  /* newValue high */
+1:
+       movl            (%esi), %eax    /* testAgainst low */
+       movl            4(%esi), %edx   /* testAgainst high */
+       lock
+       cmpxchg8b       (%esi)
+       jnz                     1b
+       pop                     %ebx
+       pop                     %esi
+       ret
+FUNCTION_END(atomic_get_and_set64)
+
+
+/* int64       atomic_test_and_set64(int64* value, int64 newValue,
+       int64 testAgainst) */
 FUNCTION(atomic_test_and_set64):
        push            %esi
        push            %ebx
@@ -111,12 +151,13 @@ FUNCTION(atomic_test_and_set64):
        ret
 FUNCTION_END(atomic_test_and_set64)
 
-/* int64       atomic_add64(vint64 *value, int64 addValue) */
+
+/* int64       atomic_add64(int64* value, int64 addValue) */
 FUNCTION(atomic_add64):
        push            %esi
        push            %ebx
        movl            12(%esp), %esi
-_atomic_add64_1:
+1:
        movl            (%esi), %eax
        movl            4(%esi), %edx
        movl            %eax, %ebx
@@ -125,18 +166,18 @@ _atomic_add64_1:
        adcl            20(%esp), %ecx
        lock
        cmpxchg8b       (%esi)
-       jnz                     _atomic_add64_1
+       jnz                     1b
        pop                     %ebx
        pop                     %esi
        ret
 FUNCTION_END(atomic_add64)
 
-/* int64       atomic_and64(vint64 *value, int64 andValue) */
+/* int64       atomic_and64(int64* value, int64 andValue) */
 FUNCTION(atomic_and64):
        push            %esi
        push            %ebx
        movl            12(%esp), %esi
-_atomic_and64_1:
+1:
        movl            (%esi), %eax
        movl            4(%esi), %edx
        movl            %eax, %ebx
@@ -145,18 +186,19 @@ _atomic_and64_1:
        andl            20(%esp), %ecx
        lock
        cmpxchg8b       (%esi)
-       jnz                     _atomic_and64_1
+       jnz                     1b
        pop                     %ebx
        pop                     %esi
        ret
 FUNCTION_END(atomic_and64)
 
-/* int64       atomic_or64(vint64 *value, int64 orValue) */
+
+/* int64       atomic_or64(int64* value, int64 orValue) */
 FUNCTION(atomic_or64):
        push            %esi
        push            %ebx
        movl            12(%esp), %esi
-_atomic_or64_1:
+1:
        movl            (%esi), %eax
        movl            4(%esi), %edx
        movl            %eax, %ebx
@@ -165,26 +207,28 @@ _atomic_or64_1:
        orl                     20(%esp), %ecx
        lock
        cmpxchg8b       (%esi)
-       jnz                     _atomic_or64_1
+       jnz                     1b
        pop                     %ebx
        pop                     %esi
        ret
 FUNCTION_END(atomic_or64)
 
-/* int64       atomic_get64(vint64 *value) */
+
+/* int64       atomic_get64(int64* value) */
 FUNCTION(atomic_get64):
        push            %esi
        push            %ebx
        movl            12(%esp), %esi
-_atomic_get64_1:
+1:
        movl            (%esi), %eax
        movl            4(%esi), %edx
        movl            %eax, %ebx
        movl            %edx, %ecx
        lock
        cmpxchg8b       (%esi)
-       jnz                     _atomic_get64_1
+       jnz                     1b
        pop                     %ebx
        pop                     %esi
        ret
 FUNCTION_END(atomic_get64)
+
diff --git a/src/system/libroot/os/arch/x86_64/atomic.S 
b/src/system/libroot/os/arch/x86_64/atomic.S
index 7abf2d2..5c905ce 100644
--- a/src/system/libroot/os/arch/x86_64/atomic.S
+++ b/src/system/libroot/os/arch/x86_64/atomic.S
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2013, Paweł Dziepak, pdziepak@xxxxxxxxxxx.
  * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
  */
@@ -9,15 +10,23 @@
 
 .text
 
-/* int32 atomic_set(vint32 *value, int32 newValue) */
+
+/* int32 atomic_set(int32* value, int32 newValue) */
 FUNCTION(atomic_set):
-       movl            %esi, %eax
-       lock
-       xchgl           %eax, (%rdi)
+       sfence
+       movl            %esi, (%rdi)
        ret
 FUNCTION_END(atomic_set)
 
-/* int32 atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst) 
*/
+
+/* int32 atomic_get_and_set(int32* value, int32 newValue) */
+FUNCTION(atomic_get_and_set):
+       xchgl           %esi, (%rdi)
+       ret
+FUNCTION_END(atomic_get_and_set)
+
+
+/* int32 atomic_test_and_set(int32* value, int32 newValue, int32 testAgainst) 
*/
 FUNCTION(atomic_test_and_set):
        movl            %edx, %eax
        lock
@@ -25,15 +34,16 @@ FUNCTION(atomic_test_and_set):
        ret
 FUNCTION_END(atomic_test_and_set)
 
-/* int32 atomic_add(vint32 *value, int32 addValue) */
+
+/* int32 atomic_add(int32* value, int32 addValue) */
 FUNCTION(atomic_add):
-       movl            %esi, %eax
        lock
-       xaddl           %eax, (%rdi)
+       xaddl           %esi, (%rdi)
        ret
 FUNCTION_END(atomic_add)
 
-/* int32 atomic_and(vint32 *value, int32 andValue) */
+
+/* int32 atomic_and(int32* value, int32 andValue) */
 FUNCTION(atomic_and):
        movl            (%rdi), %eax
 1:     movl            %eax, %edx
@@ -46,7 +56,8 @@ FUNCTION(atomic_and):
        ret
 FUNCTION_END(atomic_and)
 
-/* int32 atomic_or(vint32 *value, int32 orValue) */
+
+/* int32 atomic_or(int32* value, int32 orValue) */
 FUNCTION(atomic_or):
        movl            (%rdi), %eax
 1:     movl            %eax, %edx
@@ -59,24 +70,31 @@ FUNCTION(atomic_or):
        ret
 FUNCTION_END(atomic_or)
 
-/* int32 atomic_get(vint32 *value) */
+
+/* int32 atomic_get(int32* value) */
 FUNCTION(atomic_get):
        movl            (%rdi), %eax
-1:     lock
-       cmpxchgl        %eax, (%rdi)
-       jnz                     1b
-       ret
+       lfence
+       ret
 FUNCTION_END(atomic_get)
 
-/* int64 atomic_set64(vint64 *value, int64 newValue) */
+
+/* int64 atomic_set64(int64* value, int64 newValue) */
 FUNCTION(atomic_set64):
-       movq            %rsi, %rax
-       lock
-       xchgq           %rax, (%rdi)
+       sfence
+       movq            %rsi, (%rdi)
        ret
 FUNCTION_END(atomic_set64)
 
-/* int64 atomic_test_and_set64(vint64 *value, int64 newValue, int64 
testAgainst) */
+
+/* int64 atomic_get_and_set64(int64* value, int64 newValue) */
+FUNCTION(atomic_get_and_set64):
+       xchgq           %rsi, (%rdi)
+       ret
+FUNCTION_END(atomic_get_and_set64)
+
+
+/* int64 atomic_test_and_set64(int64* value, int64 newValue,
+       int64 testAgainst) */
 FUNCTION(atomic_test_and_set64):
        movq            %rdx, %rax
        lock
@@ -84,7 +102,8 @@ FUNCTION(atomic_test_and_set64):
        ret
 FUNCTION_END(atomic_test_and_set64)
 
-/* int64 atomic_add64(vint64 *value, int64 addValue) */
+
+/* int64 atomic_add64(int64* value, int64 addValue) */
 FUNCTION(atomic_add64):
        movq            %rsi, %rax
        lock
@@ -92,7 +111,8 @@ FUNCTION(atomic_add64):
        ret
 FUNCTION_END(atomic_add64)
 
-/* int64 atomic_and64(vint64 *value, int64 andValue) */
+
+/* int64 atomic_and64(int64* value, int64 andValue) */
 FUNCTION(atomic_and64):
        movq            (%rdi), %rax
 1:     movq            %rax, %rdx
@@ -105,7 +125,8 @@ FUNCTION(atomic_and64):
        ret
 FUNCTION_END(atomic_and64)
 
-/* int64 atomic_or64(vint64 *value, int64 orValue) */
+
+/* int64 atomic_or64(int64* value, int64 orValue) */
 FUNCTION(atomic_or64):
        movq            (%rdi), %rax
 1:     movq            %rax, %rdx
@@ -118,11 +139,10 @@ FUNCTION(atomic_or64):
        ret
 FUNCTION_END(atomic_or64)
 
-/* int64 atomic_get64(vint64 *value) */
+
+/* int64 atomic_get64(int64* value) */
 FUNCTION(atomic_get64):
        movq            (%rdi), %rax
-1:     lock
-       cmpxchgq        %rax, (%rdi)
-       jnz                     1b
-       ret
+       lfence
+       ret
 FUNCTION_END(atomic_get64)
+
diff --git a/src/system/libroot/os/locks/init_once.cpp 
b/src/system/libroot/os/locks/init_once.cpp
index 2ec5e7e..ce3ad18 100644
--- a/src/system/libroot/os/locks/init_once.cpp
+++ b/src/system/libroot/os/locks/init_once.cpp
@@ -16,7 +16,7 @@ enum {
 
 
 status_t
-__init_once(vint32* control, status_t (*initRoutine)(void*), void* data)
+__init_once(int32* control, status_t (*initRoutine)(void*), void* data)
 {
        // Algorithm:
        // The control variable goes through at most four states:
@@ -41,7 +41,7 @@ __init_once(vint32* control, status_t (*initRoutine)(void*), 
void* data)
                // we're the first -- perform the initialization
                initRoutine(data);
 
-               value = atomic_set(control, STATE_INITIALIZED);
+               value = atomic_get_and_set(control, STATE_INITIALIZED);
 
                // If someone else is waiting, we need to delete the semaphore.
                if (value >= 0)
diff --git a/src/system/libroot/posix/pthread/pthread_key.cpp 
b/src/system/libroot/posix/pthread/pthread_key.cpp
index 956c980..431a4d4 100644
--- a/src/system/libroot/posix/pthread/pthread_key.cpp
+++ b/src/system/libroot/posix/pthread/pthread_key.cpp
@@ -106,7 +106,7 @@ pthread_key_delete(pthread_key_t key)
        if (key < 0 || key >= PTHREAD_KEYS_MAX)
                return EINVAL;
 
-       int32 sequence = atomic_set(&sKeyTable[key].sequence,
+       int32 sequence = atomic_get_and_set(&sKeyTable[key].sequence,
                PTHREAD_UNUSED_SEQUENCE);
        if (sequence == PTHREAD_UNUSED_SEQUENCE)
                return EINVAL;
diff --git a/src/system/libroot/posix/pthread/pthread_once.cpp 
b/src/system/libroot/posix/pthread/pthread_once.cpp
index e83627a..72926b8 100644
--- a/src/system/libroot/posix/pthread/pthread_once.cpp
+++ b/src/system/libroot/posix/pthread/pthread_once.cpp
@@ -26,7 +26,8 @@ init_function_canceled(void* data)
        pthread_once_t* onceControl = (pthread_once_t*)data;
 
        // reset the control state to uninitialized
-       int32 value = atomic_set((vint32*)&onceControl->state, 
STATE_UNINITIALIZED);
+       int32 value = atomic_get_and_set((int32*)&onceControl->state,
+                       STATE_UNINITIALIZED);
 
        // If someone has set a semaphore, delete it.
        if (value >= 0)
@@ -66,7 +67,8 @@ pthread_once(pthread_once_t* onceControl, void 
(*initRoutine)(void))
                        initRoutine();
                        pthread_cleanup_pop(false);
 
-                       value = atomic_set((vint32*)&onceControl->state, 
STATE_INITIALIZED);
+                       value = atomic_get_and_set((int32*)&onceControl->state,
+                                       STATE_INITIALIZED);
 
                        // If someone else is waiting, we need to delete the 
semaphore.
                        if (value >= 0)
@@ -105,7 +107,7 @@ pthread_once(pthread_once_t* onceControl, void 
(*initRoutine)(void))
                        return 0;
                } else if (value == STATE_SPINNING) {
                        // out of semaphores -- spin
-                       while (atomic_get((vint32*)&onceControl->state) == 
STATE_SPINNING);
+                       while (atomic_get((int32*)&onceControl->state) == 
STATE_SPINNING);
                }
        }
 }
diff --git a/src/tools/fs_shell/atomic.cpp b/src/tools/fs_shell/atomic.cpp
index 1899a9f..ebfcdc5 100644
--- a/src/tools/fs_shell/atomic.cpp
+++ b/src/tools/fs_shell/atomic.cpp
@@ -10,86 +10,100 @@
 #include "fssh_atomic.h"
 
 
+void
+fssh_atomic_set(int32_t* value, int32_t newValue)
+{
+       atomic_set((int32*)value, newValue);
+}
+
+
 int32_t
-fssh_atomic_set(vint32_t *value, int32_t newValue)
+fssh_atomic_get_and_set(int32_t* value, int32_t newValue)
 {
-       return atomic_set((vint32*)value, newValue);
+       return atomic_get_and_set((int32*)value, newValue);
 }
 
 
 int32_t
-fssh_atomic_test_and_set(vint32_t *value, int32_t newValue, int32_t 
testAgainst)
+fssh_atomic_test_and_set(int32_t *value, int32_t newValue, int32_t testAgainst)
 {
-       return atomic_test_and_set((vint32*)value, newValue, testAgainst);
+       return atomic_test_and_set((int32*)value, newValue, testAgainst);
 }
 
 
 int32_t
-fssh_atomic_add(vint32_t *value, int32_t addValue)
+fssh_atomic_add(int32_t *value, int32_t addValue)
 {
-       return atomic_add((vint32*)value, addValue);
+       return atomic_add((int32*)value, addValue);
 }
 
 
 int32_t
-fssh_atomic_and(vint32_t *value, int32_t andValue)
+fssh_atomic_and(int32_t *value, int32_t andValue)
 {
-       return atomic_and((vint32*)value, andValue);
+       return atomic_and((int32*)value, andValue);
 }
 
 
 int32_t
-fssh_atomic_or(vint32_t *value, int32_t orValue)       
+fssh_atomic_or(int32_t *value, int32_t orValue)        
 {
-       return atomic_or((vint32*)value, orValue);
+       return atomic_or((int32*)value, orValue);
 }
 
 
 int32_t
-fssh_atomic_get(vint32_t *value)
+fssh_atomic_get(int32_t *value)
+{
+       return atomic_get((int32*)value);
+}
+
+
+void
+fssh_atomic_set64(int64_t *value, int64_t newValue)
 {
-       return atomic_get((vint32*)value);
+       atomic_set64((int64*)value, newValue);
 }
 
 
 int64_t
-fssh_atomic_set64(vint64_t *value, int64_t newValue)
+fssh_atomic_get_and_set64(int64_t* value, int64_t newValue)
 {
-       return atomic_set64((vint64*)value, newValue);
+       return atomic_get_and_set64((int64*)value, newValue);
 }
 
 
 int64_t
-fssh_atomic_test_and_set64(vint64_t *value, int64_t newValue, int64_t 
testAgainst)
+fssh_atomic_test_and_set64(int64_t *value, int64_t newValue, int64_t 
testAgainst)
 {
-       return atomic_test_and_set64((vint64 *)value, newValue, testAgainst);
+       return atomic_test_and_set64((int64 *)value, newValue, testAgainst);
 }
 
 
 int64_t
-fssh_atomic_add64(vint64_t *value, int64_t addValue)
+fssh_atomic_add64(int64_t *value, int64_t addValue)
 {
-       return atomic_add64((vint64*)value, addValue);
+       return atomic_add64((int64*)value, addValue);
 }
 
 
 int64_t
-fssh_atomic_and64(vint64_t *value, int64_t andValue)
+fssh_atomic_and64(int64_t *value, int64_t andValue)
 {
-       return atomic_and64((vint64*)value, andValue);
+       return atomic_and64((int64*)value, andValue);
 }
 
 
 int64_t
-fssh_atomic_or64(vint64_t *value, int64_t orValue)     
+fssh_atomic_or64(int64_t *value, int64_t orValue)      
 {
-       return atomic_or64((vint64*)value, orValue);
+       return atomic_or64((int64*)value, orValue);
 }
 
 
 int64_t
-fssh_atomic_get64(vint64_t *value)
+fssh_atomic_get64(int64_t *value)
 {
-       return atomic_get64((vint64*)value);
+       return atomic_get64((int64*)value);
 }
 
diff --git a/src/tools/fs_shell/vfs.cpp b/src/tools/fs_shell/vfs.cpp
index 6a06804..f007068 100644
--- a/src/tools/fs_shell/vfs.cpp
+++ b/src/tools/fs_shell/vfs.cpp
@@ -3609,10 +3609,10 @@ common_lock_node(int fd, bool kernel)
        // We need to set the locking atomically - someone
        // else might set one at the same time
 #ifdef __x86_64__
-       if (fssh_atomic_test_and_set64((vint64_t *)&vnode->mandatory_locked_by,
+       if (fssh_atomic_test_and_set64((int64_t *)&vnode->mandatory_locked_by,
                        (fssh_addr_t)descriptor, 0) != 0)
 #else
-       if (fssh_atomic_test_and_set((vint32_t *)&vnode->mandatory_locked_by,
+       if (fssh_atomic_test_and_set((int32_t *)&vnode->mandatory_locked_by,
                        (fssh_addr_t)descriptor, 0) != 0)
 #endif
                status = FSSH_B_BUSY;
@@ -3637,10 +3637,10 @@ common_unlock_node(int fd, bool kernel)
        // We need to set the locking atomically - someone
        // else might set one at the same time
 #ifdef __x86_64__
-       if (fssh_atomic_test_and_set64((vint64_t *)&vnode->mandatory_locked_by,
+       if (fssh_atomic_test_and_set64((int64_t *)&vnode->mandatory_locked_by,
                        0, (fssh_addr_t)descriptor) != (int64_t)descriptor)
 #else
-       if (fssh_atomic_test_and_set((vint32_t *)&vnode->mandatory_locked_by,
+       if (fssh_atomic_test_and_set((int32_t *)&vnode->mandatory_locked_by,
                        0, (fssh_addr_t)descriptor) != (int32_t)descriptor)
 #endif
                status = FSSH_B_BAD_VALUE;

############################################################################

Commit:      273f2f38cd4b219ac8197888962d0710c149d606
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Nov  5 21:47:18 2013 UTC

kernel: Improve spinlock implementation

atomic_or() and atomic_and() are not supported by x86 and need to be
emulated using CAS. Use atomic_get_and_set() and atomic_set() instead.

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/smp.h b/headers/private/kernel/smp.h
index d20ee28..c7aa163 100644
--- a/headers/private/kernel/smp.h
+++ b/headers/private/kernel/smp.h
@@ -76,7 +76,7 @@ int smp_intercpu_int_handler(int32 cpu);
 static inline bool
 try_acquire_spinlock_inline(spinlock* lock)
 {
-       return atomic_or((int32*)lock, 1) == 0;
+       return atomic_get_and_set((int32*)lock, 1) == 0;
 }
 
 
@@ -92,7 +92,7 @@ acquire_spinlock_inline(spinlock* lock)
 static inline void
 release_spinlock_inline(spinlock* lock)
 {
-       atomic_and((int32*)lock, 0);
+       atomic_set((int32*)lock, 0);
 }
 
 
diff --git a/src/system/kernel/smp.cpp b/src/system/kernel/smp.cpp
index 1b2f886..caf29bf 100644
--- a/src/system/kernel/smp.cpp
+++ b/src/system/kernel/smp.cpp
@@ -313,7 +313,7 @@ try_acquire_spinlock(spinlock* lock)
        if (atomic_add(&lock->lock, 1) != 0)
                return false;
 #else
-       if (atomic_or((int32*)lock, 1) != 0)
+       if (atomic_get_and_set((int32*)lock, 1) != 0)
                return false;
 
 #      if DEBUG_SPINLOCKS
@@ -353,7 +353,7 @@ acquire_spinlock(spinlock* lock)
                                process_all_pending_ici(currentCPU);
                                PAUSE();
                        }
-                       if (atomic_or((int32*)lock, 1) == 0)
+                       if (atomic_get_and_set((int32*)lock, 1) == 0)
                                break;
                }
 
@@ -364,7 +364,7 @@ acquire_spinlock(spinlock* lock)
        } else {
 #if DEBUG_SPINLOCKS
                int32 oldValue;
-               oldValue = atomic_or((int32*)lock, 1);
+               oldValue = atomic_get_and_set((int32*)lock, 1);
                if (oldValue != 0) {
                        panic("acquire_spinlock: attempt to acquire lock %p 
twice on "
                                "non-SMP system (last caller: %p, value %" 
B_PRId32 ")", lock,
@@ -407,13 +407,13 @@ acquire_spinlock_nocheck(spinlock *lock)
                                PAUSE();
                        }
 
-                       if (atomic_or((int32*)lock, 1) == 0)
+                       if (atomic_get_and_set((int32*)lock, 1) == 0)
                                break;
                }
 #endif
        } else {
 #if DEBUG_SPINLOCKS
-               if (atomic_or((int32*)lock, 1) != 0) {
+               if (atomic_get_and_set((int32*)lock, 1) != 0) {
                        panic("acquire_spinlock_nocheck: attempt to acquire 
lock %p twice "
                                "on non-SMP system\n", lock);
                }
@@ -450,7 +450,7 @@ acquire_spinlock_cpu(int32 currentCPU, spinlock *lock)
                                process_all_pending_ici(currentCPU);
                                PAUSE();
                        }
-                       if (atomic_or((int32*)lock, 1) == 0)
+                       if (atomic_get_and_set((int32*)lock, 1) == 0)
                                break;
                }
 
@@ -461,7 +461,7 @@ acquire_spinlock_cpu(int32 currentCPU, spinlock *lock)
        } else {
 #if DEBUG_SPINLOCKS
                int32 oldValue;
-               oldValue = atomic_or((int32*)lock, 1);
+               oldValue = atomic_get_and_set((int32*)lock, 1);
                if (oldValue != 0) {
                        panic("acquire_spinlock_cpu(): attempt to acquire lock 
%p twice on "
                                "non-SMP system (last caller: %p, value %" 
B_PRId32 ")", lock,
@@ -498,9 +498,11 @@ release_spinlock(spinlock *lock)
                                }
                        }
                }
-#else
-               if (atomic_and((int32*)lock, 0) != 1)
+#elif DEBUG_SPINLOCKS
+               if (atomic_get_and_set((int32*)lock, 0) != 1)
                        panic("release_spinlock: lock %p was already 
released\n", lock);
+#else
+               atomic_set((int32*)lock, 0);
 #endif
        } else {
 #if DEBUG_SPINLOCKS
@@ -508,7 +510,7 @@ release_spinlock(spinlock *lock)
                        panic("release_spinlock: attempt to release lock %p 
with "
                                "interrupts enabled\n", lock);
                }
-               if (atomic_and((int32*)lock, 0) != 1)
+               if (atomic_get_and_set((int32*)lock, 0) != 1)
                        panic("release_spinlock: lock %p was already 
released\n", lock);
 #endif
 #if DEBUG_SPINLOCK_LATENCIES

############################################################################

Commit:      d929eb20ba2f9f884a9013b14b3fd85cbad4ba12
Author:      Pawel Dziepak <pdziepak@xxxxxxxxxxx>
Date:        Tue Jul 23 20:24:15 2013 UTC

docs: Update atomic_*() functions documentation

----------------------------------------------------------------------------

diff --git a/docs/user/support/SupportDefs.dox 
b/docs/user/support/SupportDefs.dox
index 8c29d42..962a9cc 100644
--- a/docs/user/support/SupportDefs.dox
+++ b/docs/user/support/SupportDefs.dox
@@ -627,17 +627,36 @@
 //! @{
 
 
-/*!    \fn int32 atomic_set(vint32 *value, int32 newValue)
+/*!    \fn void atomic_set(int32* value, int32 newValue)
        \brief Atomically set the variable \a value to \a newvalue.
 
        This is a thread-safe way of performing the \c *value \c = \c newValue
        operation. You should use these function when two or more threads might
        access the variable simultaneously. You don't have to use a semaphore 
or a
+       mutex in this case. The variable must be naturally aligned.
+
+       \sa atomic_set64() for a version that works on \c long \c long.
+       \sa atomic_test_and_set()
+       \sa atomic_add()
+       \sa atomic_and()
+       \sa atomic_or(),
+       \sa atomic_get()
+*/
+
+
+/*!    \fn int32 atomic_get_and_set(int32* value, int32 newValue)
+       \brief Atomically set the variable \a value to \a newvalue and return 
the
+               old value.
+
+       This is a thread-safe way of performing the \c *value \c = \c newValue
+       operation. You should use these function when two or more threads might
+       access the variable simultaneously. You don't have to use a semaphore 
or a
        mutex in this case. 
 
        \return The original value of \c value.
 
-       \sa atomic_set64() for a version that works on \c long \c long.
+       \sa atomic_get_and_set64() for a version that works on \c long \c long.
+       \sa atomic_set()
        \sa atomic_test_and_set()
        \sa atomic_add()
        \sa atomic_and()
@@ -646,7 +665,7 @@
 */
 
 
-/*!    \fn int32 atomic_test_and_set(vint32 *value, int32 newValue,
+/*!    \fn int32 atomic_test_and_set(int32* value, int32 newValue,
                int32 testAgainst)
        \brief Atomically set the variable \a value to \a newValue if the 
current
                   value is \a testAgainst.
@@ -659,6 +678,7 @@
        \return The original value of \c value.
 
        \sa atomic_test_and_set64() for a version that works on \c long \c long.
+       \sa atomic_get_and_set()
        \sa atomic_set()
        \sa atomic_add()
        \sa atomic_and()
@@ -668,7 +688,7 @@
 
 
 /*!
-       \fn int32 atomic_add(vint32 *value, int32 addValue)
+       \fn int32 atomic_add(int32* value, int32 addValue)
        \brief Atomically add the value of \a addValue to \a value.
 
        This is a thread-safe way of performing the \c *value \c += \c addValue
@@ -679,6 +699,7 @@
        \return The original value of \c value.
 
        \sa atomic_add64() for a version that works on \c long \c long.
+       \sa atomic_get_and_set()
        \sa atomic_set()
        \sa atomic_test_and_set()
        \sa atomic_and()
@@ -687,7 +708,7 @@
 */
 
 
-/*!    \fn int32 atomic_and(vint32 *value, int32 andValue)
+/*!    \fn int32 atomic_and(int32* value, int32 andValue)
        \brief Atomically perform a bitwise AND operation of \a andValue to the 
          variable \a andValue.
 
@@ -699,6 +720,7 @@
        \return The original value of \c value.
 
        \sa atomic_and64() for a version that works on \c long \c long.
+       \sa atomic_get_and_set()
        \sa atomic_set()
        \sa atomic_test_and_set()
        \sa atomic_add()
@@ -709,7 +731,7 @@
 
 
 /*!
-       \fn int32 atomic_or(vint32 *value, int32 orValue)
+       \fn int32 atomic_or(int32* value, int32 orValue)
        \brief Atomically perform a bitwise OR operation of \a orValue to the 
                   variable \a andValue.
 
@@ -721,6 +743,7 @@
        \return The original value of \c value.
 
        \sa atomic_or64() for a version that works on \c long \c long.
+       \sa atomic_get_and_set()
        \sa atomic_set()
        \sa atomic_test_and_set()
        \sa atomic_add()
@@ -730,17 +753,18 @@
 
 
 /*!
-       \fn int32 atomic_get(vint32 *value)
+       \fn int32 atomic_get(int32* value)
        \brief Atomically return the value of \c value.
 
        This is a thread-safe way of reading the contents of the \c value
        operation. You should use these function when two or more threads might
        access the variable simultaneously. You don't have to use a semaphore 
or a
-       mutex in this case. 
+       mutex in this case. The variable must be naturally aligned.
 
        \return The original value of \c value.
 
        \sa atomic_get64() for a version that works on \c long \c long.
+       \sa atomic_get_and_set()
        \sa atomic_set()
        \sa atomic_test_and_set()
        \sa atomic_add()
@@ -750,17 +774,38 @@
 
 
 /*!
-       \fn int64 atomic_set64(vint64 *value, int64 newValue)
+       \fn void atomic_set64(int64* value, int64 newValue)
        \brief Atomically set the variable \a value to \a newvalue.
 
        This is a thread-safe way of performing the \c *value \c = \c newValue
        operation. You should use these function when two or more threads might
        access the variable simultaneously. You don't have to use a semaphore 
or a
+       mutex in this case. The variable must be naturally aligned.
+
+       \sa atomic_set() for a version that works on an \c int32.
+       \sa atomic_get_and_set64()
+       \sa atomic_test_and_set64()
+       \sa atomic_add64()
+       \sa atomic_and64()
+       \sa atomic_or64()
+       \sa atomic_get64()
+*/
+
+
+/*!
+       \fn int64 atomic_get_and_set64(int64* value, int64 newValue)
+       \brief Atomically set the variable \a value to \a newvalue and return
+               the old value.
+
+       This is a thread-safe way of performing the \c *value \c = \c newValue
+       operation. You should use these function when two or more threads might
+       access the variable simultaneously. You don't have to use a semaphore 
or a
        mutex in this case. 
 
        \return The original value of \c value.
 
-       \sa atomic_set() for a version that works on an \c int32.
+       \sa atomic_get_and_set() for a version that works on an \c int32.
+       \sa atomic_set64()
        \sa atomic_test_and_set64()
        \sa atomic_add64()
        \sa atomic_and64()
@@ -770,7 +815,7 @@
 
 
 /*!
-       \fn int64 atomic_test_and_set64(vint64 *value, int64 newValue,
+       \fn int64 atomic_test_and_set64(int64* value, int64 newValue,
                int64 testAgainst)
        \brief Atomically set the variable \a value to \a newValue if the 
current
          value is \a testAgainst.
@@ -783,6 +828,7 @@
        \return The original value of \c value.
 
        \sa atomic_test_and_set() for a version that works on an \c int32.
+       \sa atomic_get_and_set64()
        \sa atomic_set64()
        \sa atomic_add64()
        \sa atomic_and64()
@@ -792,7 +838,7 @@
 
 
 /*!
-       \fn int64 atomic_add64(vint64 *value, int64 addValue)
+       \fn int64 atomic_add64(int64* value, int64 addValue)
        \brief Atomically add the value of \a addValue to \a value.
 
        This is a thread-safe way of performing the \c *value \c += \c addValue
@@ -803,6 +849,7 @@
        \return The original value of \c value.
 
        \sa atomic_add() for a version that works on an \c int32.
+       \sa atomic_get_and_set64()
        \sa atomic_set64()
        \sa atomic_test_and_set64()
        \sa atomic_and64()
@@ -812,7 +859,7 @@
 
 
 /*!
-       \fn int64 atomic_and64(vint64 *value, int64 andValue)
+       \fn int64 atomic_and64(int64* value, int64 andValue)
        \brief Atomically perform a bitwise AND operation of \a andValue to the
                   variable \a andValue.
 
@@ -824,6 +871,7 @@
        \return The original value of \c value.
 
        \sa atomic_and() for a version that works on an \c int32.
+       \sa atomic_get_and_set64()
        \sa atomic_set64()
        \sa atomic_test_and_set64()
        \sa atomic_add64()
@@ -832,7 +880,7 @@
 */
 
 
-/*!    \fn int64 atomic_or64(vint64 *value, int64 orValue)
+/*!    \fn int64 atomic_or64(int64* value, int64 orValue)
        \brief Atomically perform a bitwise OR operation of \a orValue to the
                   variable \a andValue.
 
@@ -844,6 +892,7 @@
        \return The original value of \c value.
 
        \sa atomic_or() for a version that works on an \c int32.
+       \sa atomic_get_and_set64()
        \sa atomic_set64()
        \sa atomic_test_and_set64()
        \sa atomic_add64()
@@ -853,17 +902,18 @@
 
 
 /*!
-       \fn int64 atomic_get64(vint64 *value)
+       \fn int64 atomic_get64(int64* value)
        \brief Atomically return the value of \c value.
 
        This is a thread-safe way of reading the contents of the \c value
        operation. You should use these function when two or more threads might
        access the variable simultaneously. You don't have to use a semaphore 
or a
-       mutex in this case. 
+       mutex in this case. The variable must be naturally aligned.
 
        \return The original value of \c value.
 
        \sa atomic_get() for a version that works on an \c int32.
+       \sa atomic_get_and_set64()
        \sa atomic_set64()
        \sa atomic_test_and_set64()
        \sa atomic_add64()


Other related posts:

  • » [haiku-commits] BRANCH pdziepak-github.scheduler [d929eb2] src/system/kernel src/system/libroot/os/arch/x86 src/tools/fs_shell docs/user/support headers/os/support - pdziepak-github . scheduler