[haiku-commits] r35206 - haiku/trunk/src/system/kernel/slab

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Wed, 20 Jan 2010 19:40:16 +0100 (CET)

Author: bonefish
Date: 2010-01-20 19:40:16 +0100 (Wed, 20 Jan 2010)
New Revision: 35206
Changeset: http://dev.haiku-os.org/changeset/35206/haiku

Modified:
   haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp
   haiku/trunk/src/system/kernel/slab/HashedObjectCache.h
   haiku/trunk/src/system/kernel/slab/ObjectCache.cpp
   haiku/trunk/src/system/kernel/slab/ObjectCache.h
   haiku/trunk/src/system/kernel/slab/Slab.cpp
   haiku/trunk/src/system/kernel/slab/SmallObjectCache.cpp
   haiku/trunk/src/system/kernel/slab/SmallObjectCache.h
Log:
Always unlock the object cache while allocating memory. This is necessary
for the CACHE_DONT_SLEEP flag to actually work, since otherwise the thread
could block on the mutex held by a thread that is allocating memory. We use
two condition variables to prevent multiple threads from allocating slabs
for the same cache at the same time.
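
For illustration, here is a minimal, self-contained user-space model of the
scheme. It is only a sketch: std::mutex and std::condition_variable stand in
for the kernel's mutex and ConditionVariable, and all names are illustrative
rather than the kernel API.

// Model of the new scheme, under the assumptions stated above.
#include <condition_variable>
#include <mutex>

struct CacheModel {
	std::mutex lock;                  // models the per-cache mutex
	std::condition_variable resized;  // signalled when a resize finishes
	bool resizeInProgress = false;    // models resize_entry_* != NULL

	// Models object_cache_reserve_internal(): grow the cache, but never
	// while holding the lock, and never concurrently with another grower.
	void Reserve()
	{
		std::unique_lock<std::mutex> locker(lock);

		// Someone else is already adding slabs -- wait for them to
		// finish instead of allocating a second slab concurrently.
		while (resizeInProgress)
			resized.wait(locker);

		resizeInProgress = true;
		locker.unlock();       // never allocate with the cache locked
		AllocateSlabMemory();  // may block; the cache stays usable
		locker.lock();
		resizeInProgress = false;
		resized.notify_all();  // wake threads queued behind this resize
	}

	void AllocateSlabMemory() { /* placeholder for the real allocation */ }
};

The kernel version keeps two such pending-resize slots, one for
CACHE_DONT_SLEEP allocations and one for sleeping allocations, so that a
caller that must not sleep never queues behind an allocation that may block
in the VM for a long time.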


Modified: haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp    2010-01-20 18:26:24 UTC (rev 35205)
+++ haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp    2010-01-20 18:40:16 UTC (rev 35206)
@@ -77,24 +77,22 @@
 
 
 slab*
-HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
+HashedObjectCache::CreateSlab(uint32 flags)
 {
        if (!check_cache_quota(this))
                return NULL;
 
-       if (unlockWhileAllocating)
-               Unlock();
+       Unlock();
 
        slab* slab = allocate_slab(flags);
 
-       if (unlockWhileAllocating)
-               Lock();
+       Lock();
 
        if (slab == NULL)
                return NULL;
 
        void* pages;
-       if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) == B_OK) {
+       if ((this->*allocate_pages)(&pages, flags) == B_OK) {
                if (InitSlab(slab, pages, slab_size, flags))
                        return slab;
 

Modified: haiku/trunk/src/system/kernel/slab/HashedObjectCache.h
===================================================================
--- haiku/trunk/src/system/kernel/slab/HashedObjectCache.h      2010-01-20 18:26:24 UTC (rev 35205)
+++ haiku/trunk/src/system/kernel/slab/HashedObjectCache.h      2010-01-20 18:40:16 UTC (rev 35206)
@@ -23,8 +23,7 @@
                                                                        object_cache_destructor destructor,
                                                                        object_cache_reclaimer reclaimer);
 
-       virtual slab*                           CreateSlab(uint32 flags,
-                                                                       bool unlockWhileAllocating);
+       virtual slab*                           CreateSlab(uint32 flags);
        virtual void                            ReturnSlab(slab* slab);
        virtual slab*                           ObjectSlab(void* object) const;
 

Modified: haiku/trunk/src/system/kernel/slab/ObjectCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/ObjectCache.cpp  2010-01-20 18:26:24 UTC (rev 35205)
+++ haiku/trunk/src/system/kernel/slab/ObjectCache.cpp  2010-01-20 18:40:16 UTC (rev 35206)
@@ -85,6 +85,8 @@
        this->flags = flags;
 
        resize_request = NULL;
+       resize_entry_can_wait = NULL;
+       resize_entry_dont_wait = NULL;
 
        // no gain in using the depot in single cpu setups
        if (smp_get_num_cpus() == 1)
@@ -295,8 +297,7 @@
 
 
 status_t
-ObjectCache::AllocatePages(void** pages, uint32 flags,
-       bool unlockWhileAllocating)
+ObjectCache::AllocatePages(void** pages, uint32 flags)
 {
        TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", slab_size, flags);
 
@@ -309,8 +310,7 @@
                && slab_size != B_PAGE_SIZE)
                addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
 
-       if (unlockWhileAllocating)
-               Unlock();
+       Unlock();
 
        // if we are allocating, it is because we need the pages immediatly
        // so we lock them. when moving the slab to the empty list we should
@@ -320,8 +320,7 @@
                B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
                (flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
 
-       if (unlockWhileAllocating)
-               Lock();
+       Lock();
 
        if (areaId < 0)
                return areaId;
@@ -353,20 +352,17 @@
 
 
 status_t
-ObjectCache::EarlyAllocatePages(void** pages, uint32 flags,
-       bool unlockWhileAllocating)
+ObjectCache::EarlyAllocatePages(void** pages, uint32 flags)
 {
        TRACE_CACHE(this, "early allocate pages (%lu, 0x0%lx)", slab_size,
                flags);
 
-       if (unlockWhileAllocating)
-               Unlock();
+       Unlock();
 
        addr_t base = vm_allocate_early(sKernelArgs, slab_size,
                slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 
-       if (unlockWhileAllocating)
-               Lock();
+       Lock();
 
        *pages = (void*)base;
 

Modified: haiku/trunk/src/system/kernel/slab/ObjectCache.h
===================================================================
--- haiku/trunk/src/system/kernel/slab/ObjectCache.h    2010-01-20 18:26:24 UTC (rev 35205)
+++ haiku/trunk/src/system/kernel/slab/ObjectCache.h    2010-01-20 18:40:16 UTC (rev 35206)
@@ -8,6 +8,7 @@
 #define OBJECT_CACHE_H
 
 
+#include <condition_variable.h>
 #include <lock.h>
 #include <slab/ObjectDepot.h>
 #include <slab/Slab.h>
@@ -31,6 +32,10 @@
 
 typedef DoublyLinkedList<slab> SlabList;
 
+struct ObjectCacheResizeEntry {
+       ConditionVariable       condition;
+};
+
 struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
                        char                            name[32];
                        mutex                           lock;
@@ -53,13 +58,16 @@
 
                        ResizeRequest*          resize_request;
 
+                       ObjectCacheResizeEntry* resize_entry_can_wait;
+                       ObjectCacheResizeEntry* resize_entry_dont_wait;
+
                        void*                           cookie;
                        object_cache_constructor constructor;
                        object_cache_destructor destructor;
                        object_cache_reclaimer reclaimer;
 
                        status_t                        (ObjectCache::*allocate_pages)(void** pages,
-                                                                       uint32 flags, bool unlockWhileAllocating);
+                                                                       uint32 flags);
                        void                            (ObjectCache::*free_pages)(void* pages);
 
                        object_depot            depot;
@@ -76,8 +84,7 @@
                        void                            InitPostArea();
                        void                            Delete();
 
-       virtual slab*                           CreateSlab(uint32 flags,
-                                                                       bool unlockWhileAllocating) = 0;
+       virtual slab*                           CreateSlab(uint32 flags) = 0;
        virtual void                            ReturnSlab(slab* slab) = 0;
        virtual slab*                           ObjectSlab(void* object) const = 0;
 
@@ -95,11 +102,9 @@
                        void                            Unlock()        { mutex_unlock(&lock); }
 
        static  void                            SetKernelArgs(kernel_args* args);
-                       status_t                        AllocatePages(void** pages, uint32 flags,
-                                                                       bool unlockWhileAllocating);
+                       status_t                        AllocatePages(void** pages, uint32 flags);
                        void                            FreePages(void* pages);
-                       status_t                        EarlyAllocatePages(void** pages, uint32 flags,
-                                                                       bool unlockWhileAllocating);
+                       status_t                        EarlyAllocatePages(void** pages, uint32 flags);
                        void                            EarlyFreePages(void* pages);
 
 private:

Modified: haiku/trunk/src/system/kernel/slab/Slab.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/Slab.cpp 2010-01-20 18:26:24 UTC (rev 35205)
+++ haiku/trunk/src/system/kernel/slab/Slab.cpp 2010-01-20 18:40:16 UTC (rev 35206)
@@ -316,17 +316,55 @@
 */
 static status_t
 object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
-       uint32 flags, bool unlockWhileAllocating)
+       uint32 flags)
 {
+       // If someone else is already adding slabs, we wait for that to be finished
+       // first.
+       while (true) {
+               if (objectCount <= cache->total_objects - cache->used_count)
+                       return B_OK;
+
+               ObjectCacheResizeEntry* resizeEntry = NULL;
+               if (cache->resize_entry_dont_wait != NULL) {
+                       resizeEntry = cache->resize_entry_dont_wait;
+               } else if (cache->resize_entry_can_wait != NULL
+                               && (flags & CACHE_DONT_SLEEP) == 0) {
+                       resizeEntry = cache->resize_entry_can_wait;
+               } else
+                       break;
+
+               ConditionVariableEntry entry;
+               resizeEntry->condition.Add(&entry);
+
+               cache->Unlock();
+               entry.Wait();
+               cache->Lock();
+       }
+
+       // prepare the resize entry others can wait on
+       ObjectCacheResizeEntry*& resizeEntry = (flags & CACHE_DONT_SLEEP) != 0
+               ? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;
+
+       ObjectCacheResizeEntry myResizeEntry;
+       resizeEntry = &myResizeEntry;
+       resizeEntry->condition.Init(cache, "wait for slabs");
+
+       // add new slabs until there are as many free ones as requested
        while (objectCount > cache->total_objects - cache->used_count) {
-               slab* newSlab = cache->CreateSlab(flags, unlockWhileAllocating);
-               if (newSlab == NULL)
+               slab* newSlab = cache->CreateSlab(flags);
+               if (newSlab == NULL) {
+                       resizeEntry->condition.NotifyAll();
+                       resizeEntry = NULL;
                        return B_NO_MEMORY;
+               }
 
                cache->empty.Add(newSlab);
                cache->empty_count++;
        }
 
+       resizeEntry->condition.NotifyAll();
+       resizeEntry = NULL;
+
        return B_OK;
 }
 
@@ -422,7 +460,7 @@
                MutexLocker cacheLocker(cache->lock);
 
                status_t error = object_cache_reserve_internal(cache,
-                       cache->min_object_reserve, 0, true);
+                       cache->min_object_reserve, 0);
                if (error != B_OK) {
                        dprintf("object cache resizer: Failed to resize object 
cache "
                                "%p!\n", cache);
@@ -557,7 +595,7 @@
 
        if (cache->partial.IsEmpty()) {
                if (cache->empty.IsEmpty()) {
-                       if (object_cache_reserve_internal(cache, 1, flags, false) < B_OK) {
+                       if (object_cache_reserve_internal(cache, 1, flags) < B_OK) {
                                T(Alloc(cache, flags, NULL));
                                return NULL;
                        }
@@ -625,7 +663,7 @@
        T(Reserve(cache, objectCount, flags));
 
        MutexLocker _(cache->lock);
-       return object_cache_reserve_internal(cache, objectCount, flags, false);
+       return object_cache_reserve_internal(cache, objectCount, flags);
 }
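
The choice of which pending resize to wait on, made at the top of
object_cache_reserve_internal() above, boils down to the following rule.
This is only a sketch with a stand-in ResizeEntry type and a hypothetical
helper name; the real code inlines the logic:

// Illustrative only: mirrors the waiter's choice between
// resize_entry_dont_wait and resize_entry_can_wait above. A caller that
// must not sleep may queue behind another non-sleeping resize, but never
// behind one that is allowed to block in the VM indefinitely.
#include <cstddef>

struct ResizeEntry;

static ResizeEntry*
pick_entry_to_wait_on(ResizeEntry* dontWaitEntry, ResizeEntry* canWaitEntry,
	bool callerCanSleep)
{
	if (dontWaitEntry != NULL)
		return dontWaitEntry;
	if (canWaitEntry != NULL && callerCanSleep)
		return canWaitEntry;
	return NULL;	// no suitable resize in flight -- start our own
}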
 
 

Modified: haiku/trunk/src/system/kernel/slab/SmallObjectCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/SmallObjectCache.cpp     2010-01-20 18:26:24 UTC (rev 35205)
+++ haiku/trunk/src/system/kernel/slab/SmallObjectCache.cpp     2010-01-20 18:40:16 UTC (rev 35206)
@@ -40,13 +40,13 @@
 
 
 slab*
-SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
+SmallObjectCache::CreateSlab(uint32 flags)
 {
        if (!check_cache_quota(this))
                return NULL;
 
        void* pages;
-       if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) != B_OK)
+       if ((this->*allocate_pages)(&pages, flags) != B_OK)
                return NULL;
 
        return InitSlab(slab_in_pages(pages, slab_size), pages,

Modified: haiku/trunk/src/system/kernel/slab/SmallObjectCache.h
===================================================================
--- haiku/trunk/src/system/kernel/slab/SmallObjectCache.h       2010-01-20 18:26:24 UTC (rev 35205)
+++ haiku/trunk/src/system/kernel/slab/SmallObjectCache.h       2010-01-20 18:40:16 UTC (rev 35206)
@@ -19,8 +19,7 @@
                                                                        object_cache_destructor destructor,
                                                                        object_cache_reclaimer reclaimer);
 
-       virtual slab*                           CreateSlab(uint32 flags,
-                                                                       bool unlockWhileAllocating);
+       virtual slab*                           CreateSlab(uint32 flags);
        virtual void                            ReturnSlab(slab* slab);
        virtual slab*                           ObjectSlab(void* object) const;
 };

