[haiku-commits] r35174 - in haiku/trunk: headers/private/kernel/slab src/system/kernel/slab src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 19 Jan 2010 20:13:25 +0100 (CET)

Author: bonefish
Date: 2010-01-19 20:13:25 +0100 (Tue, 19 Jan 2010)
New Revision: 35174
Changeset: http://dev.haiku-os.org/changeset/35174/haiku

Added:
   haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp
   haiku/trunk/src/system/kernel/slab/HashedObjectCache.h
   haiku/trunk/src/system/kernel/slab/ObjectCache.cpp
   haiku/trunk/src/system/kernel/slab/ObjectCache.h
   haiku/trunk/src/system/kernel/slab/SmallObjectCache.cpp
   haiku/trunk/src/system/kernel/slab/SmallObjectCache.h
Modified:
   haiku/trunk/headers/private/kernel/slab/ObjectDepot.h
   haiku/trunk/headers/private/kernel/slab/Slab.h
   haiku/trunk/src/system/kernel/slab/Jamfile
   haiku/trunk/src/system/kernel/slab/ObjectDepot.cpp
   haiku/trunk/src/system/kernel/slab/Slab.cpp
   haiku/trunk/src/system/kernel/slab/slab_private.h
   haiku/trunk/src/system/kernel/vm/vm.cpp
Log:
* Split the slab allocator code into separate source files and C++-ified
  things a bit.
* Some style cleanup.
* The object depot now has a cookie that is passed to the return hook.
* Fixed object_cache_return_object_wrapper() to use the new cookie (see the
  sketch below).

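The cookie is the one behavioral change in this commit: object_depot_init()
now takes a caller-supplied pointer that the depot hands back verbatim to the
return hook, so the hook no longer has to derive its owner from the depot
pointer. A minimal sketch of the new contract (MyCache and my_cache_free()
are invented for illustration, not part of the commit):

    struct MyCache;
    void my_cache_free(MyCache* cache, void* object);   // hypothetical

    static void
    my_return_hook(object_depot* depot, void* cookie, void* object)
    {
        // 'cookie' is whatever was passed to object_depot_init() --
        // typically the cache that owns this depot.
        my_cache_free((MyCache*)cookie, object);
    }

    // Registration with the new four-argument signature:
    //     object_depot_init(&depot, flags, myCache, my_return_hook);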

Modified: haiku/trunk/headers/private/kernel/slab/ObjectDepot.h
===================================================================
--- haiku/trunk/headers/private/kernel/slab/ObjectDepot.h       2010-01-19 18:51:30 UTC (rev 35173)
+++ haiku/trunk/headers/private/kernel/slab/ObjectDepot.h       2010-01-19 19:13:25 UTC (rev 35174)
@@ -10,13 +10,19 @@
 #include <KernelExport.h>
 
 
+struct DepotMagazine;
+
 typedef struct object_depot {
-       recursive_lock lock;
-       struct depot_magazine *full, *empty;
-       size_t full_count, empty_count;
-       struct depot_cpu_store *stores;
+       recursive_lock                  lock;
+       DepotMagazine*                  full;
+       DepotMagazine*                  empty;
+       size_t                                  full_count;
+       size_t                                  empty_count;
+       struct depot_cpu_store* stores;
 
-       void (*return_object)(struct object_depot *depot, void *object);
+       void*   cookie;
+       void (*return_object)(struct object_depot* depot, void* cookie,
+               void* object);
 } object_depot;
 
 
@@ -24,14 +30,14 @@
 extern "C" {
 #endif
 
-status_t object_depot_init(object_depot *depot, uint32 flags,
-       void (*returnObject)(object_depot *, void *));
-void object_depot_destroy(object_depot *depot);
+status_t object_depot_init(object_depot* depot, uint32 flags, void *cookie,
+       void (*returnObject)(object_depot* depot, void* cookie, void* object));
+void object_depot_destroy(object_depot* depot);
 
-void *object_depot_obtain(object_depot *depot);
-int object_depot_store(object_depot *depot, void *object);
+void* object_depot_obtain(object_depot* depot);
+int object_depot_store(object_depot* depot, void* object);
 
-void object_depot_make_empty(object_depot *depot);
+void object_depot_make_empty(object_depot* depot);
 
 #ifdef __cplusplus
 }

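For context on the API above: the depot is the magazine layer in front of a
slab cache, and obtain()/store() are its fast paths. A hedged usage sketch,
assuming object_depot_obtain() returns NULL when the magazines are empty and
object_depot_store() returns nonzero on success; the slab fallback functions
are invented:

    #include <slab/ObjectDepot.h>

    void* allocate_from_slab();             // hypothetical slow path
    void free_to_slab(void* object);        // hypothetical slow path

    void*
    cached_alloc(object_depot* depot)
    {
        void* object = object_depot_obtain(depot);   // per-CPU magazine hit?
        if (object == NULL)
            object = allocate_from_slab();
        return object;
    }

    void
    cached_free(object_depot* depot, void* object)
    {
        if (!object_depot_store(depot, object))      // magazines full
            free_to_slab(object);
    }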
Modified: haiku/trunk/headers/private/kernel/slab/Slab.h
===================================================================
--- haiku/trunk/headers/private/kernel/slab/Slab.h      2010-01-19 18:51:30 UTC (rev 35173)
+++ haiku/trunk/headers/private/kernel/slab/Slab.h      2010-01-19 19:13:25 UTC (rev 35174)
@@ -26,37 +26,38 @@
        CACHE_DURING_BOOT               = 1 << 31
 };
 
-typedef struct object_cache object_cache;
+struct ObjectCache;
+typedef struct ObjectCache object_cache;
 
-typedef status_t (*object_cache_constructor)(void *cookie, void *object);
-typedef void (*object_cache_destructor)(void *cookie, void *object);
-typedef void (*object_cache_reclaimer)(void *cookie, int32 level);
+typedef status_t (*object_cache_constructor)(void* cookie, void* object);
+typedef void (*object_cache_destructor)(void* cookie, void* object);
+typedef void (*object_cache_reclaimer)(void* cookie, int32 level);
 
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-object_cache *create_object_cache(const char *name, size_t object_size,
-       size_t alignment, void *cookie, object_cache_constructor constructor,
+object_cache* create_object_cache(const char* name, size_t object_size,
+       size_t alignment, void* cookie, object_cache_constructor constructor,
        object_cache_destructor);
-object_cache *create_object_cache_etc(const char *name, size_t object_size,
-       size_t alignment, size_t max_byte_usage, uint32 flags, void *cookie,
+object_cache* create_object_cache_etc(const char* name, size_t object_size,
+       size_t alignment, size_t max_byte_usage, uint32 flags, void* cookie,
        object_cache_constructor constructor, object_cache_destructor destructor,
        object_cache_reclaimer reclaimer);
 
-void delete_object_cache(object_cache *cache);
+void delete_object_cache(object_cache* cache);
 
-status_t object_cache_set_minimum_reserve(object_cache *cache,
+status_t object_cache_set_minimum_reserve(object_cache* cache,
        size_t objectCount);
 
-void *object_cache_alloc(object_cache *cache, uint32 flags);
-void object_cache_free(object_cache *cache, void *object);
+void* object_cache_alloc(object_cache* cache, uint32 flags);
+void object_cache_free(object_cache* cache, void* object);
 
-status_t object_cache_reserve(object_cache *cache, size_t object_count,
+status_t object_cache_reserve(object_cache* cache, size_t object_count,
        uint32 flags);
 
-void object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory);
+void object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory);
 
 #ifdef __cplusplus
 }

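The C interface itself is unchanged; only the pointer style moved. A hedged
usage sketch of the constructor/destructor hooks (MyObject and the
placement-new pattern are illustrative assumptions, not from the commit):

    #include <new>
    #include <slab/Slab.h>

    struct MyObject { int value; };

    static status_t
    my_constructor(void* cookie, void* object)
    {
        new(object) MyObject;               // construct in place
        return B_OK;
    }

    static void
    my_destructor(void* cookie, void* object)
    {
        ((MyObject*)object)->~MyObject();
    }

    static void
    example()
    {
        object_cache* cache = create_object_cache("my objects",
            sizeof(MyObject), 0, NULL, my_constructor, my_destructor);
        void* object = object_cache_alloc(cache, 0);
        // ... use the object ...
        object_cache_free(cache, object);
        delete_object_cache(cache);
    }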
Added: haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp                            (rev 0)
+++ haiku/trunk/src/system/kernel/slab/HashedObjectCache.cpp    2010-01-19 19:13:25 UTC (rev 35174)
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2008, Axel Dörfler. All Rights Reserved.
+ * Copyright 2007, Hugo Santos. All Rights Reserved.
+ *
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include "HashedObjectCache.h"
+
+#include "slab_private.h"
+
+
+static inline int
+__fls0(size_t value)
+{
+       if (value == 0)
+               return -1;
+
+       int bit;
+       for (bit = 0; value != 1; bit++)
+               value >>= 1;
+       return bit;
+}
+
+
+static slab*
+allocate_slab(uint32 flags)
+{
+       return (slab*)slab_internal_alloc(sizeof(slab), flags);
+}
+
+
+static void
+free_slab(slab* slab)
+{
+       slab_internal_free(slab);
+}
+
+
+// #pragma mark -
+
+
+HashedObjectCache::HashedObjectCache()
+       :
+       hash_table(this)
+{
+}
+
+
+/*static*/ HashedObjectCache*
+HashedObjectCache::Create(const char* name, size_t object_size,
+       size_t alignment, size_t maximum, uint32 flags, void* cookie,
+       object_cache_constructor constructor, object_cache_destructor destructor,
+       object_cache_reclaimer reclaimer)
+{
+       void* buffer = slab_internal_alloc(sizeof(HashedObjectCache), flags);
+       if (buffer == NULL)
+               return NULL;
+
+       HashedObjectCache* cache = new(buffer) HashedObjectCache();
+
+       if (cache->Init(name, object_size, alignment, maximum, flags, cookie,
+                       constructor, destructor, reclaimer) != B_OK) {
+               cache->Delete();
+               return NULL;
+       }
+
+       if ((flags & CACHE_LARGE_SLAB) != 0)
+               cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size);
+       else
+               cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size);
+       cache->lower_boundary = __fls0(cache->object_size);
+
+       return cache;
+}
+
+
+slab*
+HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
+{
+       if (!check_cache_quota(this))
+               return NULL;
+
+       if (unlockWhileAllocating)
+               Unlock();
+
+       slab* slab = allocate_slab(flags);
+
+       if (unlockWhileAllocating)
+               Lock();
+
+       if (slab == NULL)
+               return NULL;
+
+       void* pages;
+       if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) == B_OK) {
+               if (InitSlab(slab, pages, slab_size))
+                       return slab;
+
+               (this->*free_pages)(pages);
+       }
+
+       free_slab(slab);
+       return NULL;
+}
+
+
+void
+HashedObjectCache::ReturnSlab(slab* slab)
+{
+       UninitSlab(slab);
+       (this->*free_pages)(slab->pages);
+}
+
+
+slab*
+HashedObjectCache::ObjectSlab(void* object) const
+{
+       Link* link = hash_table.Lookup(object);
+       if (link == NULL) {
+               panic("object cache: requested object %p missing from hash table",
+                       object);
+               return NULL;
+       }
+       return link->parent;
+}
+
+
+status_t
+HashedObjectCache::PrepareObject(slab* source, void* object)
+{
+       Link* link = _AllocateLink(CACHE_DONT_SLEEP);
+       if (link == NULL)
+               return B_NO_MEMORY;
+
+       link->buffer = object;
+       link->parent = source;
+
+       hash_table.Insert(link);
+       return B_OK;
+}
+
+
+void
+HashedObjectCache::UnprepareObject(slab* source, void* object)
+{
+       Link* link = hash_table.Lookup(object);
+       if (link == NULL) {
+               panic("object cache: requested object missing from hash table");
+               return;
+       }
+
+       if (link->parent != source) {
+               panic("object cache: slab mismatch");
+               return;
+       }
+
+       hash_table.Remove(link);
+       _FreeLink(link);
+}
+
+
+/*static*/ inline HashedObjectCache::Link*
+HashedObjectCache::_AllocateLink(uint32 flags)
+{
+       return (HashedObjectCache::Link*)
+               slab_internal_alloc(sizeof(HashedObjectCache::Link), flags);
+}
+
+
+/*static*/ inline void
+HashedObjectCache::_FreeLink(HashedObjectCache::Link* link)
+{
+       slab_internal_free(link);
+}

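A note on __fls0() above: it returns the index of the highest set bit, i.e.
floor(log2(value)), and Create() stores __fls0(object_size) as lower_boundary
so that HashKey() can shift away the low address bits that are identical for
all objects of the cache. Sample values:

    __fls0(1)   == 0
    __fls0(2)   == 1
    __fls0(512) == 9    // a cache of 512-byte objects shifts keys right
    __fls0(513) == 9    // by 9 bits, so objects one slot apart map to
                        // neighboring hash buckets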
Added: haiku/trunk/src/system/kernel/slab/HashedObjectCache.h
===================================================================
--- haiku/trunk/src/system/kernel/slab/HashedObjectCache.h                              (rev 0)
+++ haiku/trunk/src/system/kernel/slab/HashedObjectCache.h      2010-01-19 19:13:25 UTC (rev 35174)
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2008, Axel Dörfler. All Rights Reserved.
+ * Copyright 2007, Hugo Santos. All Rights Reserved.
+ *
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef HASHED_OBJECT_CACHE_H
+#define HASHED_OBJECT_CACHE_H
+
+
+#include <util/OpenHashTable.h>
+
+#include "ObjectCache.h"
+
+
+struct HashedObjectCache : ObjectCache {
+                                                               HashedObjectCache();
+
+       static  HashedObjectCache*      Create(const char* name, size_t object_size,
+                                                                       size_t alignment, size_t maximum,
+                                                                       uint32 flags, void* cookie,
+                                                                       object_cache_constructor constructor,
+                                                                       object_cache_destructor destructor,
+                                                                       object_cache_reclaimer reclaimer);
+
+       virtual slab*                           CreateSlab(uint32 flags,
+                                                                       bool unlockWhileAllocating);
+       virtual void                            ReturnSlab(slab* slab);
+       virtual slab*                           ObjectSlab(void* object) const;
+
+       virtual status_t                        PrepareObject(slab* source, void* object);
+       virtual void                            UnprepareObject(slab* source, void* object);
+
+private:
+                       struct Link {
+                               const void*     buffer;
+                               slab*           parent;
+                               Link*           next;
+                       };
+
+                       struct Definition {
+                               typedef HashedObjectCache       ParentType;
+                               typedef const void*                     KeyType;
+                               typedef Link                            ValueType;
+
+                               Definition(HashedObjectCache* parent)
+                                       :
+                                       parent(parent)
+                               {
+                               }
+
+                               Definition(const Definition& definition)
+                                       :
+                                       parent(definition.parent)
+                               {
+                               }
+
+                               size_t HashKey(const void* key) const
+                               {
+                                       return (((const uint8*)key) - ((const uint8*)0))
+                                               >> parent->lower_boundary;
+                               }
+
+                               size_t Hash(Link* value) const
+                               {
+                                       return HashKey(value->buffer);
+                               }
+
+                               bool Compare(const void* key, Link* value) const
+                               {
+                                       return value->buffer == key;
+                               }
+
+                               Link*& GetLink(Link* value) const
+                               {
+                                       return value->next;
+                               }
+
+                               HashedObjectCache*      parent;
+                       };
+
+                       typedef BOpenHashTable<Definition> HashTable;
+
+private:
+       static  Link*                           _AllocateLink(uint32 flags);
+       static  void                            _FreeLink(HashedObjectCache::Link* link);
+
+private:
+                       HashTable hash_table;
+                       size_t lower_boundary;
+};
+
+
+
+#endif // HASHED_OBJECT_CACHE_H

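The nested Definition above is the policy type BOpenHashTable expects: it
names the key and value types and supplies hashing, comparison, and access to
the intrusive next pointer. A minimal standalone sketch of the same pattern
for a toy type (Item and ItemDefinition are invented; extra typedefs such as
ParentType may be needed depending on the BOpenHashTable version):

    struct Item {
        int   key;
        Item* next;                         // intrusive link used by GetLink()
    };

    struct ItemDefinition {
        typedef int  KeyType;
        typedef Item ValueType;

        size_t HashKey(int key) const            { return (size_t)key; }
        size_t Hash(Item* value) const           { return HashKey(value->key); }
        bool Compare(int key, Item* value) const { return value->key == key; }
        Item*& GetLink(Item* value) const        { return value->next; }
    };

    typedef BOpenHashTable<ItemDefinition> ItemTable;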
Modified: haiku/trunk/src/system/kernel/slab/Jamfile
===================================================================
--- haiku/trunk/src/system/kernel/slab/Jamfile  2010-01-19 18:51:30 UTC (rev 35173)
+++ haiku/trunk/src/system/kernel/slab/Jamfile  2010-01-19 19:13:25 UTC (rev 35174)
@@ -3,8 +3,11 @@
 
 KernelMergeObject kernel_slab.o :
        allocator.cpp
+       HashedObjectCache.cpp
+       ObjectCache.cpp
        ObjectDepot.cpp
        Slab.cpp
+       SmallObjectCache.cpp
 
        : $(TARGET_KERNEL_PIC_CCFLAGS)
 ;

Added: haiku/trunk/src/system/kernel/slab/ObjectCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/ObjectCache.cpp                          (rev 0)
+++ haiku/trunk/src/system/kernel/slab/ObjectCache.cpp  2010-01-19 19:13:25 UTC (rev 35174)
@@ -0,0 +1,381 @@
+/*
+ * Copyright 2008, Axel Dörfler. All Rights Reserved.
+ * Copyright 2007, Hugo Santos. All Rights Reserved.
+ *
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include "ObjectCache.h"
+
+#include <string.h>
+
+#include "slab_private.h"
+#include <vm/vm.h>
+#include <vm/VMAddressSpace.h>
+
+
+static const size_t kCacheColorPeriod = 8;
+
+kernel_args* ObjectCache::sKernelArgs = NULL;
+
+
+static void
+object_cache_commit_slab(ObjectCache* cache, slab* slab)
+{
+       void* pages = (void*)ROUNDDOWN((addr_t)slab->pages, B_PAGE_SIZE);
+       if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size,
+               B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0)
+               panic("failed to create_area()");
+}
+
+
+static void
+object_cache_return_object_wrapper(object_depot* depot, void* cookie,
+       void* object)
+{
+       object_cache_free((ObjectCache*)cookie, object);
+}
+
+
+// #pragma mark -
+
+
+ObjectCache::~ObjectCache()
+{
+}
+
+
+status_t
+ObjectCache::Init(const char* name, size_t objectSize,
+       size_t alignment, size_t maximum, uint32 flags, void* cookie,
+       object_cache_constructor constructor, object_cache_destructor destructor,
+       object_cache_reclaimer reclaimer)
+{
+       strlcpy(this->name, name, sizeof(this->name));
+
+       mutex_init(&lock, this->name);
+
+       if (objectSize < sizeof(object_link))
+               objectSize = sizeof(object_link);
+
+       if (alignment > 0 && (objectSize & (alignment - 1)))
+               object_size = objectSize + alignment - (objectSize & (alignment - 1));
+       else
+               object_size = objectSize;
+
+       TRACE_CACHE(this, "init %lu, %lu -> %lu", objectSize, alignment,
+               object_size);
+
+       cache_color_cycle = 0;
+       total_objects = 0;
+       used_count = 0;
+       empty_count = 0;
+       pressure = 0;
+       min_object_reserve = 0;
+
+       usage = 0;
+       this->maximum = maximum;
+
+       this->flags = flags;
+
+       resize_request = NULL;
+
+       // TODO: depot destruction is obviously broken
+       // no gain in using the depot in single cpu setups
+       //if (smp_get_num_cpus() == 1)
+               this->flags |= CACHE_NO_DEPOT;
+
+       if (!(this->flags & CACHE_NO_DEPOT)) {
+               status_t status = object_depot_init(&depot, flags, this,
+                       object_cache_return_object_wrapper);
+               if (status < B_OK) {
+                       mutex_destroy(&lock);
+                       return status;
+               }
+       }
+
+       this->cookie = cookie;
+       this->constructor = constructor;
+       this->destructor = destructor;
+       this->reclaimer = reclaimer;
+
+       if (this->flags & CACHE_DURING_BOOT) {
+               allocate_pages = &ObjectCache::EarlyAllocatePages;
+               free_pages = &ObjectCache::EarlyFreePages;
+       } else {
+               allocate_pages = &ObjectCache::AllocatePages;
+               free_pages = &ObjectCache::FreePages;
+       }
+
+       return B_OK;
+}
+
+
+void
+ObjectCache::InitPostArea()
+{
+       if (allocate_pages != &ObjectCache::EarlyAllocatePages)
+               return;
+
+       SlabList::Iterator it = full.GetIterator();
+       while (it.HasNext())
+               object_cache_commit_slab(this, it.Next());
+
+       it = partial.GetIterator();
+       while (it.HasNext())
+               object_cache_commit_slab(this, it.Next());
+
+       it = empty.GetIterator();
+       while (it.HasNext())
+               object_cache_commit_slab(this, it.Next());
+
+       allocate_pages = &ObjectCache::AllocatePages;
+       free_pages = &ObjectCache::FreePages;
+}
+
+
+void
+ObjectCache::Delete()
+{
+       this->~ObjectCache();
+       slab_internal_free(this);
+}
+
+
+slab*
+ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount)
+{
+       TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
+               ((uint8*)pages) + byteCount, byteCount);
+
+       slab->pages = pages;
+       slab->count = slab->size = byteCount / object_size;
+       slab->free = NULL;
+       total_objects += slab->size;
+
+       size_t spareBytes = byteCount - (slab->size * object_size);
+       slab->offset = cache_color_cycle;
+
+       if (slab->offset > spareBytes)
+               cache_color_cycle = slab->offset = 0;
+       else
+               cache_color_cycle += kCacheColorPeriod;
+
+       TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
+               slab->size, spareBytes, slab->offset);
+
+       uint8* data = ((uint8*)pages) + slab->offset;
+
+       CREATE_PARANOIA_CHECK_SET(slab, "slab");
+
+       for (size_t i = 0; i < slab->size; i++) {
+               bool failedOnFirst = false;
+
+               status_t status = PrepareObject(slab, data);
+               if (status < B_OK)
+                       failedOnFirst = true;
+               else if (constructor)
+                       status = constructor(cookie, data);
+
+               if (status < B_OK) {
+                       if (!failedOnFirst)
+                               UnprepareObject(slab, data);
+
+                       data = ((uint8*)pages) + slab->offset;
+                       for (size_t j = 0; j < i; j++) {
+                               if (destructor)
+                                       destructor(cookie, data);
+                               UnprepareObject(slab, data);
+                               data += object_size;
+                       }
+
+                       DELETE_PARANOIA_CHECK_SET(slab);
+
+                       return NULL;
+               }
+
+               _push(slab->free, object_to_link(data, object_size));
+
+               ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
+                       &object_to_link(data, object_size)->next, 
sizeof(void*));
+
+               data += object_size;
+       }
+
+       return slab;
+}
+
+
+void
+ObjectCache::UninitSlab(slab* slab)
+{
+       TRACE_CACHE(this, "destruct %p", slab);
+
+       if (slab->count != slab->size)
+               panic("cache: destroying a slab which isn't empty.");
+
+       total_objects -= slab->size;
+
+       DELETE_PARANOIA_CHECK_SET(slab);
+
+       uint8* data = ((uint8*)slab->pages) + slab->offset;
+
+       for (size_t i = 0; i < slab->size; i++) {
+               if (destructor)
+                       destructor(cookie, data);
+               UnprepareObject(slab, data);
+               data += object_size;
+       }
+}
+
+
+status_t
+ObjectCache::PrepareObject(slab* source, void* object)
+{
+       return B_OK;
+}
+
+
+void
+ObjectCache::UnprepareObject(slab* source, void* object)
+{
+}
+
+
+void
+ObjectCache::ReturnObjectToSlab(slab* source, void* object)
+{
+       if (source == NULL) {
+               panic("object_cache: free'd object has no slab");
+               return;
+       }
+
+       ParanoiaChecker _(source);
+
+       object_link* link = object_to_link(object, object_size);
+
+       TRACE_CACHE(this, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
+               object, link, source, source->size - source->count,
+               empty_count);
+
+       _push(source->free, link);
+       source->count++;
+       used_count--;
+
+       ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));
+
+       if (source->count == source->size) {
+               partial.Remove(source);
+
+               if (empty_count < pressure
+                       && total_objects - used_count - source->size
+                               >= min_object_reserve) {
+                       empty_count++;
+                       empty.Add(source);
+               } else {
+                       ReturnSlab(source);
+               }
+       } else if (source->count == 1) {
+               full.Remove(source);
+               partial.Add(source);
+       }
+}
+
+
+/*static*/ void
+ObjectCache::SetKernelArgs(kernel_args* args)
+{
+       sKernelArgs = args;
+}
+
+
+status_t
+ObjectCache::AllocatePages(void** pages, uint32 flags,
+       bool unlockWhileAllocating)
+{
+       TRACE_CACHE(this, "allocate pages (%lu, 0x0%lx)", slab_size, flags);
+
+       uint32 lock = B_FULL_LOCK;
+       if (this->flags & CACHE_UNLOCKED_PAGES)
+               lock = B_NO_LOCK;
+
+       uint32 addressSpec = B_ANY_KERNEL_ADDRESS;
+       if ((this->flags & CACHE_ALIGN_ON_SIZE) != 0
+               && slab_size != B_PAGE_SIZE)
+               addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
+
+       if (unlockWhileAllocating)
+               Unlock();
+
+       // If we are allocating, it is because we need the pages immediately,
+       // so we lock them. When moving the slab to the empty list we should
+       // unlock them, and lock them again when getting one from the empty list.
+       area_id areaId = create_area_etc(VMAddressSpace::KernelID(),
+               name, pages, addressSpec, slab_size, lock,
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
+               (flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
+
+       if (unlockWhileAllocating)
+               Lock();
+
+       if (areaId < 0)
+               return areaId;
+
+       usage += slab_size;
+
+       TRACE_CACHE(this, "  ... = { %ld, %p }", areaId, *pages);
+
+       return B_OK;
+}
+
+
+void
+ObjectCache::FreePages(void* pages)
+{
+       area_id id = area_for(pages);
+
+       TRACE_CACHE(this, "delete pages %p (%ld)", pages, id);
+
+       if (id < 0) {
+               panic("object cache: freeing unknown area");
+               return;
+       }
+
+       delete_area(id);
+
+       usage -= slab_size;
+}
+
+
+status_t
+ObjectCache::EarlyAllocatePages(void** pages, uint32 flags,
+       bool unlockWhileAllocating)
+{
+       TRACE_CACHE(this, "early allocate pages (%lu, 0x0%lx)", slab_size,
+               flags);
+
+       if (unlockWhileAllocating)
+               Unlock();
+
+       addr_t base = vm_allocate_early(sKernelArgs, slab_size,
+               slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+
+       if (unlockWhileAllocating)
+               Lock();
+
+       *pages = (void*)base;
+
+       usage += slab_size;
+
+       TRACE_CACHE(this, "  ... = { %p }", *pages);
+
+       return B_OK;
+}
+
+
+void
+ObjectCache::EarlyFreePages(void* pages)
+{
+       panic("memory pressure on bootup?");
+}

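InitSlab() above also implements simple cache coloring: each new slab places
its first object at a cycling offset (stepped by kCacheColorPeriod and
wrapped once the slab's spare bytes run out), so the heads of successive
slabs do not all land on the same CPU cache lines. A standalone model of just
that cycling, with assumed sizes:

    static size_t sCacheColorCycle = 0;
    static const size_t kColorPeriod = 8;   // kCacheColorPeriod above

    // E.g. a 4096-byte slab of 480-byte objects holds 8 objects and has
    // 4096 - 8 * 480 = 256 spare bytes available for coloring offsets.
    static size_t
    next_slab_offset(size_t spareBytes)
    {
        size_t offset = sCacheColorCycle;
        if (offset > spareBytes)
            sCacheColorCycle = offset = 0;  // wrap: spare room exhausted
        else
            sCacheColorCycle += kColorPeriod;
        return offset;                      // 0, 8, 16, ... then 0 again
    }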
Added: haiku/trunk/src/system/kernel/slab/ObjectCache.h
===================================================================
--- haiku/trunk/src/system/kernel/slab/ObjectCache.h                            (rev 0)
+++ haiku/trunk/src/system/kernel/slab/ObjectCache.h    2010-01-19 19:13:25 UTC (rev 35174)
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2008, Axel Dörfler. All Rights Reserved.
+ * Copyright 2007, Hugo Santos. All Rights Reserved.
+ *
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef OBJECT_CACHE_H
+#define OBJECT_CACHE_H
+
+
+#include <lock.h>
+#include <slab/ObjectDepot.h>
+#include <slab/Slab.h>
+#include <util/DoublyLinkedList.h>
+
+
+struct ResizeRequest;
+
+
+struct object_link {
+       struct object_link* next;
+};
+
+struct slab : DoublyLinkedListLinkImpl<slab> {
+       void*                   pages;
+       size_t                  size;           // total number of objects
+       size_t                  count;          // free objects
+       size_t                  offset;
+       object_link*    free;
+};
+
+typedef DoublyLinkedList<slab> SlabList;
+
+struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
+                       char                            name[32];
+                       mutex                           lock;
+                       size_t                          object_size;
+                       size_t                          cache_color_cycle;
+                       SlabList                        empty;
+                       SlabList                        partial;
+                       SlabList                        full;
+                       size_t                          total_objects;          // total number of objects
+                       size_t                          used_count;                     // used objects
+                       size_t                          empty_count;            // empty slabs
+                       size_t                          pressure;
+                       size_t                          min_object_reserve;
+                                                                       // minimum number of free objects
+
+                       size_t                          slab_size;
+                       size_t                          usage;
+                       size_t                          maximum;
+                       uint32                          flags;
+
+                       ResizeRequest*          resize_request;
+
+                       void*                           cookie;
+                       object_cache_constructor constructor;
+                       object_cache_destructor destructor;
+                       object_cache_reclaimer reclaimer;
+
+                       status_t                        (ObjectCache::*allocate_pages)(void** pages,
+                                                                       uint32 flags, bool unlockWhileAllocating);
+                       void                            (ObjectCache::*free_pages)(void* pages);
+
+                       object_depot            depot;
+
+public:
+       virtual                                         ~ObjectCache();
+
+                       status_t                        Init(const char* name, size_t objectSize,
+                                                                       size_t alignment, size_t maximum,
+                                                                       uint32 flags, void* cookie,
+                                                                       object_cache_constructor constructor,
+                                                                       object_cache_destructor destructor,
+                                                                       object_cache_reclaimer reclaimer);
+                       void                            InitPostArea();
+                       void                            Delete();
+
+       virtual slab*                           CreateSlab(uint32 flags,
+                                                                       bool unlockWhileAllocating) = 0;
+       virtual void                            ReturnSlab(slab* slab) = 0;
+       virtual slab*                           ObjectSlab(void* object) const = 0;
+
+                       slab*                           InitSlab(slab* slab, void* pages,
+                                                                       size_t byteCount);
+                       void                            UninitSlab(slab* slab);
+
+       virtual status_t                        PrepareObject(slab* source, void* object);
+       virtual void                            UnprepareObject(slab* source, void* object);
+
+                       void                            ReturnObjectToSlab(slab* source, void* object);
+
+                       bool                            Lock()  { return mutex_lock(&lock) == B_OK; }
+                       void                            Unlock()        { mutex_unlock(&lock); }
+
+       static  void                            SetKernelArgs(kernel_args* args);
+                       status_t                        AllocatePages(void** pages, uint32 flags,
+                                                                       bool unlockWhileAllocating);
+                       void                            FreePages(void* pages);
+                       status_t                        EarlyAllocatePages(void** pages, uint32 flags,
+                                                                       bool unlockWhileAllocating);
+                       void                            EarlyFreePages(void* pages);
+
+private:
+       static  kernel_args*            sKernelArgs;
+};
+
+
+static inline void*
+link_to_object(object_link* link, size_t objectSize)
+{
+       return ((uint8*)link) - (objectSize - sizeof(object_link));
+}
+
+
+static inline object_link*
+object_to_link(void* object, size_t objectSize)
+{
+       return (object_link*)(((uint8*)object)
+               + (objectSize - sizeof(object_link)));
+}
+
+
+static inline slab *
+slab_in_pages(const void *pages, size_t slab_size)
+{
+       return (slab *)(((uint8 *)pages) + slab_size - sizeof(slab));
+}
+
+
+static inline const void *
+lower_boundary(void *object, size_t byteCount)
+{
+       const uint8 *null = (uint8 *)NULL;
+       return null + ((((uint8 *)object) - null) & ~(byteCount - 1));
+}
+
+
+static inline bool
+check_cache_quota(ObjectCache *cache)
+{
+       if (cache->maximum == 0)
+               return true;
+
+       return (cache->usage + cache->slab_size) <= cache->maximum;
+}
+
+
+#endif // OBJECT_CACHE_H

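A note on the helpers above: the free-list link occupies the last
sizeof(object_link) bytes of each object slot, and link_to_object()/
object_to_link() are exact inverses. A small self-check under that assumption
(illustrative, not kernel code):

    #include <assert.h>

    static void
    test_link_round_trip()
    {
        uint8 slot[64];     // stands in for one 64-byte object in a slab

        object_link* link = object_to_link(slot, sizeof(slot));
        // The link sits in the slot's last pointer-sized bytes ...
        assert((uint8*)link == slot + sizeof(slot) - sizeof(object_link));
        // ... and mapping back recovers the start of the object.
        assert(link_to_object(link, sizeof(slot)) == slot);
    }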
Modified: haiku/trunk/src/system/kernel/slab/ObjectDepot.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/ObjectDepot.cpp  2010-01-19 18:51:30 UTC (rev 35173)
+++ haiku/trunk/src/system/kernel/slab/ObjectDepot.cpp  2010-01-19 19:13:25 UTC (rev 35174)
@@ -19,55 +19,65 @@
        // TODO: Should be dynamically tuned per cache.
 
 
-struct depot_magazine {
-       struct depot_magazine *next;
-       uint16 current_round, round_count;
-       void *rounds[0];
+struct DepotMagazine {
+                       DepotMagazine*          next;
+                       uint16                          current_round;
+                       uint16                          round_count;
+                       void*                           rounds[0];
+
+public:
+       inline  bool                            IsEmpty() const;
+       inline  bool                            IsFull() const;
+
+       inline  void*                           Pop();
+       inline  bool                            Push(void* object);
 };
 
 
 struct depot_cpu_store {
-       recursive_lock lock;
-       struct depot_magazine *loaded, *previous;
+       recursive_lock  lock;
+       DepotMagazine*  loaded;
+       DepotMagazine*  previous;
 };
 
 
-static inline bool
-is_magazine_empty(depot_magazine *magazine)
+bool
+DepotMagazine::IsEmpty() const
 {
-       return magazine->current_round == 0;
+       return current_round == 0;
 }
 
 
-static inline bool
-is_magazine_full(depot_magazine *magazine)
+bool

[... truncated: 1599 lines follow ...]
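The DepotMagazine methods replacing the old is_magazine_*() helpers treat a
magazine as a fixed-size stack of cached object pointers, with current_round
as the stack depth (IsEmpty() is visible above). Since the diff is truncated
here, the following bodies are a plausible reconstruction, not a quote from
the commit:

    bool
    DepotMagazine::IsFull() const
    {
        return current_round == round_count;
    }

    void*
    DepotMagazine::Pop()
    {
        return rounds[--current_round];     // caller must check IsEmpty()
    }

    bool
    DepotMagazine::Push(void* object)
    {
        if (IsFull())
            return false;
        rounds[current_round++] = object;
        return true;
    }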
