[haiku-commits] haiku: hrev53600 - src/system/libroot/posix/malloc_hoard2

  • From: Adrien Destugues <pulkomandy@xxxxxxxxx>
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sun, 24 Nov 2019 09:37:42 -0500 (EST)

hrev53600 adds 2 changesets to branch 'master'
old head: cc7e3a05227d02edd59cf28d5be2c536ea0ede93
new head: 1f391e37ea3dd9f166204c3e658eab29ace4284b
overview: 
https://git.haiku-os.org/haiku/log/?qt=range&q=1f391e37ea3d+%5Ecc7e3a05227d

----------------------------------------------------------------------------

dd2676821a40: Revert "hoard2: Remove."
  
  This reverts commit 3a2175926b20fad519ffa52bc1ecc770a9193480.
  
  A single week is clearly not enough to test a new memory allocator.
  Bring back hoard2 for the time being; we'll consider rpmalloc when it
  actually works better.

1f391e37ea3d: Switch back to hoard2 for now.
  
  We'll reconsider rpmalloc when it has less overhead.

                             [ Adrien Destugues <pulkomandy@xxxxxxxxxxxxx> ]

----------------------------------------------------------------------------

16 files changed, 3762 insertions(+), 1 deletion(-)
src/system/libroot/posix/Jamfile                 |   2 +-
src/system/libroot/posix/malloc_hoard2/Jamfile   |  21 +
.../posix/malloc_hoard2/arch-specific.cpp        | 379 ++++++++++++
.../libroot/posix/malloc_hoard2/arch-specific.h  |  56 ++
src/system/libroot/posix/malloc_hoard2/block.h   | 229 +++++++
src/system/libroot/posix/malloc_hoard2/config.h  |  97 +++
src/system/libroot/posix/malloc_hoard2/heap.cpp  | 460 ++++++++++++++
src/system/libroot/posix/malloc_hoard2/heap.h    | 538 +++++++++++++++++
.../libroot/posix/malloc_hoard2/heapstats.h      | 175 ++++++
.../libroot/posix/malloc_hoard2/processheap.cpp  | 230 +++++++
.../libroot/posix/malloc_hoard2/processheap.h    | 267 +++++++++
.../libroot/posix/malloc_hoard2/superblock.cpp   | 129 ++++
.../libroot/posix/malloc_hoard2/superblock.h     | 297 +++++++++
.../libroot/posix/malloc_hoard2/threadheap.cpp   | 120 ++++
.../libroot/posix/malloc_hoard2/threadheap.h     | 163 +++++
.../libroot/posix/malloc_hoard2/wrapper.cpp      | 600 +++++++++++++++++++

############################################################################

Commit:      dd2676821a40a9dbdf935665960bdce980382325
URL:         https://git.haiku-os.org/haiku/commit/?id=dd2676821a40
Author:      Adrien Destugues <pulkomandy@xxxxxxxxxxxxx>
Date:        Sun Nov 24 14:12:43 2019 UTC

Revert "hoard2: Remove."

This reverts commit 3a2175926b20fad519ffa52bc1ecc770a9193480.

A single week is clearly not enough to test a new memory allocator.
Bring back hoard2 for the time being; we'll consider rpmalloc when it
actually works better.

----------------------------------------------------------------------------

diff --git a/src/system/libroot/posix/malloc/Jamfile b/src/system/libroot/posix/malloc/Jamfile
new file mode 100644
index 0000000000..9a14024dd2
--- /dev/null
+++ b/src/system/libroot/posix/malloc/Jamfile
@@ -0,0 +1,21 @@
+SubDir HAIKU_TOP src system libroot posix malloc ;
+
+UsePrivateHeaders libroot shared ;
+
+local architectureObject ;
+for architectureObject in [ MultiArchSubDirSetup ] {
+       on $(architectureObject) {
+               local architecture = $(TARGET_PACKAGING_ARCH) ;
+
+               UsePrivateSystemHeaders ;
+
+               MergeObject <$(architecture)>posix_malloc.o :
+                       arch-specific.cpp
+                       heap.cpp
+                       processheap.cpp
+                       superblock.cpp
+                       threadheap.cpp
+                       wrapper.cpp
+                       ;
+       }
+}
diff --git a/src/system/libroot/posix/malloc/arch-specific.cpp b/src/system/libroot/posix/malloc/arch-specific.cpp
new file mode 100644
index 0000000000..7ac64c2a55
--- /dev/null
+++ b/src/system/libroot/posix/malloc/arch-specific.cpp
@@ -0,0 +1,379 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#include "arch-specific.h"
+#include "heap.h"
+
+#include <OS.h>
+#include <Debug.h>
+#include <syscalls.h>
+
+#include <libroot_private.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+
+//#define TRACE_CHUNKS
+#ifdef TRACE_CHUNKS
+#      define CTRACE(x) debug_printf x
+#else
+#      define CTRACE(x) ;
+#endif
+
+using namespace BPrivate;
+
+struct free_chunk {
+       free_chunk      *next;
+       size_t          size;
+};
+
+
+static const size_t kInitialHeapSize = 64 * B_PAGE_SIZE;
+       // that's about what hoard allocates anyway (should be kHeapIncrement
+       // aligned)
+
+static const size_t kHeapIncrement = 16 * B_PAGE_SIZE;
+       // the steps in which to increase the heap size (must be a power of 2)
+
+#if B_HAIKU_64_BIT
+static const addr_t kHeapReservationBase = 0x1000000000;
+static const addr_t kHeapReservationSize = 0x1000000000;
+#else
+static const addr_t kHeapReservationBase = 0x18000000;
+static const addr_t kHeapReservationSize = 0x48000000;
+#endif
+
+static area_id sHeapArea;
+static hoardLockType sHeapLock;
+static void *sHeapBase;
+static addr_t sFreeHeapBase;
+static size_t sFreeHeapSize, sHeapAreaSize;
+static free_chunk *sFreeChunks;
+
+
+void
+__init_after_fork(void)
+{
+       // find the heap area
+       sHeapArea = area_for((void*)sFreeHeapBase);
+       if (sHeapArea < 0) {
+               // Where is it gone?
+               debug_printf("hoard: init_after_fork(): thread %" B_PRId32 ", Heap "
+                       "area not found! Base address: %p\n", find_thread(NULL),
+                       sHeapBase);
+               exit(1);
+       }
+}
+
+
+extern "C" status_t
+__init_heap(void)
+{
+       hoardHeap::initNumProcs();
+
+       // This will locate the heap base at 384 MB and reserve the next 1152 MB
+       // for it. They may get reclaimed by other areas, though, but the maximum
+       // size of the heap is guaranteed until the space is really needed.
+       sHeapBase = (void *)kHeapReservationBase;
+       status_t status = _kern_reserve_address_range((addr_t *)&sHeapBase,
+               B_RANDOMIZED_BASE_ADDRESS, kHeapReservationSize);
+       if (status != B_OK)
+               sHeapBase = NULL;
+
+       uint32 protection = B_READ_AREA | B_WRITE_AREA;
+       if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
+               protection |= B_EXECUTE_AREA;
+       sHeapArea = create_area("heap", (void **)&sHeapBase,
+               status == B_OK ? B_EXACT_ADDRESS : B_RANDOMIZED_BASE_ADDRESS,
+               kInitialHeapSize, B_NO_LOCK, protection);
+       if (sHeapArea < B_OK)
+               return sHeapArea;
+
+       sFreeHeapBase = (addr_t)sHeapBase;
+       sHeapAreaSize = kInitialHeapSize;
+
+       hoardLockInit(sHeapLock, "heap");
+
+       return B_OK;
+}
+
+
+extern "C" void
+__heap_terminate_after()
+{
+       // nothing to do
+}
+
+
+static void
+insert_chunk(free_chunk *newChunk)
+{
+       free_chunk *chunk = (free_chunk *)sFreeChunks, *smaller = NULL;
+       for (; chunk != NULL; chunk = chunk->next) {
+               if (chunk->size < newChunk->size)
+                       smaller = chunk;
+               else
+                       break;
+       }
+
+       if (smaller) {
+               newChunk->next = smaller->next;
+               smaller->next = newChunk;
+       } else {
+               newChunk->next = sFreeChunks;
+               sFreeChunks = newChunk;
+       }
+}
+
+
+namespace BPrivate {
+
+void *
+hoardSbrk(long size)
+{
+       assert(size > 0);
+       CTRACE(("sbrk: size = %ld\n", size));
+
+       // align size request
+       size = (size + hoardHeap::ALIGNMENT - 1) & ~(hoardHeap::ALIGNMENT - 1);
+
+       // choose correct protection flags
+       uint32 protection = B_READ_AREA | B_WRITE_AREA;
+       if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
+               protection |= B_EXECUTE_AREA;
+
+       hoardLock(sHeapLock);
+
+       // find chunk in free list
+       free_chunk *chunk = sFreeChunks, *last = NULL;
+       for (; chunk != NULL; chunk = chunk->next) {
+               CTRACE(("  chunk %p (%ld)\n", chunk, chunk->size));
+
+               if (chunk->size < (size_t)size) {
+                       last = chunk;
+                       continue;
+               }
+
+               // this chunk is large enough to satisfy the request
+
+               SERIAL_PRINT(("HEAP-%ld: found free chunk to hold %ld bytes\n",
+                       find_thread(NULL), size));
+
+               void *address = (void *)chunk;
+
+               if (chunk->size > (size_t)size + sizeof(free_chunk)) {
+                       // divide this chunk into smaller bits
+                       size_t newSize = chunk->size - size;
+                       free_chunk *next = chunk->next;
+
+                       chunk = (free_chunk *)((addr_t)chunk + size);
+                       chunk->next = next;
+                       chunk->size = newSize;
+
+                       if (last != NULL) {
+                               last->next = next;
+                               insert_chunk(chunk);
+                       } else
+                               sFreeChunks = chunk;
+               } else {
+                       chunk = chunk->next;
+
+                       if (last != NULL)
+                               last->next = chunk;
+                       else
+                               sFreeChunks = chunk;
+               }
+
+               hoardUnlock(sHeapLock);
+               return address;
+       }
+
+       // There was no chunk, let's see if the area is large enough
+
+       size_t oldHeapSize = sFreeHeapSize;
+       sFreeHeapSize += size;
+
+       // round to next heap increment aligned size
+       size_t incrementAlignedSize = (sFreeHeapSize + kHeapIncrement - 1)
+               & ~(kHeapIncrement - 1);
+
+       if (incrementAlignedSize <= sHeapAreaSize) {
+               SERIAL_PRINT(("HEAP-%ld: heap area large enough for %ld\n",
+                       find_thread(NULL), size));
+               // the area is large enough already
+               hoardUnlock(sHeapLock);
+               return (void *)(sFreeHeapBase + oldHeapSize);
+       }
+
+       // We need to grow the area
+
+       SERIAL_PRINT(("HEAP-%ld: need to resize heap area to %ld (%ld requested)\n",
+               find_thread(NULL), incrementAlignedSize, size));
+
+       status_t status = resize_area(sHeapArea, incrementAlignedSize);
+       if (status != B_OK) {
+               // Either the system is out of memory or another area is in the way and
+               // prevents ours from being resized. As a special case of the latter
+               // the user might have mmap()ed something over malloc()ed memory. This
+               // splits the heap area in two, the first one retaining the original
+               // area ID. In either case, if there's still memory, it is a good idea
+               // to try and allocate a new area.
+               sFreeHeapSize = oldHeapSize;
+
+               if (status == B_NO_MEMORY) {
+                       hoardUnlock(sHeapLock);
+                       return NULL;
+               }
+
+               size_t newHeapSize = (size + kHeapIncrement - 1) / kHeapIncrement
+                       * kHeapIncrement;
+
+               // First try at the location directly after the current heap area, if
+               // that is still in the reserved memory region.
+               void* base = (void*)(sFreeHeapBase + sHeapAreaSize);
+               area_id area = -1;
+               if (sHeapBase != NULL
+                       && base >= sHeapBase
+                       && (addr_t)base + newHeapSize
+                               <= (addr_t)sHeapBase + kHeapReservationSize) {
+                       area = create_area("heap", &base, B_EXACT_ADDRESS, newHeapSize,
+                               B_NO_LOCK, protection);
+
+                       if (area == B_NO_MEMORY) {
+                               hoardUnlock(sHeapLock);
+                               return NULL;
+                       }
+               }
+
+               // If we don't have an area yet, try again with a free location
+               // allocation.
+               if (area < 0) {
+                       base = (void*)(sFreeHeapBase + sHeapAreaSize);
+                       area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS,
+                               newHeapSize, B_NO_LOCK, protection);
+               }
+
+               if (area < 0) {
+                       hoardUnlock(sHeapLock);
+                       return NULL;
+               }
+
+               // We have a new area, so make it the new heap area.
+               sHeapArea = area;
+               sFreeHeapBase = (addr_t)base;
+               sHeapAreaSize = newHeapSize;
+               sFreeHeapSize = size;
+               oldHeapSize = 0;
+       } else
+               sHeapAreaSize = incrementAlignedSize;
+
+       hoardUnlock(sHeapLock);
+       return (void *)(sFreeHeapBase + oldHeapSize);
+}
+
+
+void
+hoardUnsbrk(void *ptr, long size)
+{
+       CTRACE(("unsbrk: %p, %ld!\n", ptr, size));
+
+       hoardLock(sHeapLock);
+
+       // TODO: hoard always allocates and frees in typical sizes, so we could
+       //      save a lot of effort if we just had a similar mechanism
+
+       // We add this chunk to our free list - first, try to find an adjacent
+       // chunk, so that we can merge them together
+
+       free_chunk *chunk = (free_chunk *)sFreeChunks, *last = NULL, *smaller = NULL;
+       for (; chunk != NULL; chunk = chunk->next) {
+               if ((addr_t)chunk + chunk->size == (addr_t)ptr
+                       || (addr_t)ptr + size == (addr_t)chunk) {
+                       // chunks are adjacent - merge them
+
+                       CTRACE(("  found adjacent chunks: %p, %ld\n", chunk, chunk->size));
+                       if (last)
+                               last->next = chunk->next;
+                       else
+                               sFreeChunks = chunk->next;
+
+                       if ((addr_t)chunk < (addr_t)ptr)
+                               chunk->size += size;
+                       else {
+                               free_chunk *newChunk = (free_chunk *)ptr;
+                               newChunk->next = chunk->next;
+                               newChunk->size = size + chunk->size;
+                               chunk = newChunk;
+                       }
+
+                       insert_chunk(chunk);
+                       hoardUnlock(sHeapLock);
+                       return;
+               }
+
+               last = chunk;
+
+               if (chunk->size < (size_t)size)
+                       smaller = chunk;
+       }
+
+       // we didn't find an adjacent chunk, so insert the new chunk into the list
+
+       free_chunk *newChunk = (free_chunk *)ptr;
+       newChunk->size = size;
+       if (smaller) {
+               newChunk->next = smaller->next;
+               smaller->next = newChunk;
+       } else {
+               newChunk->next = sFreeChunks;
+               sFreeChunks = newChunk;
+       }
+
+       hoardUnlock(sHeapLock);
+}
+
+
+void
+hoardLockInit(hoardLockType &lock, const char *name)
+{
+       mutex_init_etc(&lock, name, MUTEX_FLAG_ADAPTIVE);
+}
+
+
+void
+hoardLock(hoardLockType &lock)
+{
+       mutex_lock(&lock);
+}
+
+
+void
+hoardUnlock(hoardLockType &lock)
+{
+       mutex_unlock(&lock);
+}
+
+
+void
+hoardYield(void)
+{
+       _kern_thread_yield();
+}
+
+}      // namespace BPrivate
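
For context, hoardSbrk() and hoardUnsbrk() above manage a list of free chunks that insert_chunk() keeps sorted by ascending size; allocation takes the first chunk that fits and splits off the remainder, while freeing merges address-adjacent chunks. A minimal stand-alone sketch of that size-ordered free list, with hypothetical names and without the locking and area growth, could look like this:

    // Simplified, hypothetical sketch of the size-sorted free chunk list
    // used by hoardSbrk()/hoardUnsbrk(); no locking, no area resizing.
    #include <cstddef>
    #include <cstdint>

    struct FreeChunk {
            FreeChunk       *next;
            size_t          size;
    };

    static FreeChunk *sChunkList = NULL;

    // Keep the list sorted by ascending size, like insert_chunk() above.
    static void
    insert_sorted(FreeChunk *chunk)
    {
            FreeChunk **link = &sChunkList;
            while (*link != NULL && (*link)->size < chunk->size)
                    link = &(*link)->next;
            chunk->next = *link;
            *link = chunk;
    }

    // First fit: return the first chunk that is large enough, splitting off
    // any remainder as a new, smaller free chunk.
    static void *
    take_chunk(size_t size)
    {
            for (FreeChunk **link = &sChunkList; *link != NULL;
                            link = &(*link)->next) {
                    FreeChunk *chunk = *link;
                    if (chunk->size < size)
                            continue;
                    *link = chunk->next;
                    if (chunk->size > size + sizeof(FreeChunk)) {
                            FreeChunk *rest
                                    = (FreeChunk *)((uintptr_t)chunk + size);
                            rest->size = chunk->size - size;
                            insert_sorted(rest);
                    }
                    return chunk;
            }
            return NULL;    // the real code grows the heap area here
    }
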
diff --git a/src/system/libroot/posix/malloc/arch-specific.h b/src/system/libroot/posix/malloc/arch-specific.h
new file mode 100644
index 0000000000..88760c2e38
--- /dev/null
+++ b/src/system/libroot/posix/malloc/arch-specific.h
@@ -0,0 +1,56 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef _ARCH_SPECIFIC_H_
+#define _ARCH_SPECIFIC_H_
+
+#include "config.h"
+
+#include <new>
+
+#include <OS.h>
+#include <assert.h>
+
+#include <locks.h>
+
+
+// TODO: some kind of adaptive mutex (i.e. trying to spin for a while before
+//  may be a better choice
+typedef mutex hoardLockType;
+
+namespace BPrivate {
+
+///// Lock-related wrappers.
+
+void hoardLockInit(hoardLockType &lock, const char *name);
+void hoardLock(hoardLockType &lock);
+void hoardUnlock(hoardLockType &lock);
+
+///// Memory-related wrapper.
+
+void *hoardSbrk(long size);
+void hoardUnsbrk(void *ptr, long size);
+
+///// Other.
+
+void hoardYield(void);
+
+}      // namespace BPrivate
+
+#endif // _ARCH_SPECIFIC_H_
diff --git a/src/system/libroot/posix/malloc/block.h b/src/system/libroot/posix/malloc/block.h
new file mode 100644
index 0000000000..4d60cc1254
--- /dev/null
+++ b/src/system/libroot/posix/malloc/block.h
@@ -0,0 +1,229 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef _BLOCK_H_
+#define _BLOCK_H_
+
+#include "config.h"
+
+//#include <assert.h>
+
+namespace BPrivate {
+
+class superblock;
+
+class block {
+       public:
+               block(superblock * sb)
+                       :
+#if HEAP_DEBUG
+                       _magic(FREE_BLOCK_MAGIC),
+#endif
+                       _next(NULL), _mySuperblock(sb)
+               {
+               }
+
+               block &
+               operator=(const block & b)
+               {
+#if HEAP_DEBUG
+                       _magic = b._magic;
+#endif
+                       _next = b._next;
+                       _mySuperblock = b._mySuperblock;
+#if HEAP_FRAG_STATS
+                       _requestedSize = b._requestedSize;
+#endif
+                       return *this;
+               }
+
+               enum {
+                       ALLOCATED_BLOCK_MAGIC = 0xcafecafe,
+                       FREE_BLOCK_MAGIC = 0xbabebabe
+               };
+
+               // Mark this block as free.
+               inline void markFree(void);
+
+               // Mark this block as allocated.
+               inline void markAllocated(void);
+
+               // Is this block valid? (i.e.,
+               // does it have the right magic number?)
+               inline const int isValid(void) const;
+
+               // Return the block's superblock pointer.
+               inline superblock *getSuperblock(void);
+
+#if HEAP_FRAG_STATS
+               void
+               setRequestedSize(size_t s)
+               {
+                       _requestedSize = s;
+               }
+
+               size_t
+               getRequestedSize(void)
+               {
+                       return _requestedSize;
+               }
+#endif
+
+#if USE_PRIVATE_HEAPS
+               void
+               setActualSize(size_t s)
+               {
+                       _actualSize = s;
+               }
+
+               size_t
+               getActualSize(void)
+               {
+                       return _actualSize;
+               }
+#endif
+               void
+               setNext(block * b)
+               {
+                       _next = b;
+               }
+
+               block *
+               getNext(void)
+               {
+                       return _next;
+               }
+
+#if HEAP_LEAK_CHECK
+               void
+               setCallStack(int index, void *address)
+               {
+                       _callStack[index] = address;
+               }
+               
+               void *
+               getCallStack(int index)
+               {
+                       return _callStack[index];
+               }
+               
+               void
+               setAllocatedSize(size_t size)
+               {
+                       _allocatedSize = size;
+               }
+               
+               size_t
+               getAllocatedSize()
+               {
+                       return _allocatedSize;
+               }
+#endif
+
+       private:
+#if USE_PRIVATE_HEAPS
+#if HEAP_DEBUG
+               union {
+                       unsigned long _magic;
+                       double _d1;                             // For alignment.
+               };
+#endif
+
+               block *_next;                           // The next block in a linked-list of blocks.
+               size_t _actualSize;                     // The actual size of the block.
+
+               union {
+                       double _d2;                             // For alignment.
+                       superblock *_mySuperblock;      // A pointer to my superblock.
+               };
+#else // ! USE_PRIVATE_HEAPS
+
+#if HEAP_DEBUG
+               union {
+                       unsigned long _magic;
+                       double _d3;                             // For alignment.
+               };
+#endif
+
+               block *_next;                           // The next block in a linked-list of blocks.
+               superblock *_mySuperblock;      // A pointer to my superblock.
+#endif // USE_PRIVATE_HEAPS
+
+#if HEAP_LEAK_CHECK
+               void *_callStack[HEAP_CALL_STACK_SIZE];
+               size_t _allocatedSize;
+#endif
+
+#if HEAP_FRAG_STATS
+               union {
+                       double _d4;                             // This is just for alignment purposes.
+                       size_t _requestedSize;  // The amount of space requested (vs. allocated).
+               };
+#endif
+
+               // Disable copying.
+               block(const block &);
+};
+
+
+superblock *
+block::getSuperblock(void)
+{
+#if HEAP_DEBUG
+       assert(isValid());
+#endif
+
+       return _mySuperblock;
+}
+
+
+void
+block::markFree(void)
+{
+#if HEAP_DEBUG
+       assert(_magic == ALLOCATED_BLOCK_MAGIC);
+       _magic = FREE_BLOCK_MAGIC;
+#endif
+}
+
+
+void
+block::markAllocated(void)
+{
+#if HEAP_DEBUG
+       assert(_magic == FREE_BLOCK_MAGIC);
+       _magic = ALLOCATED_BLOCK_MAGIC;
+#endif
+}
+
+
+const int
+block::isValid(void) const
+{
+#if HEAP_DEBUG
+       return _magic == FREE_BLOCK_MAGIC
+               || _magic == ALLOCATED_BLOCK_MAGIC;
+#else
+       return 1;
+#endif
+}
+
+}      // namespace BPrivate
+
+#endif // _BLOCK_H_
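
A side note on the HEAP_DEBUG magic numbers above: markAllocated() asserts the block was previously free and markFree() asserts it was previously allocated, so a double free (or a free of a corrupted pointer) trips an assert instead of silently corrupting the free lists. A hypothetical, stand-alone illustration of the same pattern:

    // Hypothetical stand-alone version of the block magic check in block.h.
    #include <cassert>

    enum {
            kAllocatedMagic = 0xcafecafe,
            kFreeMagic      = 0xbabebabe
    };

    struct DebugBlock {
            unsigned long magic;

            void markAllocated()
            {
                    assert(magic == kFreeMagic);            // must have been free
                    magic = kAllocatedMagic;
            }

            void markFree()
            {
                    assert(magic == kAllocatedMagic);       // catches a double free
                    magic = kFreeMagic;
            }
    };

    int
    main()
    {
            DebugBlock b = { kFreeMagic };
            b.markAllocated();
            b.markFree();
            // b.markFree();        // a second free would fail the assert above
            return 0;
    }
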
diff --git a/src/system/libroot/posix/malloc/config.h b/src/system/libroot/posix/malloc/config.h
new file mode 100644
index 0000000000..018f4a8642
--- /dev/null
+++ b/src/system/libroot/posix/malloc/config.h
@@ -0,0 +1,97 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+#ifndef _CONFIG_H_
+#define _CONFIG_H_
+
+#ifndef _REENTRANT
+#      define _REENTRANT               // If defined, generate a multithreaded-capable version.
+#endif
+
+#ifndef USER_LOCKS
+#      define USER_LOCKS 1             // Use our own user-level locks if they're available for the current architecture.
+#endif
+
+#define HEAP_LOG 0             // If non-zero, keep a log of heap accesses.
+
+
+///// You should not change anything below here. /////
+
+
+// The base of the exponential used for size classes.
+// An object is in size class i if
+//        base^(b+i-1) * ALIGNMENT < size <= base^(b+i) * ALIGNMENT,
+// where b = log_base(ALIGNMENT).
+// Note that this puts an upper-limit on internal fragmentation:
+//   if SIZE_CLASS_BASE is 1.2, then we will never see more than
+//   20% internal fragmentation (for aligned requests).
+
+#define SIZE_CLASS_BASE 1.2
+#define MAX_INTERNAL_FRAGMENTATION 2
+
+// The number of groups of superblocks we maintain based on what
+// fraction of the superblock is empty. NB: This number must be at
+// least 2, and is 1 greater than the EMPTY_FRACTION in heap.h.
+
+enum { SUPERBLOCK_FULLNESS_GROUP = 9 };
+
+
+// DO NOT CHANGE THESE.  They require running of maketable to replace
+// the values in heap.cpp for the _numBlocks array.
+
+#define HEAP_DEBUG 0           // If non-zero, keeps extra info for sanity checking.
+#define HEAP_STATS 0           // If non-zero, maintain blowup statistics.
+#define HEAP_FRAG_STATS 0      // If non-zero, maintain fragmentation statistics.
+
+// A simple (and slow) leak checker
+#define HEAP_LEAK_CHECK 0
+#define HEAP_CALL_STACK_SIZE 8
+
+// A simple wall checker
+#define HEAP_WALL 0
+#define HEAP_WALL_SIZE 32
+
+// CACHE_LINE = The number of bytes in a cache line.
+
+#if defined(i386) || defined(WIN32)
+#      define CACHE_LINE 32
+#endif
+
+#ifdef sparc
+#      define CACHE_LINE 64
+#endif
+
+#ifdef __sgi
+#      define CACHE_LINE 128
+#endif
+
+#ifndef CACHE_LINE
+// We don't know what the architecture is,
+// so go for the gusto.
+#define CACHE_LINE 64
+#endif
+
+#ifdef __GNUG__
+// Use the max operator, an extension to C++ found in GNU C++.
+#      define MAX(a,b) ((a) >? (b))
+#else
+#      define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+
+
+#endif // _CONFIG_H_
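
To make the size-class comment above concrete: classes grow geometrically by SIZE_CLASS_BASE = 1.2 and a request is rounded up to the next class, so for larger, aligned requests the allocated size exceeds the request by at most about 20%. A rough, hypothetical sketch of such a table follows; the allocator itself uses the precomputed _sizeTable in heap.cpp, generated by maketable.

    // Hypothetical illustration of geometric size classes with base 1.2.
    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    static const double kBase = 1.2;        // SIZE_CLASS_BASE
    static const size_t kAlignment = 8;     // ALIGNMENT (sizeof(double)) on non-x86_64, see heap.h

    // Size of class i: roughly kAlignment * kBase^i, rounded up to the alignment.
    static size_t
    class_size(int i)
    {
            double raw = kAlignment * std::pow(kBase, i);
            return ((size_t)std::ceil(raw) + kAlignment - 1) & ~(kAlignment - 1);
    }

    int
    main()
    {
            // Rounding a 1000-byte request up to its class wastes ~10% here;
            // the geometric spacing bounds the waste at roughly 20%.
            size_t request = 1000;
            int i = 0;
            while (class_size(i) < request)
                    i++;
            printf("request %zu -> class %d of %zu bytes (%.1f%% slack)\n",
                    request, i, class_size(i),
                    100.0 * (class_size(i) - request) / request);
            return 0;
    }
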
diff --git a/src/system/libroot/posix/malloc/heap.cpp b/src/system/libroot/posix/malloc/heap.cpp
new file mode 100644
index 0000000000..e2cc553a17
--- /dev/null
+++ b/src/system/libroot/posix/malloc/heap.cpp
@@ -0,0 +1,460 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#include "config.h"
+
+#include "heap.h"
+#include "processheap.h"
+#include "superblock.h"
+
+using namespace BPrivate;
+
+// NB: Use maketable.cpp to update this
+//     if SIZE_CLASSES, ALIGNMENT, SIZE_CLASS_BASE, MAX_EMPTY_SUPERBLOCKS,
+//     or SUPERBLOCK_SIZE changes.
+
+#if (MAX_INTERNAL_FRAGMENTATION == 2)
+
+size_t hoardHeap::_sizeTable[hoardHeap::SIZE_CLASSES] = {
+       8UL, 16UL, 24UL, 32UL, 40UL, 48UL, 56UL, 72UL, 80UL, 96UL, 120UL, 144UL,
+       168UL, 200UL, 240UL, 288UL, 344UL, 416UL, 496UL, 592UL, 712UL, 856UL,
+       1024UL, 1232UL, 1472UL, 1768UL, 2120UL, 2544UL, 3048UL, 3664UL,
+       4392UL, 5272UL, 6320UL, 7584UL, 9104UL, 10928UL, 13112UL, 15728UL,
+       18872UL, 22648UL, 27176UL, 32616UL, 39136UL, 46960UL, 56352UL,
+       67624UL, 81144UL, 97376UL, 116848UL, 140216UL, 168256UL, 201904UL,
+       242288UL, 290744UL, 348896UL, 418672UL, 502408UL, 602888UL, 723464UL,
+       868152UL, 1041784UL, 1250136UL, 1500160UL, 1800192UL, 2160232UL,
+       2592280UL, 3110736UL, 3732880UL, 4479456UL, 5375344UL, 6450408UL,
+       7740496UL, 9288592UL, 11146312UL, 13375568UL, 16050680UL, 19260816UL,
+       23112984UL, 27735576UL, 33282688UL, 39939224UL, 47927072UL,
+       57512488UL, 69014984UL, 82817976UL, 99381576UL, 119257888UL,
+       143109472UL, 171731360UL, 206077632UL, 247293152UL, 296751776UL,
+       356102144UL, 427322560UL, 512787072UL, 615344512UL, 738413376UL,
+       886096064UL, 1063315264UL
+};
+
+size_t hoardHeap::_threshold[hoardHeap::SIZE_CLASSES] = {
+       4096UL, 2048UL, 1364UL, 1024UL, 816UL, 680UL, 584UL, 452UL, 408UL,
+       340UL, 272UL, 224UL, 192UL, 160UL, 136UL, 112UL, 92UL, 76UL, 64UL,
+       52UL, 44UL, 36UL, 32UL, 24UL, 20UL, 16UL, 12UL, 12UL, 8UL, 8UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL
+};
+
+#elif (MAX_INTERNAL_FRAGMENTATION == 6)
+
+size_t hoardHeap::_sizeTable[hoardHeap::SIZE_CLASSES] = {
+       8UL, 16UL, 24UL, 32UL, 48UL, 72UL, 112UL, 176UL, 288UL, 456UL, 728UL,
+       1160UL, 1848UL, 2952UL, 4728UL, 7560UL, 12096UL, 19344UL, 30952UL,
+       49520UL, 79232UL, 126768UL, 202832UL, 324520UL, 519232UL, 830768UL,
+       1329232UL, 2126768UL, 3402824UL, 5444520UL, 8711232UL, 13937968UL,
+       22300752UL, 35681200UL, 57089912UL, 91343856UL, 146150176UL,
+       233840256UL, 374144416UL, 598631040UL, 957809728UL, 1532495488UL
+};
+
+size_t hoardHeap::_threshold[hoardHeap::SIZE_CLASSES] = {
+       4096UL, 2048UL, 1364UL, 1024UL, 680UL, 452UL, 292UL, 184UL, 112UL, 68UL,
+       44UL, 28UL, 16UL, 8UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL
+};
+
+#elif (MAX_INTERNAL_FRAGMENTATION == 10)
+
+size_t hoardHeap::_sizeTable[hoardHeap::SIZE_CLASSES] = {
+       8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
+       8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
+       1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
+       67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
+       2147483648UL
+};
+
+size_t hoardHeap::_threshold[hoardHeap::SIZE_CLASSES] = {
+       4096UL, 2048UL, 1024UL, 512UL, 256UL, 128UL, 64UL, 32UL, 16UL, 8UL, 4UL,
+       4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL, 4UL,
+       4UL, 4UL, 4UL, 4UL
+};
+
+#else
+#      error "Undefined size class base."
+#endif
+
+
+int hoardHeap::fMaxThreadHeaps = 1;
+int hoardHeap::_numProcessors;
+int hoardHeap::_numProcessorsMask;
+
+
+// Return ceil(log_2(num)).
+// num must be positive.
+
+static int
+lg(int num)
+{
+       assert(num > 0);
+       int power = 0;
+       int n = 1;
+       // Invariant: 2^power == n.
+       while (n < num) {
+               n <<= 1;
+               power++;
+       }
+       return power;
+}
+
+
+//     #pragma mark -
+
+
+hoardHeap::hoardHeap(void)
+       :
+       _index(0), _reusableSuperblocks(NULL), _reusableSuperblocksCount(0)
+#if HEAP_DEBUG
+       , _magic(HEAP_MAGIC)
+#endif
+{
+       initLock();
+
+       for (int i = 0; i < SUPERBLOCK_FULLNESS_GROUP; i++) {
+               for (int j = 0; j < SIZE_CLASSES; j++) {
+                       // Initialize all superblocks lists to empty.
+                       _superblocks[i][j] = NULL;
+               }
+       }
+
+       for (int k = 0; k < SIZE_CLASSES; k++) {
+               _leastEmptyBin[k] = 0;
+       }
+}
+
+
+void
+hoardHeap::insertSuperblock(int sizeclass,
+       superblock *sb, processHeap *pHeap)
+{
+       assert(sb->isValid());
+       assert(sb->getBlockSizeClass() == sizeclass);
+       assert(sb->getPrev() == NULL);
+       assert(sb->getNext() == NULL);
+       assert(_magic == HEAP_MAGIC);
+
+       // Now it's ours.
+       sb->setOwner(this);
+
+       // How full is this superblock?  We'll use this information to put
+       // it into the right 'bin'.
+       sb->computeFullness();
+       int fullness = sb->getFullness();
+
+       // Update the stats.
+       incStats(sizeclass, sb->getNumBlocks() - sb->getNumAvailable(),
+               sb->getNumBlocks());
+
+       if (fullness == 0
+               && sb->getNumBlocks() > 1
+               && sb->getNumBlocks() == sb->getNumAvailable()) {
+               // Recycle this superblock.
+#if 0
+               removeSuperblock(sb, sizeclass);
+               // Update the stats.
+               decStats(sizeclass,
+                                sb->getNumBlocks() - sb->getNumAvailable(), sb->getNumBlocks());
+               // Free it immediately.
+               const size_t s = sizeFromClass(sizeclass);
+               const int blksize = align(sizeof(block) + s);
+#if HEAP_LOG
+               // Record the memory deallocation.
+               MemoryRequest m;
+               m.deallocate((int)sb->getNumBlocks() *
+                                        (int)sizeFromClass(sb->getBlockSizeClass()));
+               pHeap->getLog(getIndex()).append(m);
+#endif
+#if HEAP_FRAG_STATS
+
+               pHeap->setDeallocated(0, sb->getNumBlocks() * sizeFromClass(sb->getBlockSizeClass()));
+#endif
+
+               hoardUnsbrk(sb, align(sizeof(superblock) + blksize));
+#else
+               recycle(sb);
+#endif
+       } else {
+               // Insert it into the appropriate list.
+               superblock *&head = _superblocks[fullness][sizeclass];
+               sb->insertBefore(head);
+               head = sb;
+               assert(head->isValid());
+
+               // Reset the least-empty bin counter.
+               _leastEmptyBin[sizeclass] = RESET_LEAST_EMPTY_BIN;
+       }
+}
+
+
+superblock *
+hoardHeap::removeMaxSuperblock(int sizeclass)
+{
+       assert(_magic == HEAP_MAGIC);
+
+       superblock *head = NULL;
+
+       // First check the reusable superblocks list.
+
+       head = reuse(sizeclass);
+       if (head) {
+               // We found one. Since we're removing this superblock, update the
+               // stats accordingly.
+               decStats(sizeclass,
+                       head->getNumBlocks() - head->getNumAvailable(),
+                       head->getNumBlocks());
+
+               return head;
+       }
+
+       // Instead of finding the superblock with the most available space
+       // (something that would either involve a linear scan through the
+       // superblocks or maintaining the superblocks in sorted order), we
+       // just pick one that is no more than
+       // 1/(SUPERBLOCK_FULLNESS_GROUP-1) more full than the superblock
+       // with the most available space.  We start with the emptiest group.
+
+       int i = 0;
+
+       // Note: the last group (SUPERBLOCK_FULLNESS_GROUP - 1) is full, so
+       // we never need to check it. But for robustness, we leave it in.
+       while (i < SUPERBLOCK_FULLNESS_GROUP) {
+               head = _superblocks[i][sizeclass];
+               if (head)
+                       break;
+
+               i++;
+       }
+
+       if (!head)
+               return NULL;
+
+       // Make sure that this superblock is at least 1/EMPTY_FRACTION
+       // empty.
+       assert(head->getNumAvailable() * EMPTY_FRACTION >= head->getNumBlocks());
+
+       removeSuperblock(head, sizeclass);
+
+       assert(head->isValid());
+       assert(head->getPrev() == NULL);
+       assert(head->getNext() == NULL);
+       return head;
+}
+
+
+void
+hoardHeap::removeSuperblock(superblock *sb, int sizeclass)
+{
+       assert(_magic == HEAP_MAGIC);
+
+       assert(sb->isValid());
+       assert(sb->getOwner() == this);
+       assert(sb->getBlockSizeClass() == sizeclass);
+
+       for (int i = 0; i < SUPERBLOCK_FULLNESS_GROUP; i++) {
+               if (sb == _superblocks[i][sizeclass]) {
+                       _superblocks[i][sizeclass] = sb->getNext();
+                       if (_superblocks[i][sizeclass] != NULL) {
+                               assert(_superblocks[i][sizeclass]->isValid());
+                       }
+                       break;
+               }
+       }
+
+       sb->remove();
+       decStats(sizeclass, sb->getNumBlocks() - sb->getNumAvailable(),
+               sb->getNumBlocks());
+}
+
+
+void
+hoardHeap::moveSuperblock(superblock *sb,
+       int sizeclass, int fromBin, int toBin)
+{
+       assert(_magic == HEAP_MAGIC);
+       assert(sb->isValid());
+       assert(sb->getOwner() == this);
+       assert(sb->getBlockSizeClass() == sizeclass);
+       assert(sb->getFullness() == toBin);
+
+       // Remove the superblock from the old bin.
+
+       superblock *&oldHead = _superblocks[fromBin][sizeclass];
+       if (sb == oldHead) {
+               oldHead = sb->getNext();
+               if (oldHead != NULL) {
+                       assert(oldHead->isValid());
+               }
+       }
+
+       sb->remove();
+
+       // Insert the superblock into the new bin.
+
+       superblock *&newHead = _superblocks[toBin][sizeclass];
+       sb->insertBefore(newHead);
+       newHead = sb;
+       assert(newHead->isValid());
+
+       // Reset the least-empty bin counter.
+       _leastEmptyBin[sizeclass] = RESET_LEAST_EMPTY_BIN;
+}
+
+
+// The heap lock must be held when this procedure is called.
+
+int
+hoardHeap::freeBlock(block * &b, superblock * &sb,
+       int sizeclass, processHeap *pHeap)
+{
+       assert(sb->isValid());
+       assert(b->isValid());
+       assert(this == sb->getOwner());
+
+       const int oldFullness = sb->getFullness();
+       sb->putBlock(b);
+       decUStats(sizeclass);
+       const int newFullness = sb->getFullness();
+
+       // Free big superblocks.
+       if (sb->getNumBlocks() == 1) {
+               removeSuperblock(sb, sizeclass);
+               const size_t s = sizeFromClass(sizeclass);
+               const int blksize = align(sizeof(block) + s);
+#if HEAP_LOG
+               // Record the memory deallocation.
+               MemoryRequest m;
+               m.deallocate((int)sb->getNumBlocks()
+                       * (int)sizeFromClass(sb->getBlockSizeClass()));
+               pHeap->getLog(getIndex()).append(m);
+#endif
+#if HEAP_FRAG_STATS
+               pHeap->setDeallocated(0,
+                       sb->getNumBlocks() * sizeFromClass(sb->getBlockSizeClass()));
+#endif
+               hoardUnsbrk(sb, align(sizeof(superblock) + blksize));
+               return 1;
+       }
+
+       // If the fullness value has changed, move the superblock.
+       if (newFullness != oldFullness) {
+               moveSuperblock(sb, sizeclass, oldFullness, newFullness);
+       } else {
+               // Move the superblock to the front of its list (to reduce
+               // paging).
+               superblock *&head = _superblocks[newFullness][sizeclass];
+               if (sb != head) {
+                       sb->remove();
+                       sb->insertBefore(head);
+                       head = sb;
+               }
+       }
+
+       // If the superblock is now empty, recycle it.
+
+       if ((newFullness == 0) && (sb->getNumBlocks() == sb->getNumAvailable())) {
+               removeSuperblock(sb, sizeclass);
+#if 0
+               // Free it immediately.
+               const size_t s = sizeFromClass(sizeclass);
+               const int blksize = align(sizeof(block) + s);
+#if HEAP_LOG
+               // Record the memory deallocation.
+               MemoryRequest m;
+               m.deallocate((int)sb->getNumBlocks()
+                       * (int)sizeFromClass(sb->getBlockSizeClass()));
+               pHeap->getLog(getIndex()).append(m);
+#endif
+#if HEAP_FRAG_STATS
+               pHeap->setDeallocated(0,
+                       sb->getNumBlocks() * sizeFromClass(sb->getBlockSizeClass()));
+#endif
+
+               hoardUnsbrk(sb, align(sizeof(superblock) + blksize));
+               return 1;
+#else
+               recycle(sb);
+               // Update the stats.  This restores the stats to their state
+               // before the call to removeSuperblock, above.
+               incStats(sizeclass,
+                       sb->getNumBlocks() - sb->getNumAvailable(), sb->getNumBlocks());
+#endif
+       }
+
+       // If this is the process heap, then we're done.
+       if (this == (hoardHeap *)pHeap)
+               return 0;
+
+       //
+       // Release a superblock, if necessary.
+       //
+
+       //
+       // Check to see if the amount free exceeds the release threshold
+       // (two superblocks worth of blocks for a given sizeclass) and if
+       // the heap is sufficiently empty.
+       //
+
+       // We never move anything to the process heap if we're on a
+       // uniprocessor.
+       if (_numProcessors > 1) {
+               int inUse, allocated;
+               getStats(sizeclass, inUse, allocated);
+               if ((inUse < allocated - getReleaseThreshold(sizeclass))
+                       && (EMPTY_FRACTION * inUse <
+                               EMPTY_FRACTION * allocated - allocated)) {
+
+                       // We've crossed the magical threshold. Find the superblock with
+                       // the most free blocks and give it to the process heap.
+                       superblock *const maxSb = removeMaxSuperblock(sizeclass);
+                       assert(maxSb != NULL);
+
+                       // Update the statistics.
+
+                       assert(maxSb->getNumBlocks() >= maxSb->getNumAvailable());
+
+                       // Give the superblock back to the process heap.
+                       pHeap->release(maxSb);
+               }
+       }
+
+       return 0;
+}
+
+
+void
+hoardHeap::initNumProcs(void)
+{
+       system_info info;
+       if (get_system_info(&info) != B_OK)
+               hoardHeap::_numProcessors = 1;
+       else
+               hoardHeap::_numProcessors = info.cpu_count;
+
+       fMaxThreadHeaps = 1 << (lg(_numProcessors) + 1);
+       _numProcessorsMask = fMaxThreadHeaps - 1;
+}
+
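
For reference, initNumProcs() above sizes the thread heap array to twice the CPU count rounded up to a power of two: lg() is ceil(log2(n)), so 4 CPUs give 1 << (2 + 1) = 8 thread heaps with mask 7, and 6 CPUs give 1 << (3 + 1) = 16 with mask 15. A small stand-alone check of that arithmetic (the lg() helper is copied from heap.cpp above):

    #include <cassert>
    #include <cstdio>

    // ceil(log2(num)), same as the static lg() helper in heap.cpp.
    static int
    lg(int num)
    {
            assert(num > 0);
            int power = 0;
            int n = 1;
            while (n < num) {
                    n <<= 1;
                    power++;
            }
            return power;
    }

    int
    main()
    {
            // The sizing rule from hoardHeap::initNumProcs().
            const int cpuCounts[] = { 1, 2, 4, 6, 8 };
            for (int i = 0; i < 5; i++) {
                    int maxThreadHeaps = 1 << (lg(cpuCounts[i]) + 1);
                    printf("%d CPUs -> %d thread heaps, mask 0x%x\n",
                            cpuCounts[i], maxThreadHeaps, maxThreadHeaps - 1);
            }
            return 0;
    }
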
diff --git a/src/system/libroot/posix/malloc/heap.h b/src/system/libroot/posix/malloc/heap.h
new file mode 100644
index 0000000000..42e9eb456e
--- /dev/null
+++ b/src/system/libroot/posix/malloc/heap.h
@@ -0,0 +1,538 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+/* hoardHeap, the base class for threadHeap and processHeap. */
+
+#ifndef _HEAP_H_
+#define _HEAP_H_
+
+#include <OS.h>
+
+#include "config.h"
+
+#include "arch-specific.h"
+#include "superblock.h"
+#include "heapstats.h"
+
+
+namespace BPrivate {
+
+class processHeap;
+
+class hoardHeap {
+       public:
+               hoardHeap(void);
+
+               // A superblock that holds more than one object must hold at least
+               // this many bytes.
+               enum { SUPERBLOCK_SIZE = 8192 };
+
+               // A thread heap must be at least 1/EMPTY_FRACTION empty before we
+               // start returning superblocks to the process heap.
+               enum { EMPTY_FRACTION = SUPERBLOCK_FULLNESS_GROUP - 1 };
+
+               // Reset value for the least-empty bin.  The last bin
+               // (SUPERBLOCK_FULLNESS_GROUP-1) is for completely full superblocks,
+               // so we use the next-to-last bin.
+               enum { RESET_LEAST_EMPTY_BIN = SUPERBLOCK_FULLNESS_GROUP - 2 };
+
+               // The number of empty superblocks that we allow any thread heap to
+               // hold once the thread heap has fallen below 1/EMPTY_FRACTION
+               // empty.
+               enum { MAX_EMPTY_SUPERBLOCKS = EMPTY_FRACTION };
+
+               //
+               // The number of size classes.  This combined with the
+               // SIZE_CLASS_BASE determine the maximum size of an object.
+               //
+               // NB: Once this is changed, you must execute maketable.cpp and put
+               // the generated values into heap.cpp.
+
+#if MAX_INTERNAL_FRAGMENTATION == 2
+               enum { SIZE_CLASSES = 115 };
+#elif MAX_INTERNAL_FRAGMENTATION == 6
+               enum { SIZE_CLASSES = 46 };
+#elif MAX_INTERNAL_FRAGMENTATION == 10
+               enum { SIZE_CLASSES = 32 };
+#else
+#      error "Undefined size class base."
+#endif
+
+               // Every object is aligned so that it can always hold any type.
+#ifdef __x86_64__
+               enum { ALIGNMENT = 16 };
+#else          
+               enum { ALIGNMENT = sizeof(double) };
+#endif
+
+               // ANDing with this rounds to ALIGNMENT.
+               enum { ALIGNMENT_MASK = ALIGNMENT - 1 };
+
+               // Used for sanity checking.
+               enum { HEAP_MAGIC = 0x0badcafe };
+
+               // Get the usage and allocated statistics.
+               inline void getStats(int sizeclass, int &U, int &A);
+
+
+#if HEAP_STATS
+               // How much is the maximum ever in use for this size class?
+               inline int maxInUse(int sizeclass);
+
+               // How much is the maximum memory allocated for this size class?
+               inline int maxAllocated(int sizeclass);
+#endif
+
+               // Insert a superblock into our list.
+               void insertSuperblock(int sizeclass, superblock *sb, processHeap *pHeap);
+
+               // Remove the superblock with the most free space.
+               superblock *removeMaxSuperblock(int sizeclass);
+
+               // Find an available superblock (i.e., with some space in it).
+               inline superblock *findAvailableSuperblock(int sizeclass,
+                                                               block * &b, processHeap * pHeap);
+
+               // Lock this heap.
+               inline void lock(void);
+
+               // Unlock this heap.
+               inline void unlock(void);
+
+               // Init this heap lock.
+               inline void initLock(void);
+
+               // Set our index number (which heap we are).
+               inline void setIndex(int i);
+
+               // Get our index number (which heap we are).
+               inline int getIndex(void);
+
+               // Free a block into a superblock.
+               // This is used by processHeap::free().
+               // Returns 1 iff the superblock was munmapped.
+               int freeBlock(block * &b, superblock * &sb, int sizeclass,
+                               processHeap * pHeap);
+
+               //// Utility functions ////
+
+               // Return the size class for a given size.
+               inline static int sizeClass(const size_t sz);
+
+               // Return the size corresponding to a given size class.
+               inline static size_t sizeFromClass(const int sizeclass);
+
+               // Return the release threshold corresponding to a given size class.
+               inline static int getReleaseThreshold(const int sizeclass);
+
+               // Return how many blocks of a given size class fit into a superblock.
+               inline static int numBlocks(const int sizeclass);
+
+               // Align a value.
+               inline static size_t align(const size_t sz);
+
+       private:
+               // Disable copying and assignment.
+
+               hoardHeap(const hoardHeap &);
+               const hoardHeap & operator=(const hoardHeap &);
+
+               // Recycle a superblock.
+               inline void recycle(superblock *);
+
+               // Reuse a superblock (if one is available).
+               inline superblock *reuse(int sizeclass);
+
+               // Remove a particular superblock.
+               void removeSuperblock(superblock *, int sizeclass);
+
+               // Move a particular superblock from one bin to another.
+               void moveSuperblock(superblock *,
+                                                       int sizeclass, int fromBin, int toBin);
+
+               // Update memory in-use and allocated statistics.
+               // (*UStats = just update U.)
+               inline void incStats(int sizeclass, int updateU, int updateA);
+               inline void incUStats(int sizeclass);
+
+               inline void decStats(int sizeclass, int updateU, int updateA);
+               inline void decUStats(int sizeclass);
+
+               //// Members ////
+
+               // Heap statistics.
+               heapStats _stats[SIZE_CLASSES];
+
+               // The per-heap lock.
+               hoardLockType _lock;
+
+               // Which heap this is (0 = the process (global) heap).
+               int _index;
+
+               // Reusable superblocks.
+               superblock *_reusableSuperblocks;
+               int _reusableSuperblocksCount;
+
+               // Lists of superblocks.
+               superblock *_superblocks[SUPERBLOCK_FULLNESS_GROUP][SIZE_CLASSES];
+
+               // The current least-empty superblock bin.
+               int _leastEmptyBin[SIZE_CLASSES];
+
+#if HEAP_DEBUG
+               // For sanity checking.
+               const unsigned long _magic;
+#else
+#      define _magic HEAP_MAGIC
+#endif
+
+               // The lookup table for size classes.
+               static size_t _sizeTable[SIZE_CLASSES];
+
+               // The lookup table for release thresholds.
+               static size_t _threshold[SIZE_CLASSES];
+
+       public:
+               static void initNumProcs(void);
+
+       protected:
+               // The maximum number of thread heaps we allow.  (NOT the maximum
+               // number of threads -- Hoard imposes no such limit.)  This must be
+               // a power of two! NB: This number is twice the maximum number of
+               // PROCESSORS supported by Hoard.
+               static int fMaxThreadHeaps;
+
+               // number of CPUs, cached
+               static int _numProcessors;
+               static int _numProcessorsMask;
+};
+
+
+
+void
+hoardHeap::incStats(int sizeclass, int updateU, int updateA)
+{
+       assert(_magic == HEAP_MAGIC);
+       assert(updateU >= 0);
+       assert(updateA >= 0);
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       _stats[sizeclass].incStats(updateU, updateA);
+}
+
+
+void
+hoardHeap::incUStats(int sizeclass)
+{
+       assert(_magic == HEAP_MAGIC);
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       _stats[sizeclass].incUStats();
+}
+
+
+void
+hoardHeap::decStats(int sizeclass, int updateU, int updateA)
+{
+       assert(_magic == HEAP_MAGIC);
+       assert(updateU >= 0);
+       assert(updateA >= 0);
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       _stats[sizeclass].decStats(updateU, updateA);
+}
+
+
+void
+hoardHeap::decUStats(int sizeclass)
+{
+       assert(_magic == HEAP_MAGIC);
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       _stats[sizeclass].decUStats();
+}
+
+
+void
+hoardHeap::getStats(int sizeclass, int &U, int &A)
+{
+       assert(_magic == HEAP_MAGIC);
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       _stats[sizeclass].getStats(U, A);
+}
+
+
+#if HEAP_STATS
+int
+hoardHeap::maxInUse(int sizeclass)
+{
+       assert(_magic == HEAP_MAGIC);
+       return _stats[sizeclass].getUmax();
+}
+
+
+int
+hoardHeap::maxAllocated(int sizeclass)
+{
+       assert(_magic == HEAP_MAGIC);
+       return _stats[sizeclass].getAmax();
+}
+#endif // HEAP_STATS
+
+
+superblock *
+hoardHeap::findAvailableSuperblock(int sizeclass,
+       block * &b, processHeap * pHeap)
+{
+       assert(this);
+       assert(_magic == HEAP_MAGIC);
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+
+       superblock *sb = NULL;
+       int reUsed = 0;
+
+       // Look through the superblocks, starting with the almost-full ones
+       // and going to the emptiest ones.  The Least Empty Bin for a
+       // sizeclass is a conservative approximation (fixed after one
+       // iteration) of the first bin that has superblocks in it, starting
+       // with (surprise) the least-empty bin.
+
+       for (int i = _leastEmptyBin[sizeclass]; i >= 0; i--) {
+               sb = _superblocks[i][sizeclass];
+               if (sb == NULL) {
+                       if (i == _leastEmptyBin[sizeclass]) {
+                               // There wasn't a superblock in this bin,
+                               // so we adjust the least empty bin.
+                               _leastEmptyBin[sizeclass]--;
+                       }
+               } else if (sb->getNumAvailable() > 0) {
+                       assert(sb->getOwner() == this);
+                       break;
+               }
+               sb = NULL;
+       }
+
+#if 1
+       if (sb == NULL) {
+               // Try to reuse a superblock.
+               sb = reuse(sizeclass);
+               if (sb) {
+                       assert(sb->getOwner() == this);
+                       reUsed = 1;
+               }
+       }
+#endif
+
+       if (sb != NULL) {
+               // Sanity checks:
+               //   This superblock is 'valid'.
+               assert(sb->isValid());
+               //   This superblock has the right ownership.
+               assert(sb->getOwner() == this);
+
+               int oldFullness = sb->getFullness();
+
+               // Now get a block from the superblock.
+               // This superblock must have space available.
+               b = sb->getBlock();
+               assert(b != NULL);
+
+               // Update the stats.
+               incUStats(sizeclass);
+
+               if (reUsed) {
+                       insertSuperblock(sizeclass, sb, pHeap);
+                       // Fix the stats (since insert will just have incremented them
+                       // by this amount).
+                       decStats(sizeclass,
+                                        sb->getNumBlocks() - sb->getNumAvailable(),
+                                        sb->getNumBlocks());
+               } else {
+                       // If we've crossed a fullness group,
+                       // move the superblock.
+                       int fullness = sb->getFullness();
+
+                       if (fullness != oldFullness) {
+                               // Move the superblock.
+                               moveSuperblock(sb, sizeclass, oldFullness, fullness);
+                       }
+               }
+       }
+       // Either we didn't find a superblock or we did and got a block.
+       assert((sb == NULL) || (b != NULL));
+       // Either we didn't get a block or we did and we also got a superblock.
+       assert((b == NULL) || (sb != NULL));
+
+       return sb;
+}
+
+
+int
+hoardHeap::sizeClass(const size_t sz)
+{
+       // Find the size class for a given object size
+       // (the smallest i such that _sizeTable[i] >= sz).
+       int sizeclass = 0;
+       while (_sizeTable[sizeclass] < sz) {
+               sizeclass++;
+               assert(sizeclass < SIZE_CLASSES);
+       }
+       return sizeclass;
+}
+
+
+size_t
+hoardHeap::sizeFromClass(const int sizeclass)
+{
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       return _sizeTable[sizeclass];
+}
+
+
+int
+hoardHeap::getReleaseThreshold(const int sizeclass)
+{
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       return _threshold[sizeclass];
+}
+
+
+int
+hoardHeap::numBlocks(const int sizeclass)
+{
+       assert(sizeclass >= 0);
+       assert(sizeclass < SIZE_CLASSES);
+       const size_t s = sizeFromClass(sizeclass);
+       assert(s > 0);
+       const int blksize = align(sizeof(block) + s);
+       // Compute the number of blocks that will go into this superblock.
+       int nb = max_c(1, ((SUPERBLOCK_SIZE - sizeof(superblock)) / blksize));
+       return nb;
+}
+
+
+void
+hoardHeap::lock(void)
+{
+       assert(_magic == HEAP_MAGIC);
+       hoardLock(_lock);
+}
+
+
+void
+hoardHeap::unlock(void)
+{
+       assert(_magic == HEAP_MAGIC);
+       hoardUnlock(_lock);
+}
+
+
+void
+hoardHeap::initLock(void)
+{
+       // Initialize the per-heap lock.
+       hoardLockInit(_lock, "hoard heap");
+}
+
+
+size_t
+hoardHeap::align(const size_t sz)
+{
+       // Align sz up to the nearest multiple of ALIGNMENT.
+       // This is much faster than using multiplication
+       // and division.
+       return (sz + ALIGNMENT_MASK) & ~ALIGNMENT_MASK;
+}
+
+
+void
+hoardHeap::setIndex(int i)
+{
+       _index = i;
+}
+
+
+int
+hoardHeap::getIndex(void)
+{
+       return _index;
+}
+
+
+void
+hoardHeap::recycle(superblock *sb)
+{
+       assert(sb != NULL);
+       assert(sb->getOwner() == this);
+       assert(sb->getNumBlocks() > 1);
+       assert(sb->getNext() == NULL);
+       assert(sb->getPrev() == NULL);
+       assert(hoardHeap::numBlocks(sb->getBlockSizeClass()) > 1);
+       sb->insertBefore(_reusableSuperblocks);
+       _reusableSuperblocks = sb;
+       ++_reusableSuperblocksCount;
+       // printf ("count: %d => %d\n", getIndex(), _reusableSuperblocksCount);
+}
+
+
+superblock *
+hoardHeap::reuse(int sizeclass)
+{
+       if (_reusableSuperblocks == NULL)
+               return NULL;
+
+       // Make sure that we aren't using a sizeclass
+       // that is too big for a 'normal' superblock.
+       if (hoardHeap::numBlocks(sizeclass) <= 1)
+               return NULL;
+
+       // Pop off a superblock from the reusable-superblock list.
+       assert(_reusableSuperblocksCount > 0);
+       superblock *sb = _reusableSuperblocks;
+       _reusableSuperblocks = sb->getNext();
+       sb->remove();
+       assert(sb->getNumBlocks() > 1);
+       --_reusableSuperblocksCount;
+
+       // Reformat the superblock if necessary.
+       if (sb->getBlockSizeClass() != sizeclass) {
+               decStats(sb->getBlockSizeClass(),
+                       sb->getNumBlocks() - sb->getNumAvailable(),
+                       sb->getNumBlocks());
+
+               sb = new((char *)sb) superblock(numBlocks(sizeclass),
+                       sizeclass, this);
+
+               incStats(sizeclass,
+                       sb->getNumBlocks() - sb->getNumAvailable(),
+                       sb->getNumBlocks());
+       }
+
+       assert(sb->getOwner() == this);
+       assert(sb->getBlockSizeClass() == sizeclass);
+       return sb;
+}
+
+}      // namespace BPrivate
+
+#endif // _HEAP_H_
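
For readers skimming the restored heap.h, the size-class machinery above comes down to three small calculations: round a request up to the allocator's alignment, scan _sizeTable for the smallest class that fits, and work out how many blocks of that class fit in one superblock. The following stand-alone C++ sketch mirrors that arithmetic with made-up constants (the real ALIGNMENT, SUPERBLOCK_SIZE and size table live in config.h and heap.cpp, so every number below is a placeholder, not Haiku's value):

// Stand-alone illustration only; all constants are invented, not taken from config.h.
#include <cassert>
#include <cstddef>
#include <cstdio>

static const size_t kAlignment = 8;              // placeholder for ALIGNMENT
static const size_t kAlignmentMask = kAlignment - 1;
static const size_t kSuperblockSize = 8192;      // placeholder for SUPERBLOCK_SIZE
static const size_t kSizeTable[] = { 8, 16, 32, 64, 128, 256, 512, 1024 };
static const int kSizeClasses = (int)(sizeof(kSizeTable) / sizeof(kSizeTable[0]));
static const size_t kBlockHeader = 16;           // stand-in for sizeof(block)
static const size_t kSuperblockHeader = 64;      // stand-in for sizeof(superblock)

// Same rounding trick as hoardHeap::align(): add the mask, then clear the low bits.
static size_t alignUp(size_t sz)
{
	return (sz + kAlignmentMask) & ~kAlignmentMask;
}

// Same linear scan as hoardHeap::sizeClass(): smallest class whose size covers sz.
static int sizeClass(size_t sz)
{
	int sc = 0;
	while (kSizeTable[sc] < sz) {
		sc++;
		assert(sc < kSizeClasses);
	}
	return sc;
}

// Same computation as hoardHeap::numBlocks(): blocks per superblock, at least one.
static int numBlocks(int sc)
{
	const size_t blockSize = alignUp(kBlockHeader + kSizeTable[sc]);
	const int nb = (int)((kSuperblockSize - kSuperblockHeader) / blockSize);
	return nb < 1 ? 1 : nb;
}

int main()
{
	const size_t request = 100;
	const int sc = sizeClass(request);
	printf("request %zu -> class %d (block size %zu), %d blocks per superblock\n",
		request, sc, kSizeTable[sc], numBlocks(sc));
	return 0;
}

With the toy table, a 100-byte request lands in the 128-byte class, and 56 such blocks fit in an 8 KiB superblock once the (invented) header sizes are subtracted.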
diff --git a/src/system/libroot/posix/malloc/heapstats.h b/src/system/libroot/posix/malloc/heapstats.h
new file mode 100644
index 0000000000..387cac65a9
--- /dev/null
+++ b/src/system/libroot/posix/malloc/heapstats.h
@@ -0,0 +1,175 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+#ifndef _HEAPSTATS_H_
+#define _HEAPSTATS_H_
+
+#include "config.h"
+
+//#include <stdio.h>
+//#include <assert.h>
+
+
+class heapStats {
+       public:
+               heapStats(void)
+                       : U(0), A(0)
+#if HEAP_STATS
+                       , Umax(0), Amax(0)
+#endif
+               {
+               }
+
+               inline const heapStats & operator=(const heapStats & p);
+
+               inline void incStats(int updateU, int updateA);
+               inline void incUStats(void);
+
+               inline void decStats(int updateU, int updateA);
+               inline void decUStats(void);
+               inline void decUStats(int &Uout, int &Aout);
+
+               inline void getStats(int &Uout, int &Aout);
+
+#if HEAP_STATS
+               inline int getUmax(void);
+               inline int getAmax(void);
+#endif
+
+       private:
+               // U and A *must* be the first items in this class --
+               // we will depend on this to atomically update them.
+
+               int U;                                          // Memory in use.
+               int A;                                          // Memory allocated.
+
+#if HEAP_STATS
+               int Umax;
+               int Amax;
+#endif
+};
+
+
+inline void
+heapStats::incStats(int updateU, int updateA)
+{
+       assert(updateU >= 0);
+       assert(updateA >= 0);
+       assert(U <= A);
+       assert(U >= 0);
+       assert(A >= 0);
+       U += updateU;
+       A += updateA;
+
+#if HEAP_STATS
+       Amax = MAX(Amax, A);
+       Umax = MAX(Umax, U);
+#endif
+
+       assert(U <= A);
+       assert(U >= 0);
+       assert(A >= 0);
+}
+
+
+inline void
+heapStats::incUStats(void)
+{
+       assert(U < A);
+       assert(U >= 0);
+       assert(A >= 0);
+       U++;
+
+#if HEAP_STATS
+       Umax = MAX(Umax, U);
+#endif
+
+       assert(U >= 0);
+       assert(A >= 0);
+}
+
+
+inline void
+heapStats::decStats(int updateU, int updateA)
+{
+       assert(updateU >= 0);
+       assert(updateA >= 0);
+       assert(U <= A);
+       assert(U >= updateU);
+       assert(A >= updateA);
+       U -= updateU;
+       A -= updateA;
+       assert(U <= A);
+       assert(U >= 0);
+       assert(A >= 0);
+}
+
+
+inline void
+heapStats::decUStats(int &Uout, int &Aout)
+{
+       assert(U <= A);
+       assert(U > 0);
+       assert(A >= 0);
+       U--;
+       Uout = U;
+       Aout = A;
+       assert(U >= 0);
+       assert(A >= 0);
+}
+
+
+inline void
+heapStats::decUStats(void)
+{
+       assert(U <= A);
+       assert(U > 0);
+       assert(A >= 0);
+       U--;
+}
+
+
+inline void
+heapStats::getStats(int &Uout, int &Aout)
+{
+       assert(U >= 0);
+       assert(A >= 0);
+       Uout = U;
+       Aout = A;
+       assert(U <= A);
+       assert(U >= 0);
+       assert(A >= 0);
+}
+
+
+#if HEAP_STATS
+inline int
+heapStats::getUmax(void)
+{
+       return Umax;
+}
+
+
+inline int
+heapStats::getAmax(void)
+{
+       return Amax;
+}
+#endif // HEAP_STATS
+
+#endif // _HEAPSTATS_H_
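
Nothing above is more than a pair of counters with the invariant 0 <= U <= A (blocks in use never exceed blocks allocated), plus optional high-water marks when HEAP_STATS is enabled. As a rough, stand-alone illustration only (this struct is invented for the example and is not part of the commit):

#include <algorithm>
#include <cassert>
#include <cstdio>

struct MiniStats {
	int U = 0;                       // blocks in use
	int A = 0;                       // blocks allocated (capacity held)
	int Umax = 0, Amax = 0;          // high-water marks

	void incStats(int dU, int dA)
	{
		U += dU; A += dA;
		Umax = std::max(Umax, U); Amax = std::max(Amax, A);
		assert(0 <= U && U <= A);
	}
	void incUStats()  { assert(U < A); ++U; Umax = std::max(Umax, U); }
	void decStats(int dU, int dA) { U -= dU; A -= dA; assert(0 <= U && U <= A); }
	void decUStats()  { assert(U > 0); --U; }
};

int main()
{
	MiniStats s;
	s.incStats(0, 8);   // a fresh superblock: 8 blocks allocated, none in use yet
	s.incUStats();      // hand out one block
	s.incUStats();      // and another
	s.decUStats();      // free one again
	printf("U=%d A=%d Umax=%d Amax=%d\n", s.U, s.A, s.Umax, s.Amax);  // U=1 A=8 Umax=2 Amax=8
	return 0;
}

The asserts model the same sanity checks the real class performs; the real heapStats additionally relies on U and A being the first members so they can be updated together.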
diff --git a/src/system/libroot/posix/malloc/processheap.cpp b/src/system/libroot/posix/malloc/processheap.cpp
new file mode 100644
index 0000000000..129cf4c3a0
--- /dev/null
+++ b/src/system/libroot/posix/malloc/processheap.cpp
@@ -0,0 +1,230 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#include <string.h>
+#include <stdio.h>
+
+#include "config.h"
+
+#if USE_PRIVATE_HEAPS
+#      include "privateheap.h"
+#      define HEAPTYPE privateHeap
+#else
+#      define HEAPTYPE threadHeap
+#      include "threadheap.h"
+#endif
+
+#include "processheap.h"
+
+using namespace BPrivate;
+
+
+processHeap::processHeap()
+       :
+       theap((HEAPTYPE*)hoardSbrk(sizeof(HEAPTYPE) * fMaxThreadHeaps)),
+#if HEAP_FRAG_STATS
+       _currentAllocated(0),
+       _currentRequested(0),
+       _maxAllocated(0),
+       _inUseAtMaxAllocated(0),
+       _maxRequested(0),
+#endif
+#if HEAP_LOG
+       _log((Log<MemoryRequest>*)
+               hoardSbrk(sizeof(Log<MemoryRequest>) * (fMaxThreadHeaps + 1))),
+#endif
+       _buffer(NULL),
+       _bufferCount(0)
+{
+       if (theap == NULL)
+               return;
+       new(theap) HEAPTYPE[fMaxThreadHeaps];
+
+#if HEAP_LOG
+       if (_log == NULL)
+               return;
+       new(_log) Log<MemoryRequest>[fMaxThreadHeaps + 1];
+#endif
+
+       int i;
+       // The process heap is heap 0.
+       setIndex(0);
+       for (i = 0; i < fMaxThreadHeaps; i++) {
+               // Set every thread's process heap to this one.
+               theap[i].setpHeap(this);
+               // Set every thread heap's index.
+               theap[i].setIndex(i + 1);
+       }
+#if HEAP_LOG
+       for (i = 0; i < fMaxThreadHeaps + 1; i++) {
+               char fname[255];
+               sprintf(fname, "log%d", i);
+               unlink(fname);
+               _log[i].open(fname);
+       }
+#endif
+#if HEAP_FRAG_STATS
+       hoardLockInit(_statsLock, "hoard stats");
+#endif
+       hoardLockInit(_bufferLock, "hoard buffer");
+}
+
+
+// Print out statistics information.
+void
+processHeap::stats(void)
+{
+#if HEAP_STATS
+       int umax = 0;
+       int amax = 0;
+       for (int j = 0; j < fMaxThreadHeaps; j++) {
+               for (int i = 0; i < SIZE_CLASSES; i++) {
+                       amax += theap[j].maxAllocated(i) * sizeFromClass(i);
+                       umax += theap[j].maxInUse(i) * sizeFromClass(i);
+               }
+       }
+       printf("Amax <= %d, Umax <= %d\n", amax, umax);
+
+#if HEAP_FRAG_STATS
+       amax = getMaxAllocated();
+       umax = getMaxRequested();
+       printf
+       ("Maximum allocated = %d\nMaximum in use = %d\nIn use at max allocated = %d\n",
+        amax, umax, getInUseAtMaxAllocated());
+       printf("Still in use = %d\n", _currentRequested);
+       printf("Fragmentation (3) = %f\n",
+                  (float)amax / (float)getInUseAtMaxAllocated());
+       printf("Fragmentation (4) = %f\n", (float)amax / (float)umax);
+#endif
+#endif // HEAP_STATS
+
+#if HEAP_LOG
+       printf("closing logs.\n");
+       fflush(stdout);
+       for (int i = 0; i < fMaxThreadHeaps + 1; i++) {
+               _log[i].close();
+       }
+#endif
+}
+
+
+#if HEAP_FRAG_STATS
+void
+processHeap::setAllocated(int requestedSize, int actualSize)
+{
+       hoardLock(_statsLock);
+       _currentRequested += requestedSize;
+       _currentAllocated += actualSize;
+       if (_currentRequested > _maxRequested) {
+               _maxRequested = _currentRequested;
+       }
+       if (_currentAllocated > _maxAllocated) {
+               _maxAllocated = _currentAllocated;
+               _inUseAtMaxAllocated = _currentRequested;
+       }
+       hoardUnlock(_statsLock);
+}
+
+
+void
+processHeap::setDeallocated(int requestedSize, int actualSize)
+{
+       hoardLock(_statsLock);
+       _currentRequested -= requestedSize;
+       _currentAllocated -= actualSize;
+       hoardUnlock(_statsLock);
+}
+#endif // HEAP_FRAG_STATS
+
+
+// free (ptr, pheap):
+//   inputs: a pointer to an object allocated by malloc().
+//   side effects: returns the block to the object's superblock;
+//                 updates the thread heap's statistics;
+//                 may release the superblock to the process heap.
+
+void
+processHeap::free(void *ptr)
+{
+       // Return if ptr is 0.
+       // This is the behavior prescribed by the standard.
+       if (ptr == 0)
+               return;
+
+       // Find the block and superblock corresponding to this ptr.
+
+       block *b = (block *) ptr - 1;
+       assert(b->isValid());
+
+       // Check to see if this block came from a memalign() call.
+       if (((unsigned long)b->getNext() & 1) == 1) {
+               // It did. Set the block to the actual block header.
+               b = (block *) ((unsigned long)b->getNext() & ~1);
+               assert(b->isValid());
+       }
+
+       b->markFree();
+
+       superblock *sb = b->getSuperblock();
+       assert(sb);
+       assert(sb->isValid());
+
+       const int sizeclass = sb->getBlockSizeClass();
+
+       //
+       // Return the block to the superblock,
+       // find the heap that owns this superblock
+       // and update its statistics.
+       //
+
+       hoardHeap *owner;
+
+       // By acquiring the up lock on the superblock,
+       // we prevent it from moving to the global heap.
+       // This eventually pins it down in one heap,
+       // so this loop is guaranteed to terminate.
+       // (It should generally take no more than two iterations.)
+       sb->upLock();
+       while (1) {
+               owner = sb->getOwner();
+               owner->lock();
+               if (owner == sb->getOwner()) {
+                       break;
+               } else {
+                       owner->unlock();
+               }
+               // Suspend to allow ownership to quiesce.
+               hoardYield();
+       }
+
+#if HEAP_LOG
+       MemoryRequest m;
+       m.free(ptr);
+       getLog(owner->getIndex()).append(m);
+#endif
+#if HEAP_FRAG_STATS
+       setDeallocated(b->getRequestedSize(), 0);
+#endif
+
+       int sbUnmapped = owner->freeBlock(b, sb, sizeclass, this);
+
+       owner->unlock();
+       if (!sbUnmapped)
+               sb->upUnlock();
+}
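
One detail in free() above that is easy to miss: memory returned by memalign() carries a stub block header whose next pointer holds the address of the real header with bit 0 set, and free() masks that bit off before continuing. The snippet below sketches just that tagging trick with invented types (it is not the allocator's block class; block headers are aligned, so bit 0 is always free to use as a flag):

#include <cassert>
#include <cstdint>
#include <cstdio>

struct FakeBlock {
	FakeBlock *next;
};

// What a memalign()-style path would do: remember the real header in the stub,
// marking the stored pointer with its low bit.
static void tagStub(FakeBlock *stub, FakeBlock *realHeader)
{
	stub->next = (FakeBlock *)((uintptr_t)realHeader | 1);
}

// What free() does: if the low bit is set, strip it and follow the pointer.
static FakeBlock *resolveHeader(FakeBlock *candidate)
{
	if (((uintptr_t)candidate->next & 1) == 1)
		return (FakeBlock *)((uintptr_t)candidate->next & ~(uintptr_t)1);
	return candidate;
}

int main()
{
	FakeBlock real = { NULL };
	FakeBlock stub = { NULL };
	tagStub(&stub, &real);
	assert(resolveHeader(&stub) == &real);   // aligned allocation resolves to the real header
	assert(resolveHeader(&real) == &real);   // ordinary allocation is left alone
	printf("tag resolved correctly\n");
	return 0;
}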
diff --git a/src/system/libroot/posix/malloc/processheap.h b/src/system/libroot/posix/malloc/processheap.h
new file mode 100644
index 0000000000..2c94af90eb
--- /dev/null
+++ b/src/system/libroot/posix/malloc/processheap.h
@@ -0,0 +1,267 @@
+///-*-C++-*-//////////////////////////////////////////////////////////////////
+//
+// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+//        for Shared-Memory Multiprocessors
+// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+// Copyright (c) 1998-2000, The University of Texas at Austin.
+//
+// This library is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Library General Public License as
+// published by the Free Software Foundation, http://www.fsf.org.
+//
+// This library is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Library General Public License for more details.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+/* We use one processHeap for the whole program. */
+
+#ifndef _PROCESSHEAP_H_
+#define _PROCESSHEAP_H_
+
+#include "config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "arch-specific.h"
+#include "heap.h"
+#if USE_PRIVATE_HEAPS
+#      include "privateheap.h"
+#      define HEAPTYPE privateHeap
+#else
+#      define HEAPTYPE threadHeap
+#      include "threadheap.h"
+#endif
+
+#if HEAP_LOG
+#      include "memstat.h"
+#      include "log.h"
+#endif
+
+
+namespace BPrivate {
+
+class processHeap : public hoardHeap {
+       public:
+               // Always grab at least this many superblocks' worth of memory which
+               // we parcel out.
+               enum { REFILL_NUMBER_OF_SUPERBLOCKS = 16 };
+
+               processHeap();
+               ~processHeap(void)
+               {
+#if HEAP_STATS
+                       stats();
+#endif
+               }
+               // Memory deallocation routines.
+               void free(void *ptr);
+
+               // Print out statistics information.
+               void stats(void);
+
+               // Get a thread heap index.
+               inline int getHeapIndex(void);
+
+               // Get thread heap max.
+               inline int getMaxThreadHeaps(void);
+
+               // Get the thread heap with index i.
+               inline HEAPTYPE & getHeap(int i);
+
+               // Extract a superblock.
+               inline superblock *acquire(const int c, hoardHeap * dest);
+
+               // Get space for a superblock.
+               inline char *getSuperblockBuffer(void);
+
+               // Insert a superblock.
+               inline void release(superblock * sb);
+
+#if HEAP_LOG
+               // Get the log for index i.
+               inline Log < MemoryRequest > &getLog(int i);
+#endif
+
+#if HEAP_FRAG_STATS
+               // Declare that we have allocated an object.
+               void setAllocated(int requestedSize, int actualSize);
+
+               // Declare that we have deallocated an object.
+               void setDeallocated(int requestedSize, int actualSize);
+
+               // Return the number of wasted bytes at the high-water mark
+               // (maxAllocated - maxRequested)
+               inline int getFragmentation(void);
+
+               int
+               getMaxAllocated(void)
+               {
+                       return _maxAllocated;
+               }
+
+               int
+               getInUseAtMaxAllocated(void)
+               {
+                       return _inUseAtMaxAllocated;
+               }
+
+               int
+               getMaxRequested(void)
+               {
+                       return _maxRequested;
+               }
+#endif
+
+       private:
+               // Hide the lock & unlock methods.
+               void
+               lock(void)
+               {
+                       hoardHeap::lock();
+               }
+
+               void
+               unlock(void)
+               {
+                       hoardHeap::unlock();
+               }
+
+               // Prevent copying and assignment.
+               processHeap(const processHeap &);
+               const processHeap & operator=(const processHeap &);
+
+               // The per-thread heaps.
+               HEAPTYPE* theap;
+
+#if HEAP_FRAG_STATS
+               // Statistics required to compute fragmentation.  We cannot
+               // unintrusively keep track of these on a multiprocessor, because
+               // this would become a bottleneck.
+
+               int _currentAllocated;
+               int _currentRequested;
+               int _maxAllocated;
+               int _maxRequested;
+               int _inUseAtMaxAllocated;
+               int _fragmentation;
+
+               // A lock to protect these statistics.
+               hoardLockType _statsLock;
+#endif
+
+#if HEAP_LOG
+               Log < MemoryRequest >* _log;
+#endif
+
+               // A lock for the superblock buffer.
+               hoardLockType _bufferLock;
+
+               char *_buffer;
+               int _bufferCount;
+};
+
+
+HEAPTYPE &
+processHeap::getHeap(int i)
+{
+       assert(theap != NULL);
+       assert(i >= 0);
+       assert(i < fMaxThreadHeaps);
+       return theap[i];
+}
+
+
+#if HEAP_LOG
+Log<MemoryRequest > &
+processHeap::getLog(int i)
+{
+       assert(_log != NULL);
+       assert(i >= 0);
+       assert(i < fMaxThreadHeaps + 1);
+       return _log[i];
+}
+#endif
+
+
+// Hash out the thread id to a heap and return an index to that heap.
+
+int
+processHeap::getHeapIndex(void)
+{
+       // Here we use the number of processors as the maximum number of heaps.
+       // In fact, for efficiency, we just round up to the highest power of two,
+       // times two.
+       int tid = find_thread(NULL) & _numProcessorsMask;
+       assert(tid < fMaxThreadHeaps);
+       return tid;
+}
+
+
+// Return the maximum number of heaps.
+
+int
+processHeap::getMaxThreadHeaps(void)
+{
+       return fMaxThreadHeaps;
+}
+
+
+superblock *
+processHeap::acquire(const int sizeclass, hoardHeap * dest)
+{
+       lock();
+
+       // Remove the superblock with the most free space.
+       superblock *maxSb = removeMaxSuperblock(sizeclass);
+       if (maxSb)
+               maxSb->setOwner(dest);
+
+       unlock();
+
+       return maxSb;
+}
+
+
+inline char *
+processHeap::getSuperblockBuffer(void)
+{
+       char *buf;
+       hoardLock(_bufferLock);
+       if (_bufferCount == 0) {
+               _buffer = (char *)hoardSbrk(SUPERBLOCK_SIZE
+                       * REFILL_NUMBER_OF_SUPERBLOCKS);
+               _bufferCount = REFILL_NUMBER_OF_SUPERBLOCKS;
+       }
+
+       buf = _buffer;
+       _buffer += SUPERBLOCK_SIZE;
+       _bufferCount--;
+       hoardUnlock(_bufferLock);
+
+       return buf;
+}
+
+
+// Put a superblock back into our list of superblocks.
+
+void
+processHeap::release(superblock *sb)
+{
+       assert(EMPTY_FRACTION * sb->getNumAvailable() > sb->getNumBlocks());

[ *** diff truncated: 1351 lines dropped *** ]
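
The getHeapIndex() comment in processheap.h (above the truncation) describes the thread-to-heap mapping: the heap count is the processor count rounded up to the next power of two and then doubled, so ANDing the thread id with (heap count - 1) picks a heap without a modulo. A toy sketch of that mapping follows; the CPU count, helper name and thread ids are all invented for illustration and nothing here is read from the Haiku sources:

#include <cstdio>

// Assumed helper: smallest power of two >= n, for n >= 1.
static int nextPowerOfTwo(int n)
{
	int p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

int main()
{
	const int cpuCount = 3;                                   // pretend system value
	const int maxThreadHeaps = 2 * nextPowerOfTwo(cpuCount);  // 8 heaps
	const int processorsMask = maxThreadHeaps - 1;            // 0x7

	for (int tid = 40; tid < 44; tid++) {
		// Same masking idea as getHeapIndex(): tid & _numProcessorsMask.
		int heapIndex = tid & processorsMask;
		printf("thread %d -> heap %d (of %d)\n", tid, heapIndex, maxThreadHeaps);
	}
	return 0;
}

Because the mask is one less than a power of two, consecutive thread ids spread across the heaps and the resulting index is always below the heap count, which is what the assert in getHeapIndex() checks.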


############################################################################

Revision:    hrev53600
Commit:      1f391e37ea3dd9f166204c3e658eab29ace4284b
URL:         https://git.haiku-os.org/haiku/commit/?id=1f391e37ea3d
Author:      Adrien Destugues <pulkomandy@xxxxxxxxxxxxx>
Date:        Sun Nov 24 14:35:51 2019 UTC

Switch back to hoard2 for now.

We'll reconsider rpmalloc when it has less overhead.

----------------------------------------------------------------------------

