[haiku-commits] r34528 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sun, 6 Dec 2009 18:18:05 +0100 (CET)

Author: bonefish
Date: 2009-12-06 18:18:04 +0100 (Sun, 06 Dec 2009)
New Revision: 34528
Changeset: http://dev.haiku-os.org/changeset/34528/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h
   haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp
   haiku/trunk/src/system/kernel/vm/VMArea.cpp
   haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp
   haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.h
   haiku/trunk/src/system/kernel/vm/VMKernelArea.cpp
   haiku/trunk/src/system/kernel/vm/VMKernelArea.h
   haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.cpp
   haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.h
   haiku/trunk/src/system/kernel/vm/VMUserArea.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
Log:
* Renamed VMAddressSpace::ResizeArea{Head,Tail}() to ShrinkArea{Head,Tail}()
  to clarify that they never enlarge the area.
* Reimplemented VMKernelAddressSpace. It is somewhat inspired by Bonwick's
  vmem resource allocator (though we have different requirements):
  - We consider the complete address space to be divided into contiguous
    ranges of type free, reserved, or area, each range being represented by
    a VMKernelAddressRange object.
  - The range objects are managed in an AVL tree and a doubly linked list
    (the latter only for faster iteration) sorted by address. This provides
    O(log(n)) lookup, insertion and removal.
  - For each power of two size we maintain a list of free ranges of at least
    that size. Thus for the most common case of B_ANY*_ADDRESS area
    allocation, we find a free range in constant time (the rest of the
    processing being O(log(n))) with a rather good fit. This should also
    help avoid address space fragmentation.
  While the new implementation should be faster, particularly with an
  increasing number of areas, I couldn't measure any difference in the -j2
haiku build. From a cursory test the -j8 build hasn't tangibly benefited
  either.


Modified: haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h      2009-12-06 
16:01:21 UTC (rev 34527)
+++ haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h      2009-12-06 
17:18:04 UTC (rev 34528)
@@ -62,6 +62,8 @@
 
                        VMAddressSpace*&        HashTableLink() { return 
fHashTableLink; }
 
+       virtual status_t                        InitObject();
+
        virtual VMArea*                         FirstArea() const = 0;
        virtual VMArea*                         NextArea(VMArea* area) const = 
0;
 
@@ -70,13 +72,15 @@
                                                                        uint32 
protection) = 0;
        virtual void                            DeleteArea(VMArea* area) = 0;
        virtual status_t                        InsertArea(void** _address, 
uint32 addressSpec,
-                                                                       addr_t 
size, VMArea* area) = 0;
+                                                                       size_t 
size, VMArea* area) = 0;
        virtual void                            RemoveArea(VMArea* area) = 0;
 
        virtual bool                            CanResizeArea(VMArea* area, 
size_t newSize) = 0;
        virtual status_t                        ResizeArea(VMArea* area, size_t 
newSize) = 0;
-       virtual status_t                        ResizeAreaHead(VMArea* area, 
size_t size) = 0;
-       virtual status_t                        ResizeAreaTail(VMArea* area, 
size_t size) = 0;
+       virtual status_t                        ShrinkAreaHead(VMArea* area, 
size_t newSize)
+                                                                       = 0;
+       virtual status_t                        ShrinkAreaTail(VMArea* area, 
size_t newSize)
+                                                                       = 0;
 
        virtual status_t                        ReserveAddressRange(void** 
_address,
                                                                        uint32 
addressSpec, size_t size,

Modified: haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp 2009-12-06 16:01:21 UTC 
(rev 34527)
+++ haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp 2009-12-06 17:18:04 UTC 
(rev 34528)
@@ -12,6 +12,8 @@
 
 #include <stdlib.h>
 
+#include <new>
+
 #include <KernelExport.h>
 
 #include <util/OpenHashTable.h>
@@ -181,17 +183,24 @@
 }
 
 
+status_t
+VMAddressSpace::InitObject()
+{
+       return B_OK;
+}
+
+
 void
 VMAddressSpace::Dump() const
 {
        kprintf("dump of address space at %p:\n", this);
-       kprintf("id: 0x%lx\n", fID);
-       kprintf("ref_count: %ld\n", fRefCount);
-       kprintf("fault_count: %ld\n", fFaultCount);
+       kprintf("id: %" B_PRId32 "\n", fID);
+       kprintf("ref_count: %" B_PRId32 "\n", fRefCount);
+       kprintf("fault_count: %" B_PRId32 "\n", fFaultCount);
        kprintf("translation_map: %p\n", &fTranslationMap);
-       kprintf("base: 0x%lx\n", fBase);
-       kprintf("end: 0x%lx\n", fEndAddress);
-       kprintf("change_count: 0x%lx\n", fChangeCount);
+       kprintf("base: %#" B_PRIxADDR "\n", fBase);
+       kprintf("end: %#" B_PRIxADDR "\n", fEndAddress);
+       kprintf("change_count: %" B_PRId32 "\n", fChangeCount);
 }
 
 
@@ -205,11 +214,17 @@
        if (addressSpace == NULL)
                return B_NO_MEMORY;
 
+       status_t status = addressSpace->InitObject();
+       if (status != B_OK) {
+               delete addressSpace;
+               return status;
+       }
+
        TRACE(("vm_create_aspace: team %ld (%skernel): %#lx bytes starting at "
                "%#lx => %p\n", id, kernel ? "!" : "", size, base, 
addressSpace));
 
        // initialize the corresponding translation map
-       status_t status = arch_vm_translation_map_init_map(
+       status = arch_vm_translation_map_init_map(
                &addressSpace->fTranslationMap, kernel);
        if (status != B_OK) {
                delete addressSpace;
@@ -321,9 +336,9 @@
                                areaSize += area->Size();
                        }
                }
-               kprintf("%p  %6ld   %#010lx   %#10lx   %10ld   %10lld\n",
-                       space, space->ID(), space->Base(), space->EndAddress(), 
areaCount,
-                       areaSize);
+               kprintf("%p  %6" B_PRId32 "   %#010" B_PRIxADDR "   %#10" 
B_PRIxADDR
+                       "   %10" B_PRId32 "   %10" B_PRIdOFF "\n", space, 
space->ID(),
+                       space->Base(), space->EndAddress(), areaCount, 
areaSize);
        }
 
        return 0;

Modified: haiku/trunk/src/system/kernel/vm/VMArea.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMArea.cpp 2009-12-06 16:01:21 UTC (rev 
34527)
+++ haiku/trunk/src/system/kernel/vm/VMArea.cpp 2009-12-06 17:18:04 UTC (rev 
34528)
@@ -10,6 +10,8 @@
 
 #include <vm/VMArea.h>
 
+#include <new>
+
 #include <heap.h>
 
 

Modified: haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp   2009-12-06 
16:01:21 UTC (rev 34527)
+++ haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp   2009-12-06 
17:18:04 UTC (rev 34528)
@@ -22,14 +22,39 @@
 
 //#define TRACE_VM
 #ifdef TRACE_VM
-#      define TRACE(x) dprintf x
+#      define TRACE(x...) dprintf(x)
 #else
-#      define TRACE(x) ;
+#      define TRACE(x...) ;
 #endif
 
 
+//#define PARANOIA_CHECKS
+#ifdef PARANOIA_CHECKS
+#      define PARANOIA_CHECK_STRUCTURES()      _CheckStructures()
+#else
+#      define PARANOIA_CHECK_STRUCTURES()      do {} while (false)
+#endif
+
+
+static int
+ld(size_t value)
+{
+       int index = -1;
+       while (value > 0) {
+               value >>= 1;
+               index++;
+       }
+
+       return index;
+}
+
+
 /*!    Verifies that an area with the given aligned base and size fits into
        the spot defined by base and limit and checks for overflows.
+       \param base Minimum base address valid for the area.
+       \param alignedBase The base address of the range to check.
+       \param size The size of the area.
+       \param limit The last (inclusive) address of the range to check.
 */
 static inline bool
 is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
@@ -39,10 +64,12 @@
 }
 
 
+// #pragma mark - VMKernelAddressSpace
+
+
 VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t 
size)
        :
-       VMAddressSpace(id, base, size, "kernel address space"),
-       fAreaHint(NULL)
+       VMAddressSpace(id, base, size, "kernel address space")
 {
 }
 
@@ -53,24 +80,48 @@
 }
 
 
+status_t
+VMKernelAddressSpace::InitObject()
+{
+       // create the free lists
+       size_t size = fEndAddress - fBase + 1;
+       fFreeListCount = ld(size) - PAGE_SHIFT + 1;
+       fFreeLists = new(nogrow) RangeFreeList[fFreeListCount];
+       if (fFreeLists == NULL)
+               return B_NO_MEMORY;
+
+       Range* range = new(nogrow) Range(fBase, size, Range::RANGE_FREE);
+       if (range == NULL)
+               return B_NO_MEMORY;
+
+       _InsertRange(range);
+
+       TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
+               " - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
+               fFreeListCount);
+
+       return B_OK;
+}
+
+
 inline VMArea*
 VMKernelAddressSpace::FirstArea() const
 {
-       VMKernelArea* area = fAreas.Head();
-       while (area != NULL && area->id == RESERVED_AREA_ID)
-               area = fAreas.GetNext(area);
-       return area;
+       Range* range = fRangeList.Head();
+       while (range != NULL && range->type != Range::RANGE_AREA)
+               range = fRangeList.GetNext(range);
+       return range != NULL ? range->area : NULL;
 }
 
 
 inline VMArea*
 VMKernelAddressSpace::NextArea(VMArea* _area) const
 {
-       VMKernelArea* area = static_cast<VMKernelArea*>(_area);
-       area = fAreas.GetNext(area);
-       while (area != NULL && area->id == RESERVED_AREA_ID)
-               area = fAreas.GetNext(area);
-       return area;
+       Range* range = static_cast<VMKernelArea*>(_area)->Range();
+       do {
+               range = fRangeList.GetNext(range);
+       } while (range != NULL && range->type != Range::RANGE_AREA);
+       return range != NULL ? range->area : NULL;
 }
 
 
@@ -85,6 +136,8 @@
 void
 VMKernelAddressSpace::DeleteArea(VMArea* area)
 {
+       TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", area);
+
        delete static_cast<VMKernelArea*>(area);
 }
 
@@ -93,22 +146,12 @@
 VMArea*
 VMKernelAddressSpace::LookupArea(addr_t address) const
 {
-       // check the area hint first
-       if (fAreaHint != NULL && fAreaHint->ContainsAddress(address))
-               return fAreaHint;
+       Range* range = fRangeTree.FindClosest(address, true);
+       if (range == NULL || range->type != Range::RANGE_AREA)
+               return NULL;
 
-       for (VMKernelAreaList::ConstIterator it = fAreas.GetIterator();
-                       VMKernelArea* area = it.Next();) {
-               if (area->id == RESERVED_AREA_ID)
-                       continue;
-
-               if (area->ContainsAddress(address)) {
-                       fAreaHint = area;
-                       return area;
-               }
-       }
-
-       return NULL;
+       VMKernelArea* area = range->area;
+       return area->ContainsAddress(address) ? area : NULL;
 }
 
 
@@ -119,46 +162,31 @@
 */
 status_t
 VMKernelAddressSpace::InsertArea(void** _address, uint32 addressSpec,
-       addr_t size, VMArea* _area)
+       size_t size, VMArea* _area)
 {
+       TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" 
B_PRIxSIZE
+               ", %p \"%s\")\n", *_address, addressSpec, size, _area, 
_area->name);
+
        VMKernelArea* area = static_cast<VMKernelArea*>(_area);
 
-       addr_t searchBase, searchEnd;
-       status_t status;
+       Range* range;
+       status_t error = _AllocateRange((addr_t)*_address, addressSpec, size,
+               addressSpec == B_EXACT_ADDRESS, range);
+       if (error != B_OK)
+               return error;
 
-       switch (addressSpec) {
-               case B_EXACT_ADDRESS:
-                       searchBase = (addr_t)*_address;
-                       searchEnd = (addr_t)*_address + (size - 1);
-                       break;
+       range->type = Range::RANGE_AREA;
+       range->area = area;
+       area->SetRange(range);
+       area->SetBase(range->base);
+       area->SetSize(range->size);
 
-               case B_BASE_ADDRESS:
-                       searchBase = (addr_t)*_address;
-                       searchEnd = fEndAddress;
-                       break;
+       *_address = (void*)area->Base();
+       fFreeSpace -= area->Size();
 
-               case B_ANY_ADDRESS:
-               case B_ANY_KERNEL_ADDRESS:
-               case B_ANY_KERNEL_BLOCK_ADDRESS:
-                       searchBase = fBase;
-                       // TODO: remove this again when vm86 mode is moved into 
the kernel
-                       // completely (currently needs a userland address 
space!)
-                       if (searchBase == USER_BASE)
-                               searchBase = USER_BASE_ANY;
-                       searchEnd = fEndAddress;
-                       break;
+       PARANOIA_CHECK_STRUCTURES();
 
-               default:
-                       return B_BAD_VALUE;
-       }
-
-       status = _InsertAreaSlot(searchBase, size, searchEnd, addressSpec, 
area);
-       if (status == B_OK) {
-               *_address = (void*)area->Base();
-               fFreeSpace -= area->Size();
-       }
-
-       return status;
+       return B_OK;
 }
 
 
@@ -166,104 +194,157 @@
 void
 VMKernelAddressSpace::RemoveArea(VMArea* _area)
 {
+       TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);
+
        VMKernelArea* area = static_cast<VMKernelArea*>(_area);
 
-       fAreas.Remove(area);
+       _FreeRange(area->Range());
 
-       if (area->id != RESERVED_AREA_ID) {
-               IncrementChangeCount();
-               fFreeSpace += area->Size();
+       fFreeSpace += area->Size();
 
-               if (area == fAreaHint)
-                       fAreaHint = NULL;
-       }
+       PARANOIA_CHECK_STRUCTURES();
 }
 
 
 bool
 VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
 {
-       VMKernelArea* next = fAreas.GetNext(static_cast<VMKernelArea*>(area));
-       addr_t newEnd = area->Base() + (newSize - 1);
-       if (next == NULL) {
-               if (fEndAddress >= newEnd)
-                       return true;
-       } else {
-               if (next->Base() > newEnd)
-                       return true;
-       }
+       Range* range = static_cast<VMKernelArea*>(area)->Range();
 
-       // If the area was created inside a reserved area, it can
-       // also be resized in that area
-       // TODO: if there is free space after the reserved area, it could
-       // be used as well...
-       if (next->id == RESERVED_AREA_ID && next->cache_offset <= area->Base()
-               && next->Base() + (next->Size() - 1) >= newEnd) {
+       if (newSize <= range->size)
                return true;
+
+       Range* nextRange = fRangeList.GetNext(range);
+       if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
+               return false;
+
+       if (nextRange->type == Range::RANGE_RESERVED
+               && nextRange->reserved.base > range->base) {
+               return false;
        }
 
-       return false;
+       // TODO: If there is free space after a reserved range (or vice versa), 
it
+       // could be used as well.
+       return newSize - range->size <= nextRange->size;
 }
 
 
 status_t
 VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize)
 {
+       TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
+               newSize);
+
        VMKernelArea* area = static_cast<VMKernelArea*>(_area);
+       Range* range = area->Range();
 
-       addr_t newEnd = area->Base() + (newSize - 1);
-       VMKernelArea* next = fAreas.GetNext(area);
-       if (next != NULL && next->Base() <= newEnd) {
-               if (next->id != RESERVED_AREA_ID
-                       || next->cache_offset > area->Base()
-                       || next->Base() + (next->Size() - 1) < newEnd) {
-                       panic("resize situation for area %p has changed 
although we "
-                               "should have the address space lock", area);
-                       return B_ERROR;
+       if (newSize == range->size)
+               return B_OK;
+
+       Range* nextRange = fRangeList.GetNext(range);
+
+       if (newSize < range->size) {
+               if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
+                       // a free range is following -- just enlarge it
+                       _FreeListRemoveRange(nextRange, nextRange->size);
+                       nextRange->size += range->size - newSize;
+                       nextRange->base = range->base + newSize;
+                       _FreeListInsertRange(nextRange, nextRange->size);
+               } else {
+                       // no free range following -- we need to allocate a new 
one and
+                       // insert it
+                       nextRange = new(nogrow) Range(range->base + newSize,
+                               range->size - newSize, Range::RANGE_FREE);
+                       if (nextRange == NULL)
+                               return B_NO_MEMORY;
+                       _InsertRange(nextRange);
                }
+       } else {
+               if (nextRange == NULL
+                       || (nextRange->type == Range::RANGE_RESERVED
+                               && nextRange->reserved.base > range->base)) {
+                       return B_BAD_VALUE;
+               }
+               // TODO: If there is free space after a reserved range (or vice 
versa),
+               // it could be used as well.
+               size_t sizeDiff = newSize - range->size;
+               if (sizeDiff > nextRange->size)
+                       return B_BAD_VALUE;
 
-               // resize reserved area
-               addr_t offset = area->Base() + newSize - next->Base();
-               if (next->Size() <= offset) {
-                       RemoveArea(next);
-                       free(next);
+               if (sizeDiff == nextRange->size) {
+                       // The next range is completely covered -- remove and 
delete it.
+                       _RemoveRange(nextRange);
+                       delete nextRange;
                } else {
-                       status_t error = ResizeAreaHead(next, next->Size() - 
offset);
-                       if (error != B_OK)
-                               return error;
+                       // The next range is only partially covered -- shrink 
it.
+                       if (nextRange->type == Range::RANGE_FREE)
+                               _FreeListRemoveRange(nextRange, 
nextRange->size);
+                       nextRange->size -= sizeDiff;
+                       nextRange->base = range->base + newSize;
+                       if (nextRange->type == Range::RANGE_FREE)
+                               _FreeListInsertRange(nextRange, 
nextRange->size);
                }
        }
 
-       return ResizeAreaTail(area, newSize);
-               // TODO: In case of error we should undo the change to the 
reserved
-               // area.
+       range->size = newSize;
+       area->SetSize(newSize);
+
+       IncrementChangeCount();
+       PARANOIA_CHECK_STRUCTURES();
+       return B_OK;
 }
 
 
 status_t
-VMKernelAddressSpace::ResizeAreaHead(VMArea* area, size_t size)
+VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize)
 {
-       size_t oldSize = area->Size();
-       if (size == oldSize)
+       TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", 
_area,
+               newSize);
+
+       VMKernelArea* area = static_cast<VMKernelArea*>(_area);
+       Range* range = area->Range();
+
+       if (newSize == range->size)
                return B_OK;
 
-       area->SetBase(area->Base() + oldSize - size);
-       area->SetSize(size);
+       if (newSize > range->size)
+               return B_BAD_VALUE;
 
+       Range* previousRange = fRangeList.GetPrevious(range);
+
+       size_t sizeDiff = range->size - newSize;
+       if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
+               // the previous range is free -- just enlarge it
+               _FreeListRemoveRange(previousRange, previousRange->size);
+               previousRange->size += sizeDiff;
+               _FreeListInsertRange(previousRange, previousRange->size);
+               range->base += sizeDiff;
+               range->size = newSize;
+       } else {
+               // no free range before -- we need to allocate a new one and
+               // insert it
+               previousRange = new(nogrow) Range(range->base, sizeDiff,
+                       Range::RANGE_FREE);
+               if (previousRange == NULL)
+                       return B_NO_MEMORY;
+               range->base += sizeDiff;
+               range->size = newSize;
+               _InsertRange(previousRange);
+       }
+
+       area->SetBase(range->base);
+       area->SetSize(range->size);
+
+       IncrementChangeCount();
+       PARANOIA_CHECK_STRUCTURES();
        return B_OK;
 }
 
 
 status_t
-VMKernelAddressSpace::ResizeAreaTail(VMArea* area, size_t size)
+VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize)
 {
-       size_t oldSize = area->Size();
-       if (size == oldSize)
-               return B_OK;
-
-       area->SetSize(size);
-
-       return B_OK;
+       return ResizeArea(area, newSize);
 }
 
 
@@ -271,27 +352,28 @@
 VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
        size_t size, uint32 flags)
 {
-       // check to see if this address space has entered DELETE state
-       if (fDeleting) {
-               // okay, someone is trying to delete this address space now, so 
we
-               // can't insert the area, let's back out
+       TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
+               B_PRIxSIZE ", %#" B_PRIx32 ")\n", *_address, addressSpec, size, 
flags);
+
+       // Don't allow range reservations, if the address space is about to be
+       // deleted.
+       if (fDeleting)
                return B_BAD_TEAM_ID;
-       }
 
-       VMKernelArea* area = VMKernelArea::CreateReserved(this, flags);
-       if (area == NULL)
-               return B_NO_MEMORY;
+       Range* range;
+       status_t error = _AllocateRange((addr_t)*_address, addressSpec, size, 
false,
+               range);
+       if (error != B_OK)
+               return error;
 
-       status_t status = InsertArea(_address, addressSpec, size, area);
-       if (status != B_OK) {
-               free(area);
-               return status;
-       }
+       range->type = Range::RANGE_RESERVED;
+       range->reserved.base = range->base;
+       range->reserved.flags = flags;
 
-       area->cache_offset = area->Base();
-               // we cache the original base address here
+       *_address = (void*)range->base;
 
        Get();
+       PARANOIA_CHECK_STRUCTURES();
        return B_OK;
 }
 
@@ -299,28 +381,34 @@
 status_t
 VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size)
 {
-       // check to see if this address space has entered DELETE state
-       if (fDeleting) {
-               // okay, someone is trying to delete this address space now, so 
we can't
-               // insert the area, so back out
+       TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
+               B_PRIxSIZE ")\n", address, size);
+
+       // Don't allow range unreservations, if the address space is about to be
+       // deleted. UnreserveAllAddressRanges() must be used.
+       if (fDeleting)
                return B_BAD_TEAM_ID;
-       }
 
-       // search area list and remove any matching reserved ranges
+       // search range list and remove any matching reserved ranges
        addr_t endAddress = address + (size - 1);
-       for (VMKernelAreaList::Iterator it = fAreas.GetIterator();
-                       VMKernelArea* area = it.Next();) {
-               // the area must be completely part of the reserved range
-               if (area->Base() + (area->Size() - 1) > endAddress)
-                       break;
-               if (area->id == RESERVED_AREA_ID && area->Base() >= 
(addr_t)address) {
-                       // remove reserved range
-                       RemoveArea(area);
+       Range* range = fRangeTree.FindClosest(address, false);
+       while (range != NULL && range->base + (range->size - 1) <= endAddress) {
+               // Get the next range for the iteration -- we need to skip free 
ranges,
+               // since _FreeRange() might join them with the current range 
and delete
+               // them.
+               Range* nextRange = fRangeList.GetNext(range);
+               while (nextRange != NULL && nextRange->type == 
Range::RANGE_FREE)
+                       nextRange = fRangeList.GetNext(nextRange);
+
+               if (range->type == Range::RANGE_RESERVED) {
+                       _FreeRange(range);
                        Put();
-                       free(area);
                }
+
+               range = nextRange;
        }
 
+       PARANOIA_CHECK_STRUCTURES();
        return B_OK;
 }
 
@@ -328,14 +416,24 @@
 void
 VMKernelAddressSpace::UnreserveAllAddressRanges()
 {
-       for (VMKernelAreaList::Iterator it = fAreas.GetIterator();
-                       VMKernelArea* area = it.Next();) {
-               if (area->id == RESERVED_AREA_ID) {
-                       RemoveArea(area);
+       Range* range = fRangeList.Head();
+       while (range != NULL) {
+               // Get the next range for the iteration -- we need to skip free 
ranges,
+               // since _FreeRange() might join them with the current range 
and delete
+               // them.
+               Range* nextRange = fRangeList.GetNext(range);
+               while (nextRange != NULL && nextRange->type == 
Range::RANGE_FREE)
+                       nextRange = fRangeList.GetNext(nextRange);
+
+               if (range->type == Range::RANGE_RESERVED) {
+                       _FreeRange(range);
                        Put();
-                       free(area);
                }
+
+               range = nextRange;
        }
+
+       PARANOIA_CHECK_STRUCTURES();
 }
 
 
@@ -343,326 +441,467 @@
 VMKernelAddressSpace::Dump() const
 {
        VMAddressSpace::Dump();
-       kprintf("area_hint: %p\n", fAreaHint);
 
        kprintf("area_list:\n");
 
-       for (VMKernelAreaList::ConstIterator it = fAreas.GetIterator();
-                       VMKernelArea* area = it.Next();) {
-               kprintf(" area 0x%lx: ", area->id);
-               kprintf("base_addr = 0x%lx ", area->Base());
-               kprintf("size = 0x%lx ", area->Size());
+       for (RangeList::ConstIterator it = fRangeList.GetIterator();
+                       Range* range = it.Next();) {
+               if (range->type != Range::RANGE_AREA)
+                       continue;
+
+               VMKernelArea* area = range->area;
+               kprintf(" area %" B_PRId32 ": ", area->id);
+               kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
+               kprintf("size = %#" B_PRIxSIZE " ", area->Size());
                kprintf("name = '%s' ", area->name);
-               kprintf("protection = 0x%lx\n", area->protection);
+               kprintf("protection = %#" B_PRIx32 "\n", area->protection);
        }
 }
 
 
-/*!    Finds a reserved area that covers the region spanned by \a start and
-       \a size, inserts the \a area into that region and makes sure that
-       there are reserved regions for the remaining parts.
-*/
-status_t
-VMKernelAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
-       VMKernelArea* area)
+inline void
+VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
 {
-       VMKernelArea* next;
+       TRACE("    VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
+               ", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, 
range->base,
+               range->size, range->type, size, ld(size) - PAGE_SHIFT);
 
-       for (VMKernelAreaList::Iterator it = fAreas.GetIterator();
-                       (next = it.Next()) != NULL;) {
-               if (next->Base() <= start
-                       && next->Base() + (next->Size() - 1) >= start + (size - 1)) {
-                       // This area covers the requested range
-                       if (next->id != RESERVED_AREA_ID) {
-                               // but it's not reserved space, it's a real area
-                               return B_BAD_VALUE;
-                       }
+       fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
+}
 
-                       break;
-               }
-       }
 
-       if (next == NULL)
-               return B_ENTRY_NOT_FOUND;
+inline void
+VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
+{
+       TRACE("    VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
+               ", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, 
range->base,
+               range->size, range->type, size, ld(size) - PAGE_SHIFT);
 
-       // Now we have to transfer the requested part of the reserved
-       // range to the new area - and remove, resize or split the old
-       // reserved area.
+       fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
+}
 
-       if (start == next->Base()) {
-               // the area starts at the beginning of the reserved range
-               fAreas.Insert(next, area);
 
-               if (size == next->Size()) {
-                       // the new area fully covers the reversed range
-                       fAreas.Remove(next);
-                       Put();
-                       free(next);
-               } else {
-                       // resize the reserved range behind the area
-                       next->SetBase(next->Base() + size);
-                       next->SetSize(next->Size() - size);
-               }
-       } else if (start + size == next->Base() + next->Size()) {
-               // the area is at the end of the reserved range
-               fAreas.Insert(fAreas.GetNext(next), area);
+void
+VMKernelAddressSpace::_InsertRange(Range* range)
+{
+       TRACE("    VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
+               B_PRIxSIZE ", %d))\n", range, range->base, range->size, 
range->type);
 
-               // resize the reserved range before the area
-               next->SetSize(start - next->Base());
-       } else {
-               // the area splits the reserved range into two separate ones
-               // we need a new reserved area to cover this space
-               VMKernelArea* reserved = VMKernelArea::CreateReserved(this,
-                       next->protection);
-               if (reserved == NULL)
-                       return B_NO_MEMORY;
+       // insert at the correct position in the range list
+       Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
+       fRangeList.Insert(
+               insertBeforeRange != NULL
+                       ? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
+               range);
 
-               Get();
-               fAreas.Insert(fAreas.GetNext(next), reserved);
-               fAreas.Insert(reserved, area);
+       // insert into tree
+       fRangeTree.Insert(range);
 
-               // resize regions
-               reserved->SetSize(next->Base() + next->Size() - start - size);
-               next->SetSize(start - next->Base());
-               reserved->SetBase(start + size);
-               reserved->cache_offset = next->cache_offset;
-       }
+       // insert in the free ranges list, if the range is free
+       if (range->type == Range::RANGE_FREE)
+               _FreeListInsertRange(range, range->size);
+}
 
-       area->SetBase(start);
-       area->SetSize(size);
-       IncrementChangeCount();
 
-       return B_OK;
+void
+VMKernelAddressSpace::_RemoveRange(Range* range)
+{
+       TRACE("    VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
+               B_PRIxSIZE ", %d))\n", range, range->base, range->size, 
range->type);
+
+       // remove from tree and range list
+       // insert at the correct position in the range list
+       fRangeTree.Remove(range);
+       fRangeList.Remove(range);
+
+       // if it is a free range, also remove it from the free list
+       if (range->type == Range::RANGE_FREE)
+               _FreeListRemoveRange(range, range->size);
 }
 
 
-/*!    Must be called with this address space's write lock held */
 status_t
-VMKernelAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
-       uint32 addressSpec, VMKernelArea* area)
+VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec,
+       size_t size, bool allowReservedRange, Range*& _range)
 {
-       VMKernelArea* last = NULL;
-       VMKernelArea* next;
-       bool foundSpot = false;
+       TRACE("  VMKernelAddressSpace::_AllocateRange(address: %#" B_PRIxADDR
+               ", size: %#" B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", 
reserved "
+               "allowed: %d)\n", address, size, addressSpec, 
allowReservedRange);
 
-       TRACE(("VMKernelAddressSpace::_InsertAreaSlot: address space %p, start "
-               "0x%lx, size %ld, end 0x%lx, addressSpec %ld, area %p\n", this, 
start,
-               size, end, addressSpec, area));
+       // prepare size, alignment and the base address for the range search
+       size = ROUNDUP(size, B_PAGE_SIZE);
+       size_t alignment = B_PAGE_SIZE;
 
-       // do some sanity checking
-       if (start < fBase || size == 0 || end > fEndAddress
-               || start + (size - 1) > end)
-               return B_BAD_ADDRESS;
+       switch (addressSpec) {
+               case B_EXACT_ADDRESS:
+               {
+                       if (address % B_PAGE_SIZE != 0)
+                               return B_BAD_VALUE;
+                       break;
+               }
 
-       if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
-               // search for a reserved area
-               status_t status = _InsertAreaIntoReservedRegion(start, size, area);
-               if (status == B_OK || status == B_BAD_VALUE)
-                       return status;
+               case B_BASE_ADDRESS:
+                       address = ROUNDUP(address, B_PAGE_SIZE);
+                       break;
 
-               // There was no reserved area, and the slot doesn't seem to be used
-               // already
-               // TODO: this could be further optimized.
-       }
+               case B_ANY_KERNEL_BLOCK_ADDRESS:
+                       // align the memory to the next power of two of the size
+                       while (alignment < size)
+                               alignment <<= 1;
 
-       size_t alignment = B_PAGE_SIZE;
-       if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
-               // align the memory to the next power of two of the size
-               while (alignment < size)
-                       alignment <<= 1;
+                       // fall through...
+
+               case B_ANY_ADDRESS:
+               case B_ANY_KERNEL_ADDRESS:
+                       address = fBase;
+                       // TODO: remove this again when vm86 mode is moved into the kernel
+                       // completely (currently needs a userland address space!)
+                       if (address == USER_BASE)
+                               address = USER_BASE_ANY;
+                       break;
+
+               default:
+                       return B_BAD_VALUE;
        }
 
-       start = ROUNDUP(start, alignment);
+       // find a range
+       Range* range = _FindFreeRange(address, size, alignment, addressSpec,
+               allowReservedRange, address);
+       if (range == NULL)
+               return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;
 
-       // walk up to the spot where we should start searching
-second_chance:
-       VMKernelAreaList::Iterator it = fAreas.GetIterator();
-       while ((next = it.Next()) != NULL) {
-               if (next->Base() > start + (size - 1)) {
-                       // we have a winner
-                       break;
+       TRACE("  VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
+               B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, 
range->size,
+               range->type);
+
+       // We have found a range. It might not be a perfect fit, in which case
+       // we have to split the range.
+       size_t rangeSize = range->size;
+
+       if (address == range->base) {
+               // allocation at the beginning of the range
+               if (range->size > size) {
+                       // only partial -- split the range
+                       Range* leftOverRange = new(nogrow) Range(address + size,
+                               range->size - size, range);
+                       if (leftOverRange == NULL)
+                               return B_NO_MEMORY;
+
+                       range->size = size;
+                       _InsertRange(leftOverRange);
                }
+       } else if (address + size == range->base + range->size) {
+               // allocation at the end of the range -- split the range
+               Range* leftOverRange = new(nogrow) Range(range->base,
+                       range->size - size, range);
+               if (leftOverRange == NULL)
+                       return B_NO_MEMORY;
 
-               last = next;
+               range->base = address;
+               range->size = size;
+               _InsertRange(leftOverRange);
+       } else {
+               // allocation in the middle of the range -- split the range in three
+               Range* leftOverRange1 = new(nogrow) Range(range->base,
+                       address - range->base, range);
+               if (leftOverRange1 == NULL)
+                       return B_NO_MEMORY;
+               Range* leftOverRange2 = new(nogrow) Range(address + size,
+                       range->size - size - leftOverRange1->size, range);
+               if (leftOverRange2 == NULL) {
+                       delete leftOverRange1;
+                       return B_NO_MEMORY;
+               }
+
+               range->base = address;
+               range->size = size;
+               _InsertRange(leftOverRange1);
+               _InsertRange(leftOverRange2);
        }
 
-       // find the right spot depending on the address specification - the area
-       // will be inserted directly after "last" ("next" is not referenced anymore)
+       // If the range is a free range, remove it from the respective free list.
+       if (range->type == Range::RANGE_FREE)
+               _FreeListRemoveRange(range, rangeSize);
 
+       IncrementChangeCount();
+
+       TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR 
", %#"
+               B_PRIxSIZE ")\n", range, range->base, range->size);
+
+       _range = range;
+       return B_OK;
+}
+
+
+VMKernelAddressSpace::Range*
+VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
+       size_t alignment, uint32 addressSpec, bool allowReservedRange,
+       addr_t& _foundAddress)
+{
+       TRACE("  VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
+               ", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", 
addressSpec: %#"
+               B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
+               addressSpec, allowReservedRange);
+
        switch (addressSpec) {
+               case B_BASE_ADDRESS:
+               {

[... truncated: 869 lines follow ...]

Other related posts:

  • » [haiku-commits] r34528 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/vm - ingo_weinhold