Author: bonefish Date: 2009-12-03 13:41:11 +0100 (Thu, 03 Dec 2009) New Revision: 34459 Changeset: http://dev.haiku-os.org/changeset/34459/haiku Modified: haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h haiku/trunk/headers/private/kernel/vm/VMArea.h haiku/trunk/src/system/kernel/elf.cpp haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp haiku/trunk/src/system/kernel/vm/VMArea.cpp haiku/trunk/src/system/kernel/vm/vm.cpp Log: * Changed the address space area list to doubly linked. The reason is to simplify migration of the area management, but as a side effect, it also makes area deletion O(1) (instead of O(n), n == number of areas in the address space). * Moved more area management functionality from vm.cpp to VMAddressSpace and VMArea structure creation to VMArea. Made the list and list link members themselves private. * VMAddressSpace tracks its amount of free space, now. This also replaces the previous mechanism to do that only for the kernel address space. It was broken anyway, since delete_area() subtracted the area size instead of adding it. * vm_free_unused_boot_loader_range(): - lastEnd could be set to a value < start, which could cause memory outside of the given range to be unmapped. Haven't checked whether this could happen in practice -- if so, it would be seriously unhealthy. - The range between the end of the last area in the range and the end of the range would never be freed. - Fixed potential integer overflows when computing addresses. 
Modified: haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h =================================================================== --- haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h 2009-12-03 12:24:17 UTC (rev 34458) +++ haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h 2009-12-03 12:41:11 UTC (rev 34459) @@ -13,12 +13,14 @@ #include <OS.h> #include <vm/vm_translation_map.h> +#include <vm/VMArea.h> -struct VMArea; +struct VMAddressSpace { +public: + class Iterator; - -struct VMAddressSpace { +public: VMAddressSpace(team_id id, addr_t base, size_t size, bool kernel); ~VMAddressSpace(); @@ -29,6 +31,7 @@ team_id ID() const { return fID; } addr_t Base() const { return fBase; } size_t Size() const { return fSize; } + size_t FreeSpace() const { return fFreeSpace; } bool IsBeingDeleted() const { return fDeleting; } vm_translation_map& TranslationMap() { return fTranslationMap; } @@ -54,9 +57,18 @@ void IncrementChangeCount() { fChangeCount++; } - VMArea* LookupArea(addr_t address); + VMArea* FirstArea() const + { return fAreas.Head(); } + VMArea* NextArea(VMArea* area) const + { return fAreas.GetNext(area); } + + VMArea* LookupArea(addr_t address) const; + status_t InsertArea(void** _address, uint32 addressSpec, + addr_t size, VMArea* area); void RemoveArea(VMArea* area); + inline Iterator GetIterator(); + static status_t Create(team_id teamID, addr_t base, size_t size, bool kernel, VMAddressSpace** _addressSpace); @@ -77,32 +89,77 @@ void Dump() const; private: + status_t _InsertAreaIntoReservedRegion(addr_t start, + size_t size, VMArea* area); + status_t _InsertAreaSlot(addr_t start, addr_t size, + addr_t end, uint32 addressSpec, + VMArea* area); + static int _DumpCommand(int argc, char** argv); static int _DumpListCommand(int argc, char** argv); -public: - VMArea* areas; +private: + friend class Iterator; -private: struct HashDefinition; private: VMAddressSpace* fHashTableLink; addr_t fBase; size_t fSize; + size_t fFreeSpace; rw_lock fLock; team_id 
fID; int32 fRefCount; int32 fFaultCount; int32 fChangeCount; vm_translation_map fTranslationMap; - VMArea* fAreaHint; + VMAddressSpaceAreaList fAreas; + mutable VMArea* fAreaHint; bool fDeleting; static VMAddressSpace* sKernelAddressSpace; }; +class VMAddressSpace::Iterator { +public: + Iterator() + { + } + Iterator(VMAddressSpace* addressSpace) + : + fIterator(addressSpace->fAreas.GetIterator()) + { + } + + bool HasNext() const + { + return fIterator.HasNext(); + } + + VMArea* Next() + { + return fIterator.Next(); + } + + void Rewind() + { + fIterator.Rewind(); + } + +private: + VMAddressSpaceAreaList::Iterator fIterator; +}; + + +inline VMAddressSpace::Iterator +VMAddressSpace::GetIterator() +{ + return Iterator(this); +} + + #ifdef __cplusplus extern "C" { #endif Modified: haiku/trunk/headers/private/kernel/vm/VMArea.h =================================================================== --- haiku/trunk/headers/private/kernel/vm/VMArea.h 2009-12-03 12:24:17 UTC (rev 34458) +++ haiku/trunk/headers/private/kernel/vm/VMArea.h 2009-12-03 12:41:11 UTC (rev 34459) @@ -11,6 +11,7 @@ #include <lock.h> +#include <util/DoublyLinkedList.h> #include <util/OpenHashTable.h> #include <vm/vm_types.h> @@ -36,16 +37,47 @@ uint8* page_protections; struct VMAddressSpace* address_space; - struct VMArea* address_space_next; struct VMArea* cache_next; struct VMArea* cache_prev; struct VMArea* hash_next; - bool ContainsAddress(addr_t address) const - { return address >= base && address <= base + (size - 1); } + bool ContainsAddress(addr_t address) const + { return address >= base + && address <= base + (size - 1); } + + static VMArea* Create(VMAddressSpace* addressSpace, + const char* name, uint32 wiring, + uint32 protection); + static VMArea* CreateReserved(VMAddressSpace* addressSpace, + uint32 flags); + + DoublyLinkedListLink<VMArea>& AddressSpaceLink() + { return fAddressSpaceLink; } + const DoublyLinkedListLink<VMArea>& AddressSpaceLink() const + { return fAddressSpaceLink; } + 
+private: + DoublyLinkedListLink<VMArea> fAddressSpaceLink; }; +struct VMAddressSpaceAreaGetLink { + inline DoublyLinkedListLink<VMArea>* operator()(VMArea* area) const + { + return &area->AddressSpaceLink(); + } + + inline const DoublyLinkedListLink<VMArea>* operator()( + const VMArea* area) const + { + return &area->AddressSpaceLink(); + } +}; + +typedef DoublyLinkedList<VMArea, VMAddressSpaceAreaGetLink> + VMAddressSpaceAreaList; + + struct VMAreaHashDefinition { typedef area_id KeyType; typedef VMArea ValueType; Modified: haiku/trunk/src/system/kernel/elf.cpp =================================================================== --- haiku/trunk/src/system/kernel/elf.cpp 2009-12-03 12:24:17 UTC (rev 34458) +++ haiku/trunk/src/system/kernel/elf.cpp 2009-12-03 12:41:11 UTC (rev 34459) @@ -1303,11 +1303,11 @@ status_t Init(struct team* team) { // find the runtime loader debug area - VMArea* area = team->address_space->areas; - while (area != NULL) { + VMArea* area; + for (VMAddressSpace::Iterator it = team->address_space->GetIterator(); + (area = it.Next()) != NULL;) { if (strcmp(area->name, RUNTIME_LOADER_DEBUG_AREA_NAME) == 0) break; - area = area->address_space_next; } if (area == NULL) Modified: haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp 2009-12-03 12:24:17 UTC (rev 34458) +++ haiku/trunk/src/system/kernel/vm/VMAddressSpace.cpp 2009-12-03 12:41:11 UTC (rev 34459) @@ -34,6 +34,17 @@ #define ASPACE_HASH_TABLE_SIZE 1024 +/*! Verifies that an area with the given aligned base and size fits into + the spot defined by base and limit and checks for overflows. 
+*/ +static inline bool +is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit) +{ + return (alignedBase >= base && alignedBase + (size - 1) > alignedBase + && alignedBase + (size - 1) <= limit); +} + + // #pragma mark - AddressSpaceHashDefinition @@ -76,10 +87,9 @@ VMAddressSpace::VMAddressSpace(team_id id, addr_t base, size_t size, bool kernel) : - areas(NULL), - fBase(base), fSize(size), + fFreeSpace(size), fID(id), fRefCount(1), fFaultCount(0), @@ -266,14 +276,14 @@ //! You must hold the address space's read lock. VMArea* -VMAddressSpace::LookupArea(addr_t address) +VMAddressSpace::LookupArea(addr_t address) const { // check the area hint first - VMArea* area = fAreaHint; - if (area != NULL && area->ContainsAddress(address)) - return area; + if (fAreaHint != NULL && fAreaHint->ContainsAddress(address)) + return fAreaHint; - for (area = areas; area != NULL; area = area->address_space_next) { + for (VMAddressSpaceAreaList::ConstIterator it = fAreas.GetIterator(); + VMArea* area = it.Next();) { if (area->id == RESERVED_AREA_ID) continue; @@ -287,32 +297,66 @@ } +/*! This inserts the area you pass into the address space. + It will also set the "_address" argument to its base address when + the call succeeds. + You need to hold the VMAddressSpace write lock. +*/ +status_t +VMAddressSpace::InsertArea(void** _address, uint32 addressSpec, addr_t size, + VMArea* area) +{ + addr_t searchBase, searchEnd; + status_t status; + + switch (addressSpec) { + case B_EXACT_ADDRESS: + searchBase = (addr_t)*_address; + searchEnd = (addr_t)*_address + (size - 1); + break; + + case B_BASE_ADDRESS: + searchBase = (addr_t)*_address; + searchEnd = fBase + (fSize - 1); + break; + + case B_ANY_ADDRESS: + case B_ANY_KERNEL_ADDRESS: + case B_ANY_KERNEL_BLOCK_ADDRESS: + searchBase = fBase; + // TODO: remove this again when vm86 mode is moved into the kernel + // completely (currently needs a userland address space!) 
+ if (searchBase == USER_BASE) + searchBase = USER_BASE_ANY; + searchEnd = fBase + (fSize - 1); + break; + + default: + return B_BAD_VALUE; + } + + status = _InsertAreaSlot(searchBase, size, searchEnd, addressSpec, area); + if (status == B_OK) { + *_address = (void*)area->base; + fFreeSpace -= area->size; + } + + return status; +} + + //! You must hold the address space's write lock. void VMAddressSpace::RemoveArea(VMArea* area) { - VMArea* temp = areas; - VMArea* last = NULL; + fAreas.Remove(area); - while (temp != NULL) { - if (area == temp) { - if (last != NULL) { - last->address_space_next = temp->address_space_next; - } else { - areas = temp->address_space_next; - } - IncrementChangeCount(); - break; - } - last = temp; - temp = temp->address_space_next; - } - if (area == fAreaHint) - fAreaHint = NULL; + if (area->id != RESERVED_AREA_ID) { + IncrementChangeCount(); + fFreeSpace += area->size; - if (temp == NULL) { - panic("VMAddressSpace::RemoveArea(): area not found in aspace's area " - "list\n"); + if (area == fAreaHint) + fAreaHint = NULL; } } @@ -332,8 +376,8 @@ kprintf("area_list:\n"); - VMArea* area; - for (area = areas; area != NULL; area = area->address_space_next) { + for (VMAddressSpaceAreaList::ConstIterator it = fAreas.GetIterator(); + VMArea* area = it.Next();) { kprintf(" area 0x%lx: ", area->id); kprintf("base_addr = 0x%lx ", area->base); kprintf("size = 0x%lx ", area->size); @@ -343,6 +387,313 @@ } +/*! Finds a reserved area that covers the region spanned by \a start and + \a size, inserts the \a area into that region and makes sure that + there are reserved regions for the remaining parts. 
+*/ +status_t +VMAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size, + VMArea* area) +{ + VMArea* next; + + for (VMAddressSpaceAreaList::Iterator it = fAreas.GetIterator(); + (next = it.Next()) != NULL;) { + if (next->base <= start + && next->base + (next->size - 1) >= start + (size - 1)) { + // This area covers the requested range + if (next->id != RESERVED_AREA_ID) { + // but it's not reserved space, it's a real area + return B_BAD_VALUE; + } + + break; + } + } + + if (next == NULL) + return B_ENTRY_NOT_FOUND; + + // Now we have to transfer the requested part of the reserved + // range to the new area - and remove, resize or split the old + // reserved area. + + if (start == next->base) { + // the area starts at the beginning of the reserved range + fAreas.Insert(next, area); + + if (size == next->size) { + // the new area fully covers the reversed range + fAreas.Remove(next); + Put(); + free(next); + } else { + // resize the reserved range behind the area + next->base += size; + next->size -= size; + } + } else if (start + size == next->base + next->size) { + // the area is at the end of the reserved range + fAreas.Insert(fAreas.GetNext(next), area); + + // resize the reserved range before the area + next->size = start - next->base; + } else { + // the area splits the reserved range into two separate ones + // we need a new reserved area to cover this space + VMArea* reserved = VMArea::CreateReserved(this, next->protection); + if (reserved == NULL) + return B_NO_MEMORY; + + Get(); + fAreas.Insert(fAreas.GetNext(next), reserved); + fAreas.Insert(reserved, area); + + // resize regions + reserved->size = next->base + next->size - start - size; + next->size = start - next->base; + reserved->base = start + size; + reserved->cache_offset = next->cache_offset; + } + + area->base = start; + area->size = size; + IncrementChangeCount(); + + return B_OK; +} + + +/*! 
Must be called with this address space's write lock held */ +status_t +VMAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end, + uint32 addressSpec, VMArea* area) +{ + VMArea* last = NULL; + VMArea* next; + bool foundSpot = false; + + TRACE(("VMAddressSpace::InsertAreaSlot: address space %p, start 0x%lx, " + "size %ld, end 0x%lx, addressSpec %ld, area %p\n", this, start, + size, end, addressSpec, area)); + + // do some sanity checking + if (start < fBase || size == 0 || end > fBase + fSize - 1 + || start + (size - 1) > end) + return B_BAD_ADDRESS; + + if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) { + // search for a reserved area + status_t status = _InsertAreaIntoReservedRegion(start, size, area); + if (status == B_OK || status == B_BAD_VALUE) + return status; + + // There was no reserved area, and the slot doesn't seem to be used + // already + // TODO: this could be further optimized. + } + + size_t alignment = B_PAGE_SIZE; + if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) { + // align the memory to the next power of two of the size + while (alignment < size) + alignment <<= 1; + } + + start = ROUNDUP(start, alignment); + + // walk up to the spot where we should start searching +second_chance: + VMAddressSpaceAreaList::Iterator it = fAreas.GetIterator(); + while ((next = it.Next()) != NULL) { + if (next->base > start + (size - 1)) { + // we have a winner + break; + } + + last = next; + } + + // find the right spot depending on the address specification - the area + // will be inserted directly after "last" ("next" is not referenced anymore) + + switch (addressSpec) { + case B_ANY_ADDRESS: + case B_ANY_KERNEL_ADDRESS: + case B_ANY_KERNEL_BLOCK_ADDRESS: + { + // find a hole big enough for a new area + if (last == NULL) { + // see if we can build it at the beginning of the virtual map + addr_t alignedBase = ROUNDUP(fBase, alignment); + if (is_valid_spot(fBase, alignedBase, size, + next == NULL ? 
end : next->base)) { + foundSpot = true; + area->base = alignedBase; + break; + } + + last = next; + next = it.Next(); + } + + // keep walking + while (next != NULL) { + addr_t alignedBase = ROUNDUP(last->base + last->size, + alignment); + if (is_valid_spot(last->base + (last->size - 1), alignedBase, + size, next->base)) { + foundSpot = true; + area->base = alignedBase; + break; + } + + last = next; + next = it.Next(); + } + + if (foundSpot) + break; + + addr_t alignedBase = ROUNDUP(last->base + last->size, alignment); + if (is_valid_spot(last->base + (last->size - 1), alignedBase, + size, end)) { + // got a spot + foundSpot = true; + area->base = alignedBase; + break; + } else if (area->id != RESERVED_AREA_ID) { + // We didn't find a free spot - if there are any reserved areas, + // we can now test those for free space + // TODO: it would make sense to start with the biggest of them + it.Rewind(); + next = it.Next(); + for (last = NULL; next != NULL; next = it.Next()) { + if (next->id != RESERVED_AREA_ID) { + last = next; + continue; + } + + // TODO: take free space after the reserved area into + // account! 
+ addr_t alignedBase = ROUNDUP(next->base, alignment); + if (next->base == alignedBase && next->size == size) { + // The reserved area is entirely covered, and thus, + // removed + fAreas.Remove(next); + + foundSpot = true; + area->base = alignedBase; + free(next); + break; + } + + if ((next->protection & RESERVED_AVOID_BASE) == 0 + && alignedBase == next->base && next->size >= size) { + // The new area will be placed at the beginning of the + // reserved area and the reserved area will be offset + // and resized + foundSpot = true; + next->base += size; + next->size -= size; + area->base = alignedBase; + break; + } + + if (is_valid_spot(next->base, alignedBase, size, + next->base + (next->size - 1))) { + // The new area will be placed at the end of the + // reserved area, and the reserved area will be resized + // to make space + alignedBase = ROUNDDOWN(next->base + next->size - size, + alignment); + + foundSpot = true; + next->size = alignedBase - next->base; + area->base = alignedBase; + last = next; + break; + } + + last = next; + } + } + break; + } + + case B_BASE_ADDRESS: + { + // find a hole big enough for a new area beginning with "start" + if (last == NULL) { + // see if we can build it at the beginning of the specified + // start + if (next == NULL || next->base > start + (size - 1)) { + foundSpot = true; + area->base = start; + break; + } + + last = next; + next = it.Next(); + } + + // keep walking + while (next != NULL) { + if (next->base - (last->base + last->size) >= size) { + // we found a spot (it'll be filled up below) + break; + } + + last = next; + next = it.Next(); + } + + addr_t lastEnd = last->base + (last->size - 1); + if (next != NULL || end - lastEnd >= size) { + // got a spot + foundSpot = true; + if (lastEnd < start) + area->base = start; + else + area->base = lastEnd + 1; + break; + } + + // we didn't find a free spot in the requested range, so we'll + // try again without any restrictions + start = fBase; + addressSpec = B_ANY_ADDRESS; 
+ last = NULL; + goto second_chance; + } + + case B_EXACT_ADDRESS: + // see if we can create it exactly here + if ((last == NULL || last->base + (last->size - 1) < start) + && (next == NULL || next->base > start + (size - 1))) { + foundSpot = true; + area->base = start; + break; + } + break; + default: + return B_BAD_VALUE; + } + + if (!foundSpot) + return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY; + + area->size = size; + if (last) + fAreas.Insert(fAreas.GetNext(last), area); + else + fAreas.Insert(fAreas.Head(), area); + + IncrementChangeCount(); + return B_OK; +} + + /*static*/ int VMAddressSpace::_DumpCommand(int argc, char** argv) { @@ -380,8 +731,8 @@ while (VMAddressSpace* space = it.Next()) { int32 areaCount = 0; off_t areaSize = 0; - for (VMArea* area = space->areas; area != NULL; - area = area->address_space_next) { + for (VMAddressSpaceAreaList::Iterator it = space->fAreas.GetIterator(); + VMArea* area = it.Next();) { if (area->id != RESERVED_AREA_ID && area->cache->type != CACHE_TYPE_NULL) { areaCount++; Modified: haiku/trunk/src/system/kernel/vm/VMArea.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/VMArea.cpp 2009-12-03 12:24:17 UTC (rev 34458) +++ haiku/trunk/src/system/kernel/vm/VMArea.cpp 2009-12-03 12:41:11 UTC (rev 34459) @@ -1,11 +1,16 @@ /* * Copyright 2009, Ingo Weinhold, ingo_weinhold@xxxxxxx + * Copyright 2002-2009, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx * Distributed under the terms of the MIT License. + * + * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. + * Distributed under the terms of the NewOS License. 
*/ #include <vm/VMArea.h> +#include <heap.h> #include <vm/vm_priv.h> @@ -14,8 +19,72 @@ rw_lock VMAreaHash::sLock = RW_LOCK_INITIALIZER("area hash"); VMAreaHashTable VMAreaHash::sTable; +static area_id sNextAreaID = 1; +// #pragma mark - VMArea + + +/*static*/ VMArea* +VMArea::Create(VMAddressSpace* addressSpace, const char* name, + uint32 wiring, uint32 protection) +{ + // restrict the area name to B_OS_NAME_LENGTH + size_t length = strlen(name) + 1; + if (length > B_OS_NAME_LENGTH) + length = B_OS_NAME_LENGTH; + + VMArea* area = (VMArea*)malloc_nogrow(sizeof(VMArea)); + if (area == NULL) + return NULL; + + area->name = (char*)malloc_nogrow(length); + if (area->name == NULL) { + free(area); + return NULL; + } + strlcpy(area->name, name, length); + + area->id = atomic_add(&sNextAreaID, 1); + area->base = 0; + area->size = 0; + area->protection = protection; + area->wiring = wiring; + area->memory_type = 0; + + area->cache = NULL; + area->cache_offset = 0; + + area->address_space = addressSpace; + area->cache_next = area->cache_prev = NULL; + area->hash_next = NULL; + new (&area->mappings) VMAreaMappings; + area->page_protections = NULL; + + return area; +} + + +/*static*/ VMArea* +VMArea::CreateReserved(VMAddressSpace* addressSpace, uint32 flags) +{ + VMArea* reserved = (VMArea*)malloc_nogrow(sizeof(VMArea)); + if (reserved == NULL) + return NULL; + + memset(reserved, 0, sizeof(VMArea)); + reserved->id = RESERVED_AREA_ID; + // this marks it as reserved space + reserved->protection = flags; + reserved->address_space = addressSpace; + + return reserved; +} + + +// #pragma mark - VMAreaHash + + /*static*/ status_t VMAreaHash::Init() { Modified: haiku/trunk/src/system/kernel/vm/vm.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/vm.cpp 2009-12-03 12:24:17 UTC (rev 34458) +++ haiku/trunk/src/system/kernel/vm/vm.cpp 2009-12-03 12:41:11 UTC (rev 34459) @@ -103,7 +103,6 @@ }; -static area_id sNextAreaID = 1; 
static mutex sMappingLock = MUTEX_INITIALIZER("page mappings"); static mutex sAreaCacheLock = MUTEX_INITIALIZER("area->cache"); @@ -136,9 +135,6 @@ VMArea** _area, const char* areaName, bool unmapAddressRange, bool kernel); -static size_t sKernelAddressSpaceLeft = KERNEL_SIZE; - - // #pragma mark - @@ -281,450 +277,6 @@ } -static VMArea* -create_reserved_area_struct(VMAddressSpace* addressSpace, uint32 flags) -{ - VMArea* reserved = (VMArea*)malloc_nogrow(sizeof(VMArea)); - if (reserved == NULL) - return NULL; - - memset(reserved, 0, sizeof(VMArea)); - reserved->id = RESERVED_AREA_ID; - // this marks it as reserved space - reserved->protection = flags; - reserved->address_space = addressSpace; - - return reserved; -} - - -static VMArea* -create_area_struct(VMAddressSpace* addressSpace, const char* name, - uint32 wiring, uint32 protection) -{ - // restrict the area name to B_OS_NAME_LENGTH - size_t length = strlen(name) + 1; - if (length > B_OS_NAME_LENGTH) - length = B_OS_NAME_LENGTH; - - VMArea* area = (VMArea*)malloc_nogrow(sizeof(VMArea)); - if (area == NULL) - return NULL; - - area->name = (char*)malloc_nogrow(length); - if (area->name == NULL) { - free(area); - return NULL; - } - strlcpy(area->name, name, length); - - area->id = atomic_add(&sNextAreaID, 1); - area->base = 0; - area->size = 0; - area->protection = protection; - area->wiring = wiring; - area->memory_type = 0; - - area->cache = NULL; - area->cache_offset = 0; - - area->address_space = addressSpace; - area->address_space_next = NULL; - area->cache_next = area->cache_prev = NULL; - area->hash_next = NULL; - new (&area->mappings) VMAreaMappings; - area->page_protections = NULL; - - return area; -} - - -/*! Finds a reserved area that covers the region spanned by \a start and - \a size, inserts the \a area into that region and makes sure that - there are reserved regions for the remaining parts. 
-*/ -static status_t -find_reserved_area(VMAddressSpace* addressSpace, addr_t start, - addr_t size, VMArea* area) -{ - VMArea* last = NULL; - VMArea* next; - - next = addressSpace->areas; - while (next != NULL) { - if (next->base <= start - && next->base + (next->size - 1) >= start + (size - 1)) { - // This area covers the requested range - if (next->id != RESERVED_AREA_ID) { - // but it's not reserved space, it's a real area - return B_BAD_VALUE; - } - - break; - } - - last = next; - next = next->address_space_next; - } - - if (next == NULL) - return B_ENTRY_NOT_FOUND; - - // Now we have to transfer the requested part of the reserved - // range to the new area - and remove, resize or split the old - // reserved area. - - if (start == next->base) { - // the area starts at the beginning of the reserved range - if (last) - last->address_space_next = area; - else - addressSpace->areas = area; - - if (size == next->size) { - // the new area fully covers the reversed range - area->address_space_next = next->address_space_next; - addressSpace->Put(); - free(next); - } else { - // resize the reserved range behind the area - area->address_space_next = next; - next->base += size; - next->size -= size; - } - } else if (start + size == next->base + next->size) { - // the area is at the end of the reserved range - area->address_space_next = next->address_space_next; - next->address_space_next = area; - - // resize the reserved range before the area - next->size = start - next->base; - } else { - // the area splits the reserved range into two separate ones - // we need a new reserved area to cover this space - VMArea* reserved = create_reserved_area_struct(addressSpace, - next->protection); - if (reserved == NULL) - return B_NO_MEMORY; - - addressSpace->Get(); - reserved->address_space_next = next->address_space_next; - area->address_space_next = reserved; - next->address_space_next = area; - - // resize regions - reserved->size = next->base + next->size - start - size; - 
next->size = start - next->base; - reserved->base = start + size; - reserved->cache_offset = next->cache_offset; - } - - area->base = start; - area->size = size; - addressSpace->IncrementChangeCount(); - - return B_OK; -} - - -/*! Verifies that an area with the given aligned base and size fits into - the spot defined by base and limit and does check for overflows. -*/ -static inline bool -is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit) -{ - return (alignedBase >= base && alignedBase + (size - 1) > alignedBase - && alignedBase + (size - 1) <= limit); -} - - -/*! Must be called with this address space's write lock held */ -static status_t -find_and_insert_area_slot(VMAddressSpace* addressSpace, addr_t start, - addr_t size, addr_t end, uint32 addressSpec, VMArea* area) -{ - VMArea* last = NULL; - VMArea* next; - bool foundSpot = false; - - TRACE(("find_and_insert_area_slot: address space %p, start 0x%lx, " - "size %ld, end 0x%lx, addressSpec %ld, area %p\n", addressSpace, start, - size, end, addressSpec, area)); - - // do some sanity checking - if (start < addressSpace->Base() || size == 0 - || end > addressSpace->Base() + (addressSpace->Size() - 1) - || start + (size - 1) > end) - return B_BAD_ADDRESS; - - if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) { - // search for a reserved area - status_t status = find_reserved_area(addressSpace, start, size, area); - if (status == B_OK || status == B_BAD_VALUE) - return status; - - // There was no reserved area, and the slot doesn't seem to be used - // already [... truncated: 560 lines follow ...]