Author: bonefish
Date: 2009-12-01 18:40:04 +0100 (Tue, 01 Dec 2009)
New Revision: 34423
Changeset: http://dev.haiku-os.org/changeset/34423/haiku

Modified:
   haiku/trunk/headers/private/kernel/arch/vm.h
   haiku/trunk/headers/private/kernel/vm.h
   haiku/trunk/headers/private/kernel/vm_types.h
   haiku/trunk/src/system/kernel/arch/arm/arch_vm.cpp
   haiku/trunk/src/system/kernel/arch/m68k/arch_vm.cpp
   haiku/trunk/src/system/kernel/arch/mipsel/arch_vm.cpp
   haiku/trunk/src/system/kernel/arch/ppc/arch_vm.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_debug.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp
   haiku/trunk/src/system/kernel/elf.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_address_space.cpp
   haiku/trunk/src/system/kernel/vm/vm_cache.cpp
Log:
vm_area -> VMArea

Modified: haiku/trunk/headers/private/kernel/arch/vm.h
===================================================================
--- haiku/trunk/headers/private/kernel/arch/vm.h	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/headers/private/kernel/arch/vm.h	2009-12-01 17:40:04 UTC (rev 34423)
@@ -15,8 +15,8 @@
 
 struct kernel_args;
 
-struct vm_area;
 struct VMAddressSpace;
+struct VMArea;
 
 
 #ifdef __cplusplus
@@ -31,9 +31,9 @@
 	struct VMAddressSpace *to);
 bool arch_vm_supports_protection(uint32 protection);
 
-status_t arch_vm_set_memory_type(struct vm_area *area, addr_t physicalBase,
+status_t arch_vm_set_memory_type(struct VMArea *area, addr_t physicalBase,
 	uint32 type);
-void arch_vm_unset_memory_type(struct vm_area *area);
+void arch_vm_unset_memory_type(struct VMArea *area);
 
 #ifdef __cplusplus
 }
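
The interesting detail in this header is that the forward declaration keeps the elaborated "struct" spelling even though VMArea is becoming a C++-style type: arch/vm.h is included from both C and C++ code (note the #ifdef __cplusplus guards), and C callers only ever see an opaque pointer. A minimal sketch of that pattern, with made-up names (example_use is hypothetical, not a Haiku function):

  // illustrative.cpp -- why the elaborated "struct VMArea" spelling survives:
  // a type forward-declared this way can be handed around by pointer from C
  // code while being defined as a full C++ type elsewhere.

  struct VMArea;	// opaque at this point; C sees only a pointer type

  extern "C" void example_use(struct VMArea* area);	// hypothetical C-linkage API

  struct VMArea {	// the definition may later grow C++ members without
  	int id;			// breaking C users that never dereference the pointer
  };

  extern "C" void example_use(struct VMArea* area) { (void)area; }

  int main()
  {
  	VMArea area = { 42 };
  	example_use(&area);
  	return 0;
  }
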
Modified: haiku/trunk/headers/private/kernel/vm.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm.h	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/headers/private/kernel/vm.h	2009-12-01 17:40:04 UTC (rev 34423)
@@ -19,8 +19,8 @@
 struct team;
 struct system_memory_info;
 struct VMAddressSpace;
+struct VMArea;
 struct VMCache;
-struct vm_area;
 struct vm_page;
 struct vnode;
 
@@ -76,7 +76,7 @@
 area_id vm_map_file(team_id aid, const char *name, void **address,
 	uint32 addressSpec, addr_t size, uint32 protection, uint32 mapping,
 	bool unmapAddressRange, int fd, off_t offset);
-struct VMCache *vm_area_get_locked_cache(struct vm_area *area);
+struct VMCache *vm_area_get_locked_cache(struct VMArea *area);
 void vm_area_put_locked_cache(struct VMCache *cache);
 area_id vm_create_null_area(team_id team, const char *name, void **address,
 	uint32 addressSpec, addr_t size);
@@ -87,7 +87,7 @@
 	area_id sourceArea, bool kernel);
 status_t vm_delete_area(team_id teamID, area_id areaID, bool kernel);
 status_t vm_create_vnode_cache(struct vnode *vnode, struct VMCache **_cache);
-struct vm_area *vm_area_lookup(struct VMAddressSpace *addressSpace,
+struct VMArea *vm_area_lookup(struct VMAddressSpace *addressSpace,
 	addr_t address);
 status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
 status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);
@@ -95,11 +95,11 @@
 int32 vm_test_map_activation(struct vm_page *page, bool *_modified);
 void vm_clear_map_flags(struct vm_page *page, uint32 flags);
 void vm_remove_all_page_mappings(struct vm_page *page, uint32 *_flags);
-bool vm_unmap_page(struct vm_area* area, addr_t virtualAddress,
+bool vm_unmap_page(struct VMArea* area, addr_t virtualAddress,
 	bool preserveModified);
-status_t vm_unmap_pages(struct vm_area *area, addr_t base, size_t length,
+status_t vm_unmap_pages(struct VMArea *area, addr_t base, size_t length,
 	bool preserveModified);
-status_t vm_map_page(struct vm_area *area, struct vm_page *page, addr_t address,
+status_t vm_map_page(struct VMArea *area, struct vm_page *page, addr_t address,
 	uint32 protection);
 status_t vm_get_physical_page(addr_t paddr, addr_t* vaddr, void** _handle);

Modified: haiku/trunk/headers/private/kernel/vm_types.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm_types.h	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/headers/private/kernel/vm_types.h	2009-12-01 17:40:04 UTC (rev 34423)
@@ -32,7 +32,7 @@
 	vm_page_mapping_link page_link;
 	vm_page_mapping_link area_link;
 	struct vm_page *page;
-	struct vm_area *area;
+	struct VMArea *area;
 } vm_page_mapping;
 
 class DoublyLinkedPageLink {
@@ -66,7 +66,7 @@
 typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedPageLink>
 	vm_page_mappings;
 typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink>
-	vm_area_mappings;
+	VMAreaMappings;
 
 typedef uint32 page_num_t;
@@ -192,8 +192,8 @@
 
 	void AddConsumer(VMCache* consumer);
 
-	status_t InsertAreaLocked(vm_area* area);
-	status_t RemoveArea(vm_area* area);
+	status_t InsertAreaLocked(VMArea* area);
+	status_t RemoveArea(VMArea* area);
 
 	status_t WriteModified();
 	status_t SetMinimalCommitment(off_t commitment);
@@ -242,7 +242,7 @@
 
 public:
-	struct vm_area* areas;
+	struct VMArea* areas;
 	struct list_link consumer_link;
 	struct list consumers;
 		// list of caches that use this cache as a source
@@ -289,7 +289,7 @@
 };
 
 
-struct vm_area {
+struct VMArea {
 	char* name;
 	area_id id;
 	addr_t base;
@@ -302,14 +302,14 @@
 	vint32 no_cache_change;
 	off_t cache_offset;
 	uint32 cache_type;
-	vm_area_mappings mappings;
+	VMAreaMappings mappings;
 	uint8* page_protections;
 
 	struct VMAddressSpace* address_space;
-	struct vm_area* address_space_next;
-	struct vm_area* cache_next;
-	struct vm_area* cache_prev;
-	struct vm_area* hash_next;
+	struct VMArea* address_space_next;
+	struct VMArea* cache_next;
+	struct VMArea* cache_prev;
+	struct VMArea* hash_next;
 };
@@ -319,8 +319,8 @@
 };
 
 struct VMAddressSpace {
-	struct vm_area* areas;
-	struct vm_area* area_hint;
+	struct VMArea* areas;
+	struct VMArea* area_hint;
 	rw_lock lock;
 	addr_t base;
 	addr_t size;

Modified: haiku/trunk/src/system/kernel/arch/arm/arch_vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/arm/arch_vm.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/arch/arm/arch_vm.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -88,13 +88,13 @@
 
 
 void
-arch_vm_unset_memory_type(vm_area *area)
+arch_vm_unset_memory_type(VMArea *area)
 {
 }
 
 
 status_t
-arch_vm_set_memory_type(vm_area *area, addr_t physicalBase, uint32 type)
+arch_vm_set_memory_type(VMArea *area, addr_t physicalBase, uint32 type)
 {
 	if (type == 0)
 		return B_OK;

Modified: haiku/trunk/src/system/kernel/arch/m68k/arch_vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/m68k/arch_vm.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/arch/m68k/arch_vm.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -117,13 +117,13 @@
 
 
 void
-arch_vm_unset_memory_type(vm_area *area)
+arch_vm_unset_memory_type(VMArea *area)
 {
 }
 
 
 status_t
-arch_vm_set_memory_type(vm_area *area, addr_t physicalBase, uint32 type)
+arch_vm_set_memory_type(VMArea *area, addr_t physicalBase, uint32 type)
 {
 	if (type == 0)
 		return B_OK;
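
Back in vm_types.h, the renamed VMAreaMappings typedef is one of two intrusive lists every vm_page_mapping belongs to at once: page_link threads it into the page's mapping list, area_link into the owning area's list, so a single allocation records the page<->area relationship in both directions. A toy version of that double membership, using hand-rolled singly linked links instead of Haiku's DoublyLinkedQueue (all Toy* names are invented):

  #include <cstdio>

  struct ToyPage;
  struct ToyArea;

  // One mapping object, reachable from both its page and its area.
  struct ToyMapping {
  	ToyMapping* page_link;	// next mapping of the same page
  	ToyMapping* area_link;	// next mapping inside the same area
  	ToyPage* page;
  	ToyArea* area;
  };

  struct ToyPage { ToyMapping* mappings; };
  struct ToyArea { ToyMapping* mappings; };

  // Record that 'page' is mapped into 'area': one object, two list inserts.
  static void
  add_mapping(ToyPage* page, ToyArea* area, ToyMapping* mapping)
  {
  	mapping->page = page;
  	mapping->area = area;
  	mapping->page_link = page->mappings;
  	page->mappings = mapping;
  	mapping->area_link = area->mappings;
  	area->mappings = mapping;
  }

  int main()
  {
  	ToyPage page = { 0 };
  	ToyArea area = { 0 };
  	ToyMapping mapping = {};
  	add_mapping(&page, &area, &mapping);
  	printf("page sees mapping: %d, area sees mapping: %d\n",
  		page.mappings == &mapping, area.mappings == &mapping);
  	return 0;
  }
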
Modified: haiku/trunk/src/system/kernel/arch/mipsel/arch_vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/mipsel/arch_vm.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/arch/mipsel/arch_vm.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -81,14 +81,14 @@
 
 
 void
-arch_vm_unset_memory_type(vm_area* area)
+arch_vm_unset_memory_type(VMArea* area)
 {
 #warning IMPLEMENT arch_vm_unset_memory_type
 }
 
 
 status_t
-arch_vm_set_memory_type(vm_area* area, addr_t physicalBase, uint32 type)
+arch_vm_set_memory_type(VMArea* area, addr_t physicalBase, uint32 type)
 {
 #warning IMPLEMENT arch_vm_set_memory_type
 	return B_ERROR;

Modified: haiku/trunk/src/system/kernel/arch/ppc/arch_vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/ppc/arch_vm.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/arch/ppc/arch_vm.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -157,13 +157,13 @@
 
 
 void
-arch_vm_unset_memory_type(vm_area *area)
+arch_vm_unset_memory_type(VMArea *area)
 {
 }
 
 
 status_t
-arch_vm_set_memory_type(vm_area *area, addr_t physicalBase, uint32 type)
+arch_vm_set_memory_type(VMArea *area, addr_t physicalBase, uint32 type)
 {
 	if (type == 0)
 		return B_OK;

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_debug.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_debug.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_debug.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -294,7 +294,7 @@
 			kprintf(" + 0x%04lx\n", eip - baseAddress);
 	} else {
-		vm_area *area = NULL;
+		VMArea *area = NULL;
 		if (thread != NULL && thread->team != NULL
 			&& thread->team->address_space != NULL) {
 			area = vm_area_lookup(thread->team->address_space, eip);
@@ -641,7 +641,7 @@
 				(void *)baseAddress, eip - baseAddress);
 		}
 	} else {
-		vm_area *area = NULL;
+		VMArea *area = NULL;
 		if (thread->team->address_space != NULL)
 			area = vm_area_lookup(thread->team->address_space, eip);
 		if (area != NULL) {

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -647,7 +647,7 @@
 
 
 void
-arch_vm_unset_memory_type(struct vm_area *area)
+arch_vm_unset_memory_type(struct VMArea *area)
 {
 	if (area->memory_type == 0)
 		return;
@@ -657,7 +657,7 @@
 
 
 status_t
-arch_vm_set_memory_type(struct vm_area *area, addr_t physicalBase,
+arch_vm_set_memory_type(struct VMArea *area, addr_t physicalBase,
 	uint32 type)
 {
 	area->memory_type = type >> MEMORY_TYPE_SHIFT;

Modified: haiku/trunk/src/system/kernel/elf.cpp
===================================================================
--- haiku/trunk/src/system/kernel/elf.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/elf.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -1302,7 +1302,7 @@
 	status_t Init(struct team* team)
 	{
 		// find the runtime loader debug area
-		vm_area* area = team->address_space->areas;
+		VMArea* area = team->address_space->areas;
 		while (area != NULL) {
 			if (strcmp(area->name, RUNTIME_LOADER_DEBUG_AREA_NAME) == 0)
 				break;
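
In the x86 arch_vm.cpp hunks above, the memory type is stored compactly: arch_vm_set_memory_type shifts the caller's flag down by MEMORY_TYPE_SHIFT before stashing it in area->memory_type, and a stored value of 0 means there is nothing to undo. A toy round trip of that encoding; the constants below are illustrative, not Haiku's real values:

  #include <cassert>
  #include <cstdint>

  // Illustrative constants; Haiku's actual values live in private headers.
  const uint32_t MEMORY_TYPE_SHIFT = 28;
  const uint32_t TOY_MTR_WC = 2u << MEMORY_TYPE_SHIFT;	// hypothetical flag

  struct ToyArea { uint32_t memory_type; };

  int main()
  {
  	ToyArea area = { 0 };	// 0 == "no special memory type set"
  	area.memory_type = TOY_MTR_WC >> MEMORY_TYPE_SHIFT;	// store compact value
  	assert((area.memory_type << MEMORY_TYPE_SHIFT) == TOY_MTR_WC);	// recover
  	return 0;
  }
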
Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp	2009-12-01 17:27:09 UTC (rev 34422)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp	2009-12-01 17:40:04 UTC (rev 34423)
@@ -76,7 +76,7 @@
 
 	status_t SetTo(team_id team);
 	void SetTo(VMAddressSpace* space, bool getNewReference);
-	status_t SetFromArea(area_id areaID, vm_area*& area);
+	status_t SetFromArea(area_id areaID, VMArea*& area);
 
 	bool IsLocked() const { return fLocked; }
 	bool Lock();
@@ -98,10 +98,10 @@
 	~AddressSpaceWriteLocker();
 
 	status_t SetTo(team_id team);
-	status_t SetFromArea(area_id areaID, vm_area*& area);
+	status_t SetFromArea(area_id areaID, VMArea*& area);
 	status_t SetFromArea(team_id team, area_id areaID, bool allowKernel,
-		vm_area*& area);
-	status_t SetFromArea(team_id team, area_id areaID, vm_area*& area);
+		VMArea*& area);
+	status_t SetFromArea(team_id team, area_id areaID, VMArea*& area);
 
 	bool IsLocked() const { return fLocked; }
 	void Unlock();
@@ -128,7 +128,7 @@
 		VMAddressSpace** _space = NULL);
 	status_t AddAreaCacheAndLock(area_id areaID, bool writeLockThisOne,
-		bool writeLockOthers, vm_area*& _area, vm_cache** _cache = NULL);
+		bool writeLockOthers, VMArea*& _area, vm_cache** _cache = NULL);
 
 	status_t Lock();
 	void Unlock();
@@ -176,13 +176,13 @@
 	{
 	}
 
-	inline AreaCacheLocker(vm_area* area)
+	inline AreaCacheLocker(VMArea* area)
 		: AutoLocker<vm_cache, AreaCacheLocking>()
 	{
 		SetTo(area);
 	}
 
-	inline void SetTo(vm_area* area)
+	inline void SetTo(VMArea* area)
 	{
 		return AutoLocker<vm_cache, AreaCacheLocking>::SetTo(
 			area != NULL ? vm_area_get_locked_cache(area) : NULL, true, true);
@@ -217,14 +217,14 @@
 
 
 // function declarations
-static void delete_area(VMAddressSpace* addressSpace, vm_area* area);
+static void delete_area(VMAddressSpace* addressSpace, VMArea* area);
 static VMAddressSpace* get_address_space_by_area_id(area_id id);
 static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t address,
 	bool isWrite, bool isUser);
 static status_t map_backing_store(VMAddressSpace* addressSpace,
 	vm_cache* cache, void** _virtualAddress, off_t offset, addr_t size,
 	uint32 addressSpec, int wiring, int protection, int mapping,
-	vm_area** _area, const char* areaName, bool unmapAddressRange, bool kernel);
+	VMArea** _area, const char* areaName, bool unmapAddressRange, bool kernel);
 
 static size_t sKernelAddressSpaceLeft = KERNEL_SIZE;
@@ -308,7 +308,7 @@
 
 
 status_t
-AddressSpaceReadLocker::SetFromArea(area_id areaID, vm_area*& area)
+AddressSpaceReadLocker::SetFromArea(area_id areaID, VMArea*& area)
 {
 	fSpace = get_address_space_by_area_id(areaID);
 	if (fSpace == NULL)
@@ -317,7 +317,7 @@
 	rw_lock_read_lock(&fSpace->lock);
 
 	rw_lock_read_lock(&sAreaHashLock);
-	area = (vm_area*)hash_lookup(sAreaHash, &areaID);
+	area = (VMArea*)hash_lookup(sAreaHash, &areaID);
 	rw_lock_read_unlock(&sAreaHashLock);
 
 	if (area == NULL || area->address_space != fSpace) {
@@ -406,7 +406,7 @@
 
 
 status_t
-AddressSpaceWriteLocker::SetFromArea(area_id areaID, vm_area*& area)
+AddressSpaceWriteLocker::SetFromArea(area_id areaID, VMArea*& area)
 {
 	fSpace = get_address_space_by_area_id(areaID);
 	if (fSpace == NULL)
@@ -415,7 +415,7 @@
 	rw_lock_write_lock(&fSpace->lock);
 
 	rw_lock_read_lock(&sAreaHashLock);
-	area = (vm_area*)hash_lookup(sAreaHash, &areaID);
+	area = (VMArea*)hash_lookup(sAreaHash, &areaID);
 	rw_lock_read_unlock(&sAreaHashLock);
 
 	if (area == NULL || area->address_space != fSpace) {
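
AreaCacheLocker, touched in the @@ -176 hunk, is an RAII wrapper: constructing it locks the area's cache (via vm_area_get_locked_cache) and destruction unlocks it, so early returns cannot leak the lock. A stripped-down sketch of the idea using invented Toy* types and a plain std::mutex (Haiku's AutoLocker is policy-template based and the real locking goes through the cache's own lock, not a bare mutex):

  #include <cstddef>
  #include <mutex>

  struct ToyCache { std::mutex lock; };
  struct ToyArea { ToyCache* cache; };

  // RAII: the cache stays locked exactly as long as the locker object lives.
  class ToyAreaCacheLocker {
  public:
  	explicit ToyAreaCacheLocker(ToyArea* area)
  		: fCache(area != NULL ? area->cache : NULL)
  	{
  		if (fCache != NULL)
  			fCache->lock.lock();	// acquired on construction
  	}

  	~ToyAreaCacheLocker()
  	{
  		if (fCache != NULL)
  			fCache->lock.unlock();	// released on any scope exit
  	}

  private:
  	ToyCache* fCache;
  };

  int main()
  {
  	ToyCache cache;
  	ToyArea area = { &cache };
  	{
  		ToyAreaCacheLocker locker(&area);	// cache locked here
  	}	// and unlocked here, even if the block returned early
  	return 0;
  }
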
@@ -430,11 +430,11 @@
 
 status_t
 AddressSpaceWriteLocker::SetFromArea(team_id team, area_id areaID,
-	bool allowKernel, vm_area*& area)
+	bool allowKernel, VMArea*& area)
 {
 	rw_lock_read_lock(&sAreaHashLock);
-	area = (vm_area*)hash_lookup(sAreaHash, &areaID);
+	area = (VMArea*)hash_lookup(sAreaHash, &areaID);
 	if (area != NULL
 		&& (area->address_space->id == team
 			|| (allowKernel && team == vm_kernel_address_space_id()))) {
@@ -453,7 +453,7 @@
 	rw_lock_write_lock(&fSpace->lock);
 
 	rw_lock_read_lock(&sAreaHashLock);
-	area = (vm_area*)hash_lookup(sAreaHash, &areaID);
+	area = (VMArea*)hash_lookup(sAreaHash, &areaID);
 	rw_lock_read_unlock(&sAreaHashLock);
 
 	if (area == NULL) {
@@ -468,7 +468,7 @@
 
 status_t
 AddressSpaceWriteLocker::SetFromArea(team_id team, area_id areaID,
-	vm_area*& area)
+	VMArea*& area)
 {
 	return SetFromArea(team, areaID, false, area);
 }
@@ -672,7 +672,7 @@
 */
 status_t
 MultiAddressSpaceLocker::AddAreaCacheAndLock(area_id areaID,
-	bool writeLockThisOne, bool writeLockOthers, vm_area*& _area,
+	bool writeLockThisOne, bool writeLockOthers, VMArea*& _area,
 	vm_cache** _cache)
 {
 	// remember the original state
@@ -688,7 +688,7 @@
 
 	// get the cache
 	vm_cache* cache;
-	vm_area* area;
+	VMArea* area;
 	status_t error;
 	{
 		AddressSpaceReadLocker locker;
@@ -701,8 +701,8 @@
 	while (true) {
 		// add all areas
-		vm_area* firstArea = cache->areas;
-		for (vm_area* current = firstArea; current;
+		VMArea* firstArea = cache->areas;
+		for (VMArea* current = firstArea; current;
 				current = current->cache_next) {
 			error = AddArea(current->id,
 				current == area ? writeLockThisOne : writeLockOthers);
@@ -723,7 +723,7 @@
 
 		// check whether the area is gone in the meantime
 		rw_lock_read_lock(&sAreaHashLock);
-		area = (vm_area*)hash_lookup(sAreaHash, &areaID);
+		area = (VMArea*)hash_lookup(sAreaHash, &areaID);
 		rw_lock_read_unlock(&sAreaHashLock);
 
 		if (area == NULL) {
@@ -895,7 +895,7 @@
 static int
 area_compare(void* _area, const void* key)
 {
-	vm_area* area = (vm_area*)_area;
+	VMArea* area = (VMArea*)_area;
 	const area_id* id = (const area_id*)key;
 
 	if (area->id == *id)
@@ -908,7 +908,7 @@
 static uint32
 area_hash(void* _area, const void* key, uint32 range)
 {
-	vm_area* area = (vm_area*)_area;
+	VMArea* area = (VMArea*)_area;
 	const area_id* id = (const area_id*)key;
 
 	if (area != NULL)
@@ -925,7 +925,7 @@
 
 	rw_lock_read_lock(&sAreaHashLock);
 
-	vm_area* area = (vm_area*)hash_lookup(sAreaHash, &id);
+	VMArea* area = (VMArea*)hash_lookup(sAreaHash, &id);
 	if (area != NULL) {
 		addressSpace = area->address_space;
 		atomic_add(&addressSpace->ref_count, 1);
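
sAreaHash maps an area_id to its area, which is why the (VMArea*) casts appear at every hash_lookup call site. The callback pair in the @@ -895 and @@ -908 hunks has to handle being called with either a stored element or a bare key; the hunk cuts off before area_hash's tail, so the fallback below is a conventional completion, not copied from the commit (toy area_id/VMArea definitions added to make it compile standalone):

  #include <cstdint>
  #include <cstdio>

  typedef int32_t area_id;
  struct VMArea { area_id id; };

  // Match a stored VMArea against a bare area_id key.
  static int
  area_compare(void* _area, const void* key)
  {
  	VMArea* area = (VMArea*)_area;
  	const area_id* id = (const area_id*)key;

  	if (area->id == *id)
  		return 0;
  	return -1;
  }

  // Hash either a stored element or, when none is given, the key itself;
  // the NULL check mirrors the one visible in the hunk above.
  static uint32_t
  area_hash(void* _area, const void* key, uint32_t range)
  {
  	VMArea* area = (VMArea*)_area;
  	const area_id* id = (const area_id*)key;

  	if (area != NULL)
  		return area->id % range;
  	return (uint32_t)*id % range;	// assumed completion of the cut-off tail
  }

  int main()
  {
  	VMArea area = { 17 };
  	area_id key = 17;
  	printf("compare: %d, bucket: %u\n", area_compare(&area, &key),
  		area_hash(&area, &key, 8));
  	return 0;
  }
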
@@ -938,12 +938,12 @@
 
 
 //! You need to have the address space locked when calling this function
-static vm_area*
+static VMArea*
 lookup_area(VMAddressSpace* addressSpace, area_id id)
 {
 	rw_lock_read_lock(&sAreaHashLock);
 
-	vm_area* area = (vm_area*)hash_lookup(sAreaHash, &id);
+	VMArea* area = (VMArea*)hash_lookup(sAreaHash, &id);
 	if (area != NULL && area->address_space != addressSpace)
 		area = NULL;
@@ -953,14 +953,14 @@
 }
 
 
-static vm_area*
+static VMArea*
 create_reserved_area_struct(VMAddressSpace* addressSpace, uint32 flags)
 {
-	vm_area* reserved = (vm_area*)malloc_nogrow(sizeof(vm_area));
+	VMArea* reserved = (VMArea*)malloc_nogrow(sizeof(VMArea));
 	if (reserved == NULL)
 		return NULL;
 
-	memset(reserved, 0, sizeof(vm_area));
+	memset(reserved, 0, sizeof(VMArea));
 	reserved->id = RESERVED_AREA_ID;
 		// this marks it as reserved space
 	reserved->protection = flags;
@@ -970,7 +970,7 @@
 }
 
 
-static vm_area*
+static VMArea*
 create_area_struct(VMAddressSpace* addressSpace, const char* name,
 	uint32 wiring, uint32 protection)
 {
@@ -979,7 +979,7 @@
 	if (length > B_OS_NAME_LENGTH)
 		length = B_OS_NAME_LENGTH;
 
-	vm_area* area = (vm_area*)malloc_nogrow(sizeof(vm_area));
+	VMArea* area = (VMArea*)malloc_nogrow(sizeof(VMArea));
 	if (area == NULL)
 		return NULL;
@@ -1004,7 +1004,7 @@
 	area->address_space_next = NULL;
 	area->cache_next = area->cache_prev = NULL;
 	area->hash_next = NULL;
-	new (&area->mappings) vm_area_mappings;
+	new (&area->mappings) VMAreaMappings;
 	area->page_protections = NULL;
 
 	return area;
@@ -1017,10 +1017,10 @@
 */
 static status_t
 find_reserved_area(VMAddressSpace* addressSpace, addr_t start,
-	addr_t size, vm_area* area)
+	addr_t size, VMArea* area)
 {
-	vm_area* last = NULL;
-	vm_area* next;
+	VMArea* last = NULL;
+	VMArea* next;
 
 	next = addressSpace->areas;
 	while (next != NULL) {
@@ -1074,7 +1074,7 @@
 	} else {
 		// the area splits the reserved range into two separate ones
 		// we need a new reserved area to cover this space
-		vm_area* reserved = create_reserved_area_struct(addressSpace,
+		VMArea* reserved = create_reserved_area_struct(addressSpace,
 			next->protection);
 		if (reserved == NULL)
 			return B_NO_MEMORY;
@@ -1113,10 +1113,10 @@
 /*!	Must be called with this address space's write lock held */
 static status_t
 find_and_insert_area_slot(VMAddressSpace* addressSpace, addr_t start,
-	addr_t size, addr_t end, uint32 addressSpec, vm_area* area)
+	addr_t size, addr_t end, uint32 addressSpec, VMArea* area)
 {
-	vm_area* last = NULL;
-	vm_area* next;
+	VMArea* last = NULL;
+	VMArea* next;
 	bool foundSpot = false;
 
 	TRACE(("find_and_insert_area_slot: address space %p, start 0x%lx, "
@@ -1353,7 +1353,7 @@
 */
 static status_t
 insert_area(VMAddressSpace* addressSpace, void** _address,
-	uint32 addressSpec, addr_t size, vm_area* area)
+	uint32 addressSpec, addr_t size, VMArea* area)
 {
 	addr_t searchBase, searchEnd;
 	status_t status;
@@ -1398,7 +1398,7 @@
 
 
 static inline void
-set_area_page_protection(vm_area* area, addr_t pageAddress, uint32 protection)
+set_area_page_protection(VMArea* area, addr_t pageAddress, uint32 protection)
 {
 	protection &= B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA;
 	uint32 pageIndex = (pageAddress - area->base) / B_PAGE_SIZE;
@@ -1411,7 +1411,7 @@
 
 
 static inline uint32
-get_area_page_protection(vm_area* area, addr_t pageAddress)
+get_area_page_protection(VMArea* area, addr_t pageAddress)
 {
 	if (area->page_protections == NULL)
 		return area->protection;
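
set_area_page_protection and get_area_page_protection (the last two hunks above) maintain an optional per-page override of the area's protection; only a few B_*_AREA bits are needed per page, so two pages can share one byte. A self-contained sketch of such nibble packing; that the even page sits in the low nibble is an assumption here, and the real kernel functions also derive matching kernel-protection bits:

  #include <cstdint>
  #include <cstdio>

  // One nibble (4 bits) of protection flags per page, two pages per byte.
  static void
  set_page_protection(uint8_t* protections, uint32_t pageIndex, uint8_t bits)
  {
  	uint8_t& slot = protections[pageIndex / 2];
  	if (pageIndex % 2 == 0)
  		slot = (slot & 0xf0) | (bits & 0x0f);	// even page: low nibble
  	else
  		slot = (slot & 0x0f) | ((bits & 0x0f) << 4);	// odd page: high nibble
  }

  static uint8_t
  get_page_protection(const uint8_t* protections, uint32_t pageIndex)
  {
  	uint8_t slot = protections[pageIndex / 2];
  	return pageIndex % 2 == 0 ? slot & 0x0f : slot >> 4;
  }

  int main()
  {
  	uint8_t table[2] = {};	// covers four pages
  	set_page_protection(table, 3, 0x5);
  	printf("page 3 -> %u\n", get_page_protection(table, 3));	// prints 5
  	return 0;
  }
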
@@ -1436,8 +1436,8 @@
 	The address space must be write locked.
 */
 static status_t
-cut_area(VMAddressSpace* addressSpace, vm_area* area, addr_t address,
-	addr_t lastAddress, vm_area** _secondArea, bool kernel)
+cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
+	addr_t lastAddress, VMArea** _secondArea, bool kernel)
 {
 	// Does the cut range intersect with the area at all?
 	addr_t areaLast = area->base + (area->size - 1);
@@ -1510,7 +1510,7 @@
 	// first cache to it and resize the first cache.
 
 	// map the second area
-	vm_area* secondArea;
+	VMArea* secondArea;
 	void* secondBaseAddress = (void*)secondBase;
 	status_t error = map_backing_store(addressSpace, cache, &secondBaseAddress,
 		area->cache_offset + (secondBase - area->base), secondSize,
@@ -1568,11 +1568,11 @@
 	addr_t lastAddress = address + (size - 1);
 
 	// Check, whether the caller is allowed to modify the concerned areas.
-	vm_area* area;
+	VMArea* area;
 	if (!kernel) {
 		area = addressSpace->areas;
 		while (area != NULL) {
-			vm_area* nextArea = area->address_space_next;
+			VMArea* nextArea = area->address_space_next;
 
 			if (area->id != RESERVED_AREA_ID) {
 				addr_t areaLast = area->base + (area->size - 1);
@@ -1588,7 +1588,7 @@
 
 	area = addressSpace->areas;
 	while (area != NULL) {
-		vm_area* nextArea = area->address_space_next;
+		VMArea* nextArea = area->address_space_next;
 
 		if (area->id != RESERVED_AREA_ID) {
 			addr_t areaLast = area->base + (area->size - 1);
@@ -1616,7 +1616,7 @@
 static status_t
 map_backing_store(VMAddressSpace* addressSpace, vm_cache* cache,
 	void** _virtualAddress, off_t offset, addr_t size, uint32 addressSpec,
-	int wiring, int protection, int mapping, vm_area** _area,
+	int wiring, int protection, int mapping, VMArea** _area,
 	const char* areaName, bool unmapAddressRange, bool kernel)
 {
 	TRACE(("map_backing_store: aspace %p, cache %p, *vaddr %p, offset 0x%Lx, "
@@ -1625,7 +1625,7 @@
 		addressSpec, wiring, protection, _area, areaName));
 	cache->AssertLocked();
 
-	vm_area* area = create_area_struct(addressSpace, areaName, wiring,
+	VMArea* area = create_area_struct(addressSpace, areaName, wiring,
 		protection);
 	if (area == NULL)
 		return B_NO_MEMORY;
@@ -1749,7 +1749,7 @@
 	cache->scan_skip = 1;
 	cache->Lock();
 
-	vm_area* area;
+	VMArea* area;
 	void* areaAddress = address;
 	status = map_backing_store(addressSpace, cache, &areaAddress, 0, size,
 		B_EXACT_ADDRESS, B_ALREADY_WIRED, 0, REGION_NO_PRIVATE_MAP, &area, name,
@@ -1781,14 +1781,14 @@
 
 	// search area list and remove any matching reserved ranges
 
-	vm_area* area = locker.AddressSpace()->areas;
-	vm_area* last = NULL;
+	VMArea* area = locker.AddressSpace()->areas;
+	VMArea* last = NULL;
 	while (area) {
 		// the area must be completely part of the reserved range
 		if (area->id == RESERVED_AREA_ID && area->base >= (addr_t)address
 			&& area->base + area->size <= (addr_t)address + size) {
 			// remove reserved range
-			vm_area* reserved = area;
+			VMArea* reserved = area;
 			if (last)
 				last->address_space_next = reserved->address_space_next;
 			else
@@ -1826,7 +1826,7 @@
 		return B_BAD_TEAM_ID;
 	}
 
-	vm_area* area = create_reserved_area_struct(locker.AddressSpace(), flags);
+	VMArea* area = create_reserved_area_struct(locker.AddressSpace(), flags);
 	if (area == NULL)
 		return B_NO_MEMORY;
@@ -1852,7 +1852,7 @@
 	uint32 addressSpec, addr_t size, uint32 wiring, uint32 protection,
 	addr_t physicalAddress, uint32 flags, bool kernel)
 {
-	vm_area* area;
+	VMArea* area;
 	vm_cache* cache;
 	vm_page* page = NULL;
 	bool isStack = (protection & B_STACK_AREA) != 0;
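
cut_area, renamed in the @@ -1436 hunk, has three interesting geometries once the cut range is known to intersect the area: a cut at the head, a cut at the tail, or a cut in the middle, which leaves two pieces and is why the @@ -1510 hunk maps a second area over the same cache. Just the range arithmetic, with inclusive last addresses as in the kernel code (classify_cut is an invented helper and assumes the ranges do intersect; the real function returns early otherwise):

  #include <cstdio>

  typedef unsigned long addr_t;

  // Classify how [address, lastAddress] cuts [base, areaLast].
  static void
  classify_cut(addr_t base, addr_t size, addr_t address, addr_t lastAddress)
  {
  	addr_t areaLast = base + (size - 1);

  	if (address <= base && lastAddress >= areaLast)
  		printf("cut removes the whole area\n");
  	else if (address <= base)
  		printf("head cut: area shrinks to [%#lx, %#lx]\n",
  			lastAddress + 1, areaLast);
  	else if (lastAddress >= areaLast)
  		printf("tail cut: area shrinks to [%#lx, %#lx]\n",
  			base, address - 1);
  	else
  		printf("middle cut: [%#lx, %#lx] plus second area [%#lx, %#lx]\n",
  			base, address - 1, lastAddress + 1, areaLast);
  }

  int main()
  {
  	classify_cut(0x1000, 0x4000, 0x2000, 0x2fff);	// a middle cut
  	return 0;
  }
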
@@ -2181,7 +2181,7 @@
 vm_map_physical_memory(team_id team, const char* name, void** _address,
 	uint32 addressSpec, addr_t size, uint32 protection,
 	addr_t physicalAddress)
 {
-	vm_area* area;
+	VMArea* area;
 	vm_cache* cache;
 	addr_t mapOffset;
@@ -2307,7 +2307,7 @@
 
 	cache->Lock();
 
-	vm_area* area;
+	VMArea* area;
 	result = map_backing_store(locker.AddressSpace(), cache, _address, 0, size,
 		addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
 		REGION_NO_PRIVATE_MAP, &area, name, false, true);
@@ -2359,7 +2359,7 @@
 vm_create_null_area(team_id team, const char* name, void** address,
 	uint32 addressSpec, addr_t size)
 {
-	vm_area* area;
+	VMArea* area;
 	vm_cache* cache;
 	status_t status;
@@ -2409,7 +2409,7 @@
 /*!	\a cache must be locked. The area's address space must be read-locked.
 */
 static void
-pre_map_area_pages(vm_area* area, VMCache* cache)
+pre_map_area_pages(VMArea* area, VMCache* cache)
 {
 	addr_t baseAddress = area->base;
 	addr_t cacheOffset = area->cache_offset;
@@ -2530,7 +2530,7 @@
 
 	cache->Lock();
 
-	vm_area* area;
+	VMArea* area;
 	status = map_backing_store(locker.AddressSpace(), cache, _address, offset,
 		size, addressSpec, 0, protection, mapping, &area, name,
 		unmapAddressRange, kernel);
@@ -2574,7 +2574,7 @@
 
 
 vm_cache*
-vm_area_get_locked_cache(vm_area* area)
+vm_area_get_locked_cache(VMArea* area)
 {
 	mutex_lock(&sAreaCacheLock);
@@ -2613,8 +2613,8 @@
 	uint32 addressSpec, uint32 protection, uint32 mapping, area_id sourceID,
 	bool kernel)
 {
-	vm_area* newArea = NULL;
-	vm_area* sourceArea;
+	VMArea* newArea = NULL;
+	VMArea* sourceArea;
 
 	// Check whether the source area exists and is cloneable. If so, mark it
 	// B_SHARED_AREA, so that we don't get problems with copy-on-write.
@@ -2745,10 +2745,10 @@
 
 //! The address space must be write locked at this point
 static void
-remove_area_from_address_space(VMAddressSpace* addressSpace, vm_area* area)
+remove_area_from_address_space(VMAddressSpace* addressSpace, VMArea* area)
 {
-	vm_area* temp = addressSpace->areas;
-	vm_area* last = NULL;
+	VMArea* temp = addressSpace->areas;
+	VMArea* last = NULL;
 
 	while (temp != NULL) {
 		if (area == temp) {
@@ -2775,7 +2775,7 @@
 
 
 static void
-delete_area(VMAddressSpace* addressSpace, vm_area* area)
+delete_area(VMAddressSpace* addressSpace, VMArea* area)
 {
 	rw_lock_write_lock(&sAreaHashLock);
 	hash_remove(sAreaHash, area);
@@ -2809,7 +2809,7 @@
 	TRACE(("vm_delete_area(team = 0x%lx, area = 0x%lx)\n", team, id));
 
 	AddressSpaceWriteLocker locker;
-	vm_area* area;
+	VMArea* area;
 	status_t status = locker.SetFromArea(team, id, area);
 	if (status != B_OK)
 		return status;
@@ -2858,7 +2858,7 @@
 	upperCache->areas = lowerCache->areas;
 	lowerCache->areas = NULL;
 
-	for (vm_area* tempArea = upperCache->areas; tempArea != NULL;
+	for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
 			tempArea = tempArea->cache_next) {
 		tempArea->cache = upperCache;
 		upperCache->AcquireRefLocked();
@@ -2872,7 +2872,7 @@
 
 	// We now need to remap all pages from all of the cache's areas read-only, so
 	// that a copy will be created on next write access
-	for (vm_area* tempArea = upperCache->areas; tempArea != NULL;
+	for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
 			tempArea = tempArea->cache_next) {
 		// The area must be readable in the same way it was previously writable
 		uint32 protection = B_KERNEL_READ_AREA;
@@ -2910,7 +2910,7 @@
 	MultiAddressSpaceLocker locker;
 	VMAddressSpace* targetAddressSpace;
 	vm_cache* cache;
-	vm_area* source;
+	VMArea* source;
 	status_t status = locker.AddTeam(team, true, &targetAddressSpace);
 	if (status == B_OK) {
 		status = locker.AddAreaCacheAndLock(sourceID, false, false, source,
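
remove_area_from_address_space (@@ -2745) unlinks an area from the address space's singly linked area list by walking with a trailing pointer, the same shape the reserved-range code earlier in the diff uses. Condensed to its essence with invented Toy* types:

  #include <cstdio>

  struct ToyArea {
  	ToyArea* address_space_next;
  };

  // Unlink 'area' from the singly linked list rooted at *head.
  static void
  remove_from_list(ToyArea** head, ToyArea* area)
  {
  	ToyArea* last = NULL;
  	for (ToyArea* temp = *head; temp != NULL;
  			temp = temp->address_space_next) {
  		if (temp == area) {
  			if (last != NULL)
  				last->address_space_next = temp->address_space_next;
  			else
  				*head = temp->address_space_next;	// removing the list head
  			return;
  		}
  		last = temp;
  	}
  }

  int main()
  {
  	ToyArea a = { NULL };
  	ToyArea b = { &a };	// list: b -> a
  	ToyArea* head = &b;
  	remove_from_list(&head, &b);
  	printf("new head is a: %d\n", head == &a);	// prints 1
  	return 0;
  }
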
@@ -2931,7 +2931,7 @@
 	// First, create a cache on top of the source area, respectively use the
 	// existing one, if this is a shared area.
 
-	vm_area* target;
+	VMArea* target;
 	status = map_backing_store(targetAddressSpace, cache, _address,
 		source->cache_offset, source->size, addressSpec, source->wiring,
 		protection, sharedArea ? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP,
@@ -2962,9 +2962,9 @@
 
 //! You need to hold the cache lock when calling this function
 static int32
-count_writable_areas(vm_cache* cache, vm_area* ignoreArea)
+count_writable_areas(vm_cache* cache, VMArea* ignoreArea)
 {
-	struct vm_area* area = cache->areas;
+	struct VMArea* area = cache->areas;
 	uint32 count = 0;
 
 	for (; area != NULL; area = area->cache_next) {
@@ -2990,7 +2990,7 @@
 	// lock address spaces and cache
 	MultiAddressSpaceLocker locker;
 	vm_cache* cache;
-	vm_area* area;
+	VMArea* area;
 	status_t status = locker.AddAreaCacheAndLock(areaID, true, false, area,
 		&cache);
 	AreaCacheLocker cacheLocker(cache);	// already locked
@@ -3103,7 +3103,7 @@
 
 
 static inline addr_t
-virtual_page_address(vm_area* area, vm_page* page)
+virtual_page_address(VMArea* area, vm_page* page)
 {
 	return area->base
 		+ ((page->cache_offset << PAGE_SHIFT) - area->cache_offset);
@@ -3118,7 +3118,7 @@
 	vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
 	vm_page_mapping* mapping;
 	while ((mapping = iterator.Next()) != NULL) {
-		vm_area* area = mapping->area;
+		VMArea* area = mapping->area;
 		vm_translation_map* map = &area->address_space->translation_map;
 		addr_t physicalAddress;
@@ -3147,7 +3147,7 @@
 	vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
 	vm_page_mapping* mapping;
 	while ((mapping = iterator.Next()) != NULL) {
-		vm_area* area = mapping->area;
+		VMArea* area = mapping->area;
 		vm_translation_map* map = &area->address_space->translation_map;
 		addr_t physicalAddress;
@@ -3178,7 +3178,7 @@
 	vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
 	vm_page_mapping* mapping;
 	while ((mapping = iterator.Next()) != NULL) {
-		vm_area* area = mapping->area;
+		VMArea* area = mapping->area;
 		vm_translation_map* map = &area->address_space->translation_map;
 
 		map->ops->lock(map);
@@ -3204,7 +3204,7 @@
 	vm_page_mappings::Iterator iterator = queue.GetIterator();
 	vm_page_mapping* mapping;
 	while ((mapping = iterator.Next()) != NULL) {
-		vm_area* area = mapping->area;
+		VMArea* area = mapping->area;
 		vm_translation_map* map = &area->address_space->translation_map;
 		addr_t physicalAddress;
 		uint32 flags;
@@ -3238,7 +3238,7 @@
 
 
 bool
-vm_unmap_page(vm_area* area, addr_t virtualAddress, bool preserveModified)
+vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
 {
 	vm_translation_map* map = &area->address_space->translation_map;
@@ -3311,7 +3311,7 @@
 
 
 status_t
-vm_unmap_pages(vm_area* area, addr_t base, size_t size, bool preserveModified)
+vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
 {
 	vm_translation_map* map = &area->address_space->translation_map;
 	addr_t end = base + (size - 1);
@@ -3370,12 +3370,12 @@
 			>> PAGE_SHIFT;
 		uint32 endOffset = startOffset + (size >> PAGE_SHIFT);
 		vm_page_mapping* mapping;
-		vm_area_mappings queue;
+		VMAreaMappings queue;
 
 		mutex_lock(&sMappingLock);
 		map->ops->lock(map);
 
-		vm_area_mappings::Iterator iterator = area->mappings.GetIterator();
+		VMAreaMappings::Iterator iterator = area->mappings.GetIterator();
 		while (iterator.HasNext()) {
 			mapping = iterator.Next();
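
virtual_page_address (@@ -3103) converts a page's position in its cache back into a virtual address: cache_offset on the page is counted in pages, hence the shift by PAGE_SHIFT, while the area's cache_offset is in bytes. Worked through with concrete, made-up numbers:

  #include <cstdio>

  int main()
  {
  	const unsigned long PAGE_SHIFT = 12;	// 4 KiB pages, as on x86
  	unsigned long areaBase = 0x20000000;	// where the area is mapped
  	unsigned long areaCacheOffset = 0x3000;	// area starts 3 pages into cache
  	unsigned long pageCacheOffset = 5;	// the page is the cache's 6th page

  	// Mirrors: area->base
  	//	+ ((page->cache_offset << PAGE_SHIFT) - area->cache_offset)
  	unsigned long virtualAddress = areaBase
  		+ ((pageCacheOffset << PAGE_SHIFT) - areaCacheOffset);

  	printf("virtual address: %#lx\n", virtualAddress);	// prints 0x20002000
  	return 0;
  }
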
@@ -3407,7 +3407,7 @@
 /*!	When calling this function, you need to have pages reserved!
 */
 status_t
-vm_map_page(vm_area* area, vm_page* page, addr_t address, uint32 protection)
+vm_map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
 {
 	vm_translation_map* map = &area->address_space->translation_map;
 	vm_page_mapping* mapping = NULL;
@@ -3746,7 +3746,7 @@
 
 	// areas
 	if (cache->areas != NULL) {
-		vm_area* area = cache->areas;
+		VMArea* area = cache->areas;
 		kprintf(", areas: %ld (%s, team: %ld)", area->id, area->name,
 			area->address_space->id);
@@ -3884,7 +3884,7 @@
 #endif
 
 	kprintf(" areas:\n");
-	for (vm_area* area = cache->areas; area != NULL; area = area->cache_next) {
+	for (VMArea* area = cache->areas; area != NULL; area = area->cache_next) {
 		kprintf("  area 0x%lx, %s\n", area->id, area->name);
 		kprintf("\tbase_addr: 0x%lx, size: 0x%lx\n", area->base, area->size);
 		kprintf("\tprotection: 0x%lx\n", area->protection);
@@ -3921,7 +3921,7 @@
 
 
 static void
-dump_area_struct(vm_area* area, bool mappings)

[... truncated: 342 lines follow ...]