Author: bonefish Date: 2010-06-14 18:25:14 +0200 (Mon, 14 Jun 2010) New Revision: 37131 Changeset: http://dev.haiku-os.org/changeset/37131/haiku Modified: haiku/trunk/headers/os/drivers/KernelExport.h haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h haiku/trunk/headers/private/kernel/vm/vm.h haiku/trunk/headers/private/kernel/vm/vm_page.h haiku/trunk/headers/private/kernel/vm/vm_types.h haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp haiku/trunk/src/system/kernel/arch/x86/arch_int.cpp haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp haiku/trunk/src/system/kernel/arch/x86/vm86.cpp haiku/trunk/src/system/kernel/debug/debug_heap.cpp haiku/trunk/src/system/kernel/debug/tracing.cpp haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp haiku/trunk/src/system/kernel/elf.cpp haiku/trunk/src/system/kernel/port.cpp haiku/trunk/src/system/kernel/sem.cpp haiku/trunk/src/system/kernel/slab/MemoryManager.cpp haiku/trunk/src/system/kernel/team.cpp haiku/trunk/src/system/kernel/thread.cpp haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.h haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.cpp haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.h haiku/trunk/src/system/kernel/vm/vm.cpp haiku/trunk/src/system/kernel/vm/vm_page.cpp Log: * Introduced structures {virtual,physical}_address_restrictions, which specify restrictions for virtual/physical addresses. * vm_page_allocate_page_run(): - Fixed conversion of base/limit to array indexes. sPhysicalPageOffset was not taken into account. - Takes a physical_address_restrictions instead of base/limit and also supports alignment and boundary restrictions, now. 
* map_backing_store(), VM[User,Kernel]AddressSpace::InsertArea()/ ReserveAddressRange() take a virtual_address_restrictions parameter, now. They also support an alignment independent from the range size. * create_area_etc(), vm_create_anonymous_area(): Take {virtual,physical}_address_restrictions parameters, now. * Removed no longer needed B_PHYSICAL_BASE_ADDRESS. * DMAResources: - Fixed potential overflows of uint32 when initializing from device node attributes. - Fixed bounce buffer creation TODOs: By using create_area_etc() with the new restrictions parameters we can directly support physical high address, boundary, and alignment. Modified: haiku/trunk/headers/os/drivers/KernelExport.h =================================================================== --- haiku/trunk/headers/os/drivers/KernelExport.h 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/headers/os/drivers/KernelExport.h 2010-06-14 16:25:14 UTC (rev 37131) @@ -85,7 +85,6 @@ /* address specifications for mapping physical memory */ #define B_ANY_KERNEL_BLOCK_ADDRESS (B_ANY_KERNEL_ADDRESS + 1) -#define B_PHYSICAL_BASE_ADDRESS (B_ANY_KERNEL_ADDRESS + 2) /* area protection flags for the kernel */ #define B_KERNEL_READ_AREA 16 Modified: haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h =================================================================== --- haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/headers/private/kernel/vm/VMAddressSpace.h 2010-06-14 16:25:14 UTC (rev 37131) @@ -17,6 +17,9 @@ #include <vm/VMTranslationMap.h> +struct virtual_address_restrictions; + + struct VMAddressSpace { public: class AreaIterator; @@ -73,9 +76,11 @@ uint32 allocationFlags) = 0; virtual void DeleteArea(VMArea* area, uint32 allocationFlags) = 0; - virtual status_t InsertArea(void** _address, uint32 addressSpec, - size_t size, VMArea* area, - uint32 allocationFlags) = 0; + virtual status_t InsertArea(VMArea* area, size_t size, + const 
virtual_address_restrictions* + addressRestrictions, + uint32 allocationFlags, void** _address) + = 0; virtual void RemoveArea(VMArea* area, uint32 allocationFlags) = 0; @@ -87,9 +92,11 @@ virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize, uint32 allocationFlags) = 0; - virtual status_t ReserveAddressRange(void** _address, - uint32 addressSpec, size_t size, - uint32 flags, uint32 allocationFlags) = 0; + virtual status_t ReserveAddressRange(size_t size, + const virtual_address_restrictions* + addressRestrictions, + uint32 flags, uint32 allocationFlags, + void** _address) = 0; virtual status_t UnreserveAddressRange(addr_t address, size_t size, uint32 allocationFlags) = 0; virtual void UnreserveAllAddressRanges( Modified: haiku/trunk/headers/private/kernel/vm/vm.h =================================================================== --- haiku/trunk/headers/private/kernel/vm/vm.h 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/headers/private/kernel/vm/vm.h 2010-06-14 16:25:14 UTC (rev 37131) @@ -77,9 +77,11 @@ void forbid_page_faults(void); // private kernel only extension (should be moved somewhere else): -area_id create_area_etc(team_id team, const char *name, void **address, - uint32 addressSpec, uint32 size, uint32 lock, uint32 protection, - phys_addr_t physicalAddress, uint32 flags); +area_id create_area_etc(team_id team, const char *name, uint32 size, + uint32 lock, uint32 protection, uint32 flags, + const virtual_address_restrictions* virtualAddressRestrictions, + const physical_address_restrictions* physicalAddressRestrictions, + void **_address); area_id transfer_area(area_id id, void** _address, uint32 addressSpec, team_id target, bool kernel); @@ -87,9 +89,11 @@ status_t vm_unreserve_address_range(team_id team, void *address, addr_t size); status_t vm_reserve_address_range(team_id team, void **_address, uint32 addressSpec, addr_t size, uint32 flags); -area_id vm_create_anonymous_area(team_id team, const char *name, void **address, - uint32 
addressSpec, addr_t size, uint32 wiring, uint32 protection, - phys_addr_t physicalAddress, uint32 flags, bool kernel); +area_id vm_create_anonymous_area(team_id team, const char* name, addr_t size, + uint32 wiring, uint32 protection, uint32 flags, + const virtual_address_restrictions* virtualAddressRestrictions, + const physical_address_restrictions* physicalAddressRestrictions, + bool kernel, void** _address); area_id vm_map_physical_memory(team_id team, const char *name, void **address, uint32 addressSpec, addr_t size, uint32 protection, phys_addr_t physicalAddress, bool alreadyWired); Modified: haiku/trunk/headers/private/kernel/vm/vm_page.h =================================================================== --- haiku/trunk/headers/private/kernel/vm/vm_page.h 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/headers/private/kernel/vm/vm_page.h 2010-06-14 16:25:14 UTC (rev 37131) @@ -60,8 +60,8 @@ struct vm_page *vm_page_allocate_page(vm_page_reservation* reservation, uint32 flags); -struct vm_page *vm_page_allocate_page_run(uint32 flags, phys_addr_t base, - phys_addr_t limit, page_num_t length, int priority); +struct vm_page *vm_page_allocate_page_run(uint32 flags, page_num_t length, + const physical_address_restrictions* restrictions, int priority); struct vm_page *vm_page_at_index(int32 index); struct vm_page *vm_lookup_page(page_num_t pageNumber); bool vm_page_is_dummy(struct vm_page *page); Modified: haiku/trunk/headers/private/kernel/vm/vm_types.h =================================================================== --- haiku/trunk/headers/private/kernel/vm/vm_types.h 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/headers/private/kernel/vm/vm_types.h 2010-06-14 16:25:14 UTC (rev 37131) @@ -30,6 +30,30 @@ typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link; +struct virtual_address_restrictions { + void* address; + // base or exact address, depending on address_specification + uint32 address_specification; + // address specification as 
passed to create_area() + size_t alignment; + // address alignment; overridden when + // address_specification == B_ANY_KERNEL_BLOCK_ADDRESS +}; + +struct physical_address_restrictions { + phys_addr_t low_address; + // lowest acceptable address + phys_addr_t high_address; + // lowest no longer acceptable address; for ranges: the + // highest acceptable non-inclusive end address + phys_size_t alignment; + // address alignment + phys_size_t boundary; + // multiples of which may not be crossed by the address + // range +}; + + typedef struct vm_page_mapping { vm_page_mapping_link page_link; vm_page_mapping_link area_link; Modified: haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp =================================================================== --- haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -539,8 +539,9 @@ uint32 count = size / B_PAGE_SIZE; if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) { + physical_address_restrictions restrictions = {}; memory->page = vm_page_allocate_page_run( - PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR, 0, 0, count, + PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR, count, &restrictions, VM_PRIORITY_SYSTEM); if (memory->page == NULL) return B_NO_MEMORY; Modified: haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp =================================================================== --- haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -691,10 +691,14 @@ //i386_selector_init(gGDT); // pass the new gdt // allocate an area for the double fault stacks + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; 
create_area_etc(B_SYSTEM_TEAM, "double fault stacks", - (void**)&sDoubleFaultStacks, B_ANY_KERNEL_ADDRESS, kDoubleFaultStackSize * smp_get_num_cpus(), B_FULL_LOCK, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, + &virtualRestrictions, &physicalRestrictions, + (void**)&sDoubleFaultStacks); X86PagingStructures* kernelPagingStructures = static_cast<X86VMTranslationMap*>( Modified: haiku/trunk/src/system/kernel/arch/x86/arch_int.cpp =================================================================== --- haiku/trunk/src/system/kernel/arch/x86/arch_int.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/arch/x86/arch_int.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -1371,9 +1371,12 @@ if (cpuCount > 0) { size_t areaSize = ROUNDUP(cpuCount * idtSize, B_PAGE_SIZE); desc_table* idt; - area = create_area_etc(B_SYSTEM_TEAM, "idt", (void**)&idt, - B_ANY_KERNEL_ADDRESS, areaSize, B_CONTIGUOUS, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + area = create_area_etc(B_SYSTEM_TEAM, "idt", areaSize, B_CONTIGUOUS, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, + &virtualRestrictions, &physicalRestrictions, (void**)&idt); if (area < 0) return area; Modified: haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp =================================================================== --- haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -189,9 +189,13 @@ // structures size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]); void* data; + 
virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool", - &data, B_ANY_KERNEL_ADDRESS, PAGE_ALIGN(areaSize), B_FULL_LOCK, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + PAGE_ALIGN(areaSize), B_FULL_LOCK, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, + &virtualRestrictions, &physicalRestrictions, &data); if (dataArea < 0) return dataArea; Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp =================================================================== --- haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -484,9 +484,13 @@ size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]); void* data; + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool", - &data, B_ANY_KERNEL_ADDRESS, PAGE_ALIGN(areaSize), B_FULL_LOCK, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + PAGE_ALIGN(areaSize), B_FULL_LOCK, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, + &virtualRestrictions, &physicalRestrictions, &data); if (dataArea < 0) return dataArea; @@ -800,11 +804,15 @@ } else { // no pages -- allocate one locker.Unlock(); - page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 0, 0x100000000LL, 1, + + physical_address_restrictions restrictions = {}; + restrictions.high_address = 0x100000000LL; + page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 1, &restrictions, 
VM_PRIORITY_SYSTEM); - DEBUG_PAGE_ACCESS_END(page); if (page == NULL) return NULL; + + DEBUG_PAGE_ACCESS_END(page); } // map the page Modified: haiku/trunk/src/system/kernel/arch/x86/vm86.cpp =================================================================== --- haiku/trunk/src/system/kernel/arch/x86/vm86.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/arch/x86/vm86.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -546,9 +546,14 @@ if (ramSize < VM86_MIN_RAM_SIZE) ramSize = VM86_MIN_RAM_SIZE; - void *address = (void *)0; - state->ram_area = create_area_etc(team->id, "dos", &address, - B_EXACT_ADDRESS, ramSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0); + void *address; + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address = NULL; + virtualRestrictions.address_specification = B_EXACT_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + state->ram_area = create_area_etc(team->id, "dos", ramSize, B_NO_LOCK, + B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions, + &physicalRestrictions, &address); if (state->ram_area < B_OK) { ret = state->ram_area; TRACE("Could not create RAM area\n"); Modified: haiku/trunk/src/system/kernel/debug/debug_heap.cpp =================================================================== --- haiku/trunk/src/system/kernel/debug/debug_heap.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/debug/debug_heap.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -1,5 +1,5 @@ /* - * Copyright 2009, Ingo Weinhold, ingo_weinhold@xxxxxx + * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@xxxxxx * Distributed under the terms of the MIT License. 
*/ @@ -291,9 +291,13 @@ { // create the heap area void* base; - area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", (void**)&base, - B_ANY_KERNEL_ADDRESS, KDEBUG_HEAP, B_FULL_LOCK, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", KDEBUG_HEAP, + B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, + CREATE_AREA_DONT_WAIT, &virtualRestrictions, &physicalRestrictions, + (void**)&base); if (area < 0) return; Modified: haiku/trunk/src/system/kernel/debug/tracing.cpp =================================================================== --- haiku/trunk/src/system/kernel/debug/tracing.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/debug/tracing.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -369,10 +369,14 @@ if (error != B_OK) return error; + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; area = create_area_etc(B_SYSTEM_TEAM, "tracing log", - (void**)&metaData->fTraceOutputBuffer, B_ANY_KERNEL_ADDRESS, kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, + &virtualRestrictions, &physicalRestrictions, + (void**)&metaData->fTraceOutputBuffer); if (area < 0) return area; @@ -411,13 +415,18 @@ { // search meta data in memory (from previous session) TracingMetaData* metaData; - addr_t metaDataAddress = kMetaDataBaseAddress; + phys_addr_t metaDataAddress = kMetaDataBaseAddress; for (; metaDataAddress <= kMetaDataBaseEndAddress; metaDataAddress += kMetaDataAddressIncrement) { - area_id area = 
area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata", - (void**)&metaData, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE, + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + physicalRestrictions.low_address = metaDataAddress; + physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE; + area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata", B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, - metaDataAddress, CREATE_AREA_DONT_CLEAR); + CREATE_AREA_DONT_CLEAR, &virtualRestrictions, &physicalRestrictions, + (void**)&metaData); if (area < 0) continue; @@ -463,11 +472,17 @@ } // re-map the previous tracing buffer - void* buffer = fTraceOutputBuffer; + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address = fTraceOutputBuffer; + virtualRestrictions.address_specification = B_EXACT_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + physicalRestrictions.low_address = fPhysicalAddress; + physicalRestrictions.high_address = fPhysicalAddress + + ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE); area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log", - &buffer, B_EXACT_ADDRESS, kTraceOutputBufferSize + MAX_TRACE_SIZE, - B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, - fPhysicalAddress, CREATE_AREA_DONT_CLEAR); + kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, + &virtualRestrictions, &physicalRestrictions, NULL); if (area < 0) { dprintf("Failed to init tracing meta data: Mapping tracing log " "buffer failed: %s\n", strerror(area)); @@ -475,7 +490,7 @@ } dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n", - buffer, kTraceOutputBufferSize + MAX_TRACE_SIZE); + fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE); // verify/repair the tracing entry list uint32 
errorCount = 0; Modified: haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp =================================================================== --- haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -11,6 +11,7 @@ #include <kernel.h> #include <util/AutoLock.h> +#include <vm/vm.h> #include "IORequest.h" @@ -117,19 +118,19 @@ uint32 value; if (gDeviceManagerModule.get_attr_uint32(node, B_DMA_ALIGNMENT, &value, true) == B_OK) - restrictions.alignment = value + 1; + restrictions.alignment = (generic_size_t)value + 1; if (gDeviceManagerModule.get_attr_uint32(node, B_DMA_BOUNDARY, &value, true) == B_OK) - restrictions.boundary = value + 1; + restrictions.boundary = (generic_size_t)value + 1; if (gDeviceManagerModule.get_attr_uint32(node, B_DMA_MAX_SEGMENT_BLOCKS, &value, true) == B_OK) - restrictions.max_segment_size = value * blockSize; + restrictions.max_segment_size = (generic_size_t)value * blockSize; if (gDeviceManagerModule.get_attr_uint32(node, B_DMA_MAX_TRANSFER_BLOCKS, &value, true) == B_OK) - restrictions.max_transfer_size = value * blockSize; + restrictions.max_transfer_size = (generic_size_t)value * blockSize; if (gDeviceManagerModule.get_attr_uint32(node, B_DMA_MAX_SEGMENT_COUNT, &value, true) == B_OK) @@ -226,19 +227,16 @@ area_id area = -1; phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE); - if (fRestrictions.alignment > B_PAGE_SIZE) { - dprintf("dma buffer restrictions not yet implemented: alignment %" - B_PRIuGENADDR "\n", fRestrictions.alignment); - } - if (fRestrictions.boundary > B_PAGE_SIZE) { - dprintf("dma buffer restrictions not yet implemented: boundary %" - B_PRIuGENADDR "\n", fRestrictions.boundary); - } - - bounceBuffer = (void*)fRestrictions.low_address; -// TODO: We also need to enforce the boundary restrictions. 
- area = create_area("dma buffer", &bounceBuffer, B_PHYSICAL_BASE_ADDRESS, - size, B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + physicalRestrictions.low_address = fRestrictions.low_address; + physicalRestrictions.high_address = fRestrictions.high_address; + physicalRestrictions.alignment = fRestrictions.alignment; + physicalRestrictions.boundary = fRestrictions.boundary; + area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &virtualRestrictions, + &physicalRestrictions, &bounceBuffer); if (area < B_OK) return area; @@ -251,10 +249,7 @@ physicalBase = entry.address; - if (fRestrictions.high_address < physicalBase + size) { - delete_area(area); - return B_NO_MEMORY; - } + ASSERT(fRestrictions.high_address >= physicalBase + size); DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer; if (buffer == NULL) { Modified: haiku/trunk/src/system/kernel/elf.cpp =================================================================== --- haiku/trunk/src/system/kernel/elf.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/elf.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -1881,9 +1881,13 @@ snprintf(regionName, B_OS_NAME_LENGTH, "%s_bss%d", baseName, i); regionAddress += fileUpperBound; - id = create_area_etc(team->id, regionName, - (void **)®ionAddress, B_EXACT_ADDRESS, bssSize, - B_NO_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address = regionAddress; + virtualRestrictions.address_specification = B_EXACT_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + id = create_area_etc(team->id, regionName, bssSize, B_NO_LOCK, + B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions, + &physicalRestrictions, 
(void**)®ionAddress); if (id < B_OK) { dprintf("error allocating bss area: %s!\n", strerror(id)); status = B_NOT_AN_EXECUTABLE; Modified: haiku/trunk/src/system/kernel/port.cpp =================================================================== --- haiku/trunk/src/system/kernel/port.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/port.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -645,9 +645,12 @@ size_t size = sizeof(struct port_entry) * sMaxPorts; // create and initialize ports table - sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", (void**)&sPorts, - B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", size, B_FULL_LOCK, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, + &virtualRestrictions, &physicalRestrictions, (void**)&sPorts); if (sPortArea < 0) { panic("unable to allocate kernel port table!\n"); return sPortArea; Modified: haiku/trunk/src/system/kernel/sem.cpp =================================================================== --- haiku/trunk/src/system/kernel/sem.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/sem.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -1,5 +1,5 @@ /* - * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@xxxxxxx + * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@xxxxxxx * Copyright 2002-2010, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx * Distributed under the terms of the MIT License. 
* @@ -417,9 +417,13 @@ sMaxSems <<= 1; // create and initialize semaphore table - area = create_area_etc(B_SYSTEM_TEAM, "sem_table", (void **)&sSems, - B_ANY_KERNEL_ADDRESS, sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + area = create_area_etc(B_SYSTEM_TEAM, "sem_table", + sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, + &virtualRestrictions, &physicalRestrictions, (void**)&sSems); if (area < 0) panic("unable to allocate semaphore table!\n"); Modified: haiku/trunk/src/system/kernel/slab/MemoryManager.cpp =================================================================== --- haiku/trunk/src/system/kernel/slab/MemoryManager.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/slab/MemoryManager.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -598,14 +598,18 @@ if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) return B_WOULD_BLOCK; + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification + = (flags & CACHE_ALIGN_ON_SIZE) != 0 + ? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; area_id area = create_area_etc(VMAddressSpace::KernelID(), - "slab large raw allocation", &_pages, - (flags & CACHE_ALIGN_ON_SIZE) != 0 - ? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS, - size, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, + "slab large raw allocation", size, B_FULL_LOCK, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? CREATE_AREA_DONT_WAIT : 0) - | CREATE_AREA_DONT_CLEAR); + | CREATE_AREA_DONT_CLEAR, + &virtualRestrictions, &physicalRestrictions, &_pages); return area >= 0 ? 
B_OK : area; } Modified: haiku/trunk/src/system/kernel/team.cpp =================================================================== --- haiku/trunk/src/system/kernel/team.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/team.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -865,10 +865,15 @@ static status_t create_team_user_data(struct team* team) { - void* address = (void*)KERNEL_USER_DATA_BASE; + void* address; size_t size = 4 * B_PAGE_SIZE; - team->user_data_area = create_area_etc(team->id, "user area", &address, - B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE; + virtualRestrictions.address_specification = B_BASE_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + team->user_data_area = create_area_etc(team->id, "user area", size, + B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions, + &physicalRestrictions, &address); if (team->user_data_area < 0) return team->user_data_area; @@ -1048,9 +1053,13 @@ // the exact location at the end of the user stack area sprintf(userStackName, "%s_main_stack", team->name); - thread->user_stack_area = create_area_etc(team->id, userStackName, - (void**)&thread->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK, - B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address = (void*)thread->user_stack_base; + virtualRestrictions.address_specification = B_EXACT_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; + thread->user_stack_area = create_area_etc(team->id, userStackName, sizeLeft, + B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, + &virtualRestrictions, &physicalRestrictions, NULL); if (thread->user_stack_area < 0) { dprintf("team_create_thread_start: could not create default user stack " "region: %s\n", 
strerror(thread->user_stack_area)); Modified: haiku/trunk/src/system/kernel/thread.cpp =================================================================== --- haiku/trunk/src/system/kernel/thread.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/thread.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -554,10 +554,15 @@ snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack", attributes.name, thread->id); + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address = (void*)thread->user_stack_base; + virtualRestrictions.address_specification = B_BASE_ADDRESS; + physical_address_restrictions physicalRestrictions = {}; thread->user_stack_area = create_area_etc(team->id, stack_name, - (void **)&thread->user_stack_base, B_BASE_ADDRESS, - thread->user_stack_size + TLS_SIZE, B_NO_LOCK, - B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0); + thread->user_stack_size + TLS_SIZE, B_NO_LOCK, + B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, + &virtualRestrictions, &physicalRestrictions, + (void**)&thread->user_stack_base); if (thread->user_stack_area < B_OK || arch_thread_init_tls(thread) < B_OK) { // great, we have a fully running thread without a (usable) Modified: haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -164,17 +164,20 @@ You need to hold the VMAddressSpace write lock. 
*/ status_t -VMKernelAddressSpace::InsertArea(void** _address, uint32 addressSpec, - size_t size, VMArea* _area, uint32 allocationFlags) +VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size, + const virtual_address_restrictions* addressRestrictions, + uint32 allocationFlags, void** _address) { TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE - ", %p \"%s\")\n", *_address, addressSpec, size, _area, _area->name); + ", %p \"%s\")\n", addressRestrictions->address, + addressRestrictions->address_specification, size, _area, _area->name); VMKernelArea* area = static_cast<VMKernelArea*>(_area); Range* range; - status_t error = _AllocateRange((addr_t)*_address, addressSpec, size, - addressSpec == B_EXACT_ADDRESS, allocationFlags, range); + status_t error = _AllocateRange(addressRestrictions, size, + addressRestrictions->address_specification == B_EXACT_ADDRESS, + allocationFlags, range); if (error != B_OK) return error; @@ -184,7 +187,8 @@ area->SetBase(range->base); area->SetSize(range->size); - *_address = (void*)area->Base(); + if (_address != NULL) + *_address = (void*)area->Base(); fFreeSpace -= area->Size(); PARANOIA_CHECK_STRUCTURES(); @@ -356,11 +360,13 @@ status_t -VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec, - size_t size, uint32 flags, uint32 allocationFlags) +VMKernelAddressSpace::ReserveAddressRange(size_t size, + const virtual_address_restrictions* addressRestrictions, + uint32 flags, uint32 allocationFlags, void** _address) { TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#" - B_PRIxSIZE ", %#" B_PRIx32 ")\n", *_address, addressSpec, size, flags); + B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address, + addressRestrictions->address_specification, size, flags); // Don't allow range reservations, if the address space is about to be // deleted. 
@@ -368,7 +374,7 @@ return B_BAD_TEAM_ID; Range* range; - status_t error = _AllocateRange((addr_t)*_address, addressSpec, size, false, + status_t error = _AllocateRange(addressRestrictions, size, false, allocationFlags, range); if (error != B_OK) return error; @@ -377,7 +383,8 @@ range->reserved.base = range->base; range->reserved.flags = flags; - *_address = (void*)range->base; + if (_address != NULL) + *_address = (void*)range->base; Get(); PARANOIA_CHECK_STRUCTURES(); @@ -529,19 +536,23 @@ status_t -VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec, +VMKernelAddressSpace::_AllocateRange( + const virtual_address_restrictions* addressRestrictions, size_t size, bool allowReservedRange, uint32 allocationFlags, Range*& _range) { - TRACE(" VMKernelAddressSpace::_AllocateRange(address: %#" B_PRIxADDR - ", size: %#" B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved " - "allowed: %d)\n", address, size, addressSpec, allowReservedRange); + TRACE(" VMKernelAddressSpace::_AllocateRange(address: %p, size: %#" + B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n", + addressRestrictions->address, size, + addressRestrictions->address_specification, allowReservedRange); // prepare size, alignment and the base address for the range search + addr_t address = (addr_t)addressRestrictions->address; size = ROUNDUP(size, B_PAGE_SIZE); - size_t alignment = B_PAGE_SIZE; + size_t alignment = addressRestrictions->alignment != 0 + ? addressRestrictions->alignment : B_PAGE_SIZE; - switch (addressSpec) { + switch (addressRestrictions->address_specification) { case B_EXACT_ADDRESS: { if (address % B_PAGE_SIZE != 0) @@ -574,10 +585,13 @@ } // find a range - Range* range = _FindFreeRange(address, size, alignment, addressSpec, - allowReservedRange, address); - if (range == NULL) - return addressSpec == B_EXACT_ADDRESS ? 
B_BAD_VALUE : B_NO_MEMORY; + Range* range = _FindFreeRange(address, size, alignment, + addressRestrictions->address_specification, allowReservedRange, + address); + if (range == NULL) { + return addressRestrictions->address_specification == B_EXACT_ADDRESS + ? B_BAD_VALUE : B_NO_MEMORY; + } TRACE(" VMKernelAddressSpace::_AllocateRange() found range:(%p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size, Modified: haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.h =================================================================== --- haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.h 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/vm/VMKernelAddressSpace.h 2010-06-14 16:25:14 UTC (rev 37131) @@ -27,9 +27,10 @@ uint32 protection, uint32 allocationFlags); virtual void DeleteArea(VMArea* area, uint32 allocationFlags); - virtual status_t InsertArea(void** _address, uint32 addressSpec, - size_t size, VMArea* area, - uint32 allocationFlags); + virtual status_t InsertArea(VMArea* area, size_t size, + const virtual_address_restrictions* + addressRestrictions, + uint32 allocationFlags, void** _address); virtual void RemoveArea(VMArea* area, uint32 allocationFlags); @@ -41,9 +42,11 @@ virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize, uint32 allocationFlags); - virtual status_t ReserveAddressRange(void** _address, - uint32 addressSpec, size_t size, - uint32 flags, uint32 allocationFlags); + virtual status_t ReserveAddressRange(size_t size, + const virtual_address_restrictions* + addressRestrictions, + uint32 flags, uint32 allocationFlags, + void** _address); virtual status_t UnreserveAddressRange(addr_t address, size_t size, uint32 allocationFlags); virtual void UnreserveAllAddressRanges( @@ -67,9 +70,10 @@ void _InsertRange(Range* range); void _RemoveRange(Range* range); - status_t _AllocateRange(addr_t address, - uint32 addressSpec, size_t size, - bool allowReservedRange, + status_t _AllocateRange( + 
const virtual_address_restrictions* + addressRestrictions, + size_t size, bool allowReservedRange, uint32 allocationFlags, Range*& _range); Range* _FindFreeRange(addr_t start, size_t size, size_t alignment, uint32 addressSpec, Modified: haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.cpp 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -121,22 +121,23 @@ You need to hold the VMAddressSpace write lock. */ status_t -VMUserAddressSpace::InsertArea(void** _address, uint32 addressSpec, - size_t size, VMArea* _area, uint32 allocationFlags) +VMUserAddressSpace::InsertArea(VMArea* _area, size_t size, + const virtual_address_restrictions* addressRestrictions, + uint32 allocationFlags, void** _address) { VMUserArea* area = static_cast<VMUserArea*>(_area); addr_t searchBase, searchEnd; status_t status; - switch (addressSpec) { + switch (addressRestrictions->address_specification) { case B_EXACT_ADDRESS: - searchBase = (addr_t)*_address; - searchEnd = (addr_t)*_address + (size - 1); + searchBase = (addr_t)addressRestrictions->address; + searchEnd = (addr_t)addressRestrictions->address + (size - 1); break; case B_BASE_ADDRESS: - searchBase = (addr_t)*_address; + searchBase = (addr_t)addressRestrictions->address; searchEnd = fEndAddress; break; @@ -155,10 +156,12 @@ return B_BAD_VALUE; } - status = _InsertAreaSlot(searchBase, size, searchEnd, addressSpec, area, - allocationFlags); + status = _InsertAreaSlot(searchBase, size, searchEnd, + addressRestrictions->address_specification, + addressRestrictions->alignment, area, allocationFlags); if (status == B_OK) { - *_address = (void*)area->Base(); + if (_address != NULL) + *_address = (void*)area->Base(); fFreeSpace -= area->Size(); } @@ -276,8 +279,9 @@ status_t -VMUserAddressSpace::ReserveAddressRange(void** 
_address, uint32 addressSpec, - size_t size, uint32 flags, uint32 allocationFlags) +VMUserAddressSpace::ReserveAddressRange(size_t size, + const virtual_address_restrictions* addressRestrictions, + uint32 flags, uint32 allocationFlags, void** _address) { // check to see if this address space has entered DELETE state if (fDeleting) { @@ -290,8 +294,8 @@ if (area == NULL) return B_NO_MEMORY; - status_t status = InsertArea(_address, addressSpec, size, area, - allocationFlags); + status_t status = InsertArea(area, size, addressRestrictions, + allocationFlags, _address); if (status != B_OK) { area->~VMUserArea(); free_etc(area, allocationFlags); @@ -453,7 +457,8 @@ /*! Must be called with this address space's write lock held */ status_t VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end, - uint32 addressSpec, VMUserArea* area, uint32 allocationFlags) + uint32 addressSpec, size_t alignment, VMUserArea* area, + uint32 allocationFlags) { VMUserArea* last = NULL; VMUserArea* next; @@ -480,7 +485,8 @@ // TODO: this could be further optimized. 
} - size_t alignment = B_PAGE_SIZE; + if (alignment == 0) + alignment = B_PAGE_SIZE; if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) { // align the memory to the next power of two of the size while (alignment < size) Modified: haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.h =================================================================== --- haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.h 2010-06-13 19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/vm/VMUserAddressSpace.h 2010-06-14 16:25:14 UTC (rev 37131) @@ -25,9 +25,10 @@ uint32 protection, uint32 allocationFlags); virtual void DeleteArea(VMArea* area, uint32 allocationFlags); - virtual status_t InsertArea(void** _address, uint32 addressSpec, - size_t size, VMArea* area, - uint32 allocationFlags); + virtual status_t InsertArea(VMArea* area, size_t size, + const virtual_address_restrictions* + addressRestrictions, + uint32 allocationFlags, void** _address); virtual void RemoveArea(VMArea* area, uint32 allocationFlags); @@ -39,9 +40,11 @@ virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize, uint32 allocationFlags); - virtual status_t ReserveAddressRange(void** _address, - uint32 addressSpec, size_t size, - uint32 flags, uint32 allocationFlags); + virtual status_t ReserveAddressRange(size_t size, + const virtual_address_restrictions* + addressRestrictions, + uint32 flags, uint32 allocationFlags, + void** _address); virtual status_t UnreserveAddressRange(addr_t address, size_t size, uint32 allocationFlags); virtual void UnreserveAllAddressRanges( @@ -55,7 +58,8 @@ uint32 allocationFlags); status_t _InsertAreaSlot(addr_t start, addr_t size, addr_t end, uint32 addressSpec, - VMUserArea* area, uint32 allocationFlags); + size_t alignment, VMUserArea* area, + uint32 allocationFlags); private: VMUserAreaList fAreas; Modified: haiku/trunk/src/system/kernel/vm/vm.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/vm.cpp 2010-06-13 
19:58:43 UTC (rev 37130) +++ haiku/trunk/src/system/kernel/vm/vm.cpp 2010-06-14 16:25:14 UTC (rev 37131) @@ -267,9 +267,10 @@ bool isWrite, bool isUser, vm_page** wirePage, VMAreaWiredRange* wiredRange = NULL); static status_t map_backing_store(VMAddressSpace* addressSpace, - VMCache* cache, void** _virtualAddress, off_t offset, addr_t size, - uint32 addressSpec, int wiring, int protection, int mapping, - VMArea** _area, const char* areaName, uint32 flags, bool kernel); + VMCache* cache, off_t offset, const char* areaName, addr_t size, int wiring, + int protection, int mapping, uint32 flags, + const virtual_address_restrictions* addressRestrictions, bool kernel, + VMArea** _area, void** _virtualAddress); // #pragma mark - @@ -670,12 +671,14 @@ // first cache to it and resize the first cache. // map the second area + virtual_address_restrictions addressRestrictions = {}; + addressRestrictions.address = (void*)secondBase; + addressRestrictions.address_specification = B_EXACT_ADDRESS; VMArea* secondArea; - void* secondBaseAddress = (void*)secondBase; - error = map_backing_store(addressSpace, cache, &secondBaseAddress, - area->cache_offset + (secondBase - area->Base()), secondSize, - B_EXACT_ADDRESS, area->wiring, area->protection, REGION_NO_PRIVATE_MAP, - &secondArea, area->name, 0, kernel); + error = map_backing_store(addressSpace, cache, + area->cache_offset + (secondBase - area->Base()), area->name, + secondSize, area->wiring, area->protection, REGION_NO_PRIVATE_MAP, 0, + &addressRestrictions, kernel, &secondArea, NULL); if (error != B_OK) { addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags); return error; @@ -740,15 +743,16 @@ \a size) is wired. 
*/ static status_t -map_backing_store(VMAddressSpace* addressSpace, VMCache* cache, - void** _virtualAddress, off_t offset, addr_t size, uint32 addressSpec, - int wiring, int protection, int mapping, VMArea** _area, - const char* areaName, uint32 flags, bool kernel) +map_backing_store(VMAddressSpace* addressSpace, VMCache* cache, off_t offset, + const char* areaName, addr_t size, int wiring, int protection, int mapping, + uint32 flags, const virtual_address_restrictions* addressRestrictions, + bool kernel, VMArea** _area, void** _virtualAddress) { TRACE(("map_backing_store: aspace %p, cache %p, virtual %p, offset 0x%Lx, " [... truncated: 442 lines follow ...]