[haiku-commits] r37070 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/arch/generic src/system/kernel/arch/m68k src/system/kernel/arch/x86/paging/32bit src/system/kernel/slab ...

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Wed, 9 Jun 2010 13:15:43 +0200 (CEST)

Author: bonefish
Date: 2010-06-09 13:15:43 +0200 (Wed, 09 Jun 2010)
New Revision: 37070
Changeset: http://dev.haiku-os.org/changeset/37070/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/vm.h
   
haiku/trunk/src/system/kernel/arch/generic/generic_vm_physical_page_mapper.cpp
   haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
   haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp
   haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
* vm_allocate_early(): Replace "bool blockAlign" parameter by a more flexible
  "addr_t alignment".
* X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(),
  generic_vm_physical_page_mapper_init(): Use vm_allocate_early()'s alignment
  feature instead of aligning by hand.


Modified: haiku/trunk/headers/private/kernel/vm/vm.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm.h  2010-06-08 22:12:20 UTC (rev 
37069)
+++ haiku/trunk/headers/private/kernel/vm/vm.h  2010-06-09 11:15:43 UTC (rev 
37070)
@@ -63,7 +63,7 @@
 void vm_free_kernel_args(struct kernel_args *args);
 void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
 addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
-                       size_t physicalSize, uint32 attributes, bool 
blockAlign);
+                       size_t physicalSize, uint32 attributes, addr_t 
alignment);
 
 void slab_init(struct kernel_args *args);
 void slab_init_post_area();

Modified: 
haiku/trunk/src/system/kernel/arch/generic/generic_vm_physical_page_mapper.cpp
===================================================================
--- 
haiku/trunk/src/system/kernel/arch/generic/generic_vm_physical_page_mapper.cpp  
    2010-06-08 22:12:20 UTC (rev 37069)
+++ 
haiku/trunk/src/system/kernel/arch/generic/generic_vm_physical_page_mapper.cpp  
    2010-06-09 11:15:43 UTC (rev 37070)
@@ -248,29 +248,24 @@
        sIOSpaceChunkSize = ioSpaceChunkSize;
 
        // reserve virtual space for the IO space
-       // We reserve (ioSpaceChunkSize - B_PAGE_SIZE) bytes more, so that we
-       // can guarantee to align the base address to ioSpaceChunkSize.
-       sIOSpaceBase = vm_allocate_early(args,
-               sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE, 0, 0, false);
+       sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0,
+               ioSpaceChunkSize);
        if (sIOSpaceBase == 0) {
                panic("generic_vm_physical_page_mapper_init(): Failed to 
reserve IO "
                        "space in virtual address space!");
                return B_ERROR;
        }
 
-       // align the base address to chunk size
-       sIOSpaceBase = (sIOSpaceBase + ioSpaceChunkSize - 1) / ioSpaceChunkSize
-               * ioSpaceChunkSize;
        *ioSpaceBase = sIOSpaceBase;
 
        // allocate some space to hold physical page mapping info
        paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
                sizeof(paddr_chunk_desc) * 1024, ~0L,
-               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
        num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
        virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
                sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
-               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
        TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables 
%p"*/"\n",
                paddr_desc, virtual_pmappings/*, iospace_pgtables*/));

Modified: 
haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    
2010-06-08 22:12:20 UTC (rev 37069)
+++ haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    
2010-06-09 11:15:43 UTC (rev 37070)
@@ -1262,7 +1262,7 @@
 
        iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
                B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 
NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
-               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
        TRACE(("iospace_pgtables %p\n", iospace_pgtables));
 

Modified: 
haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp
===================================================================
--- 
haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp    
    2010-06-08 22:12:20 UTC (rev 37069)
+++ 
haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp    
    2010-06-09 11:15:43 UTC (rev 37070)
@@ -84,22 +84,19 @@
 status_t
 X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
 {
-       // We reserve more, so that we can guarantee to align the base address
-       // to page table ranges.
-       addr_t virtualBase = vm_allocate_early(args,
-               1024 * B_PAGE_SIZE + kPageTableAlignment - B_PAGE_SIZE, 0, 0, 
false);
+       // allocate a virtual address range for the pages to be mapped into
+       addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
+               kPageTableAlignment);
        if (virtualBase == 0) {
                panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve 
"
                        "physical page pool space in virtual address space!");
                return B_ERROR;
        }
-       virtualBase = (virtualBase + kPageTableAlignment - 1)
-               / kPageTableAlignment * kPageTableAlignment;
 
        // allocate memory for the page table and data
        size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
        page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
-               areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+               areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
        // prepare the page table
        _EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);

Modified: haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-06-08 
22:12:20 UTC (rev 37069)
+++ haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-06-09 
11:15:43 UTC (rev 37070)
@@ -1212,7 +1212,8 @@
        } else {
                // no areas yet -- allocate raw memory
                area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
-                       SLAB_AREA_SIZE, B_KERNEL_READ_AREA | 
B_KERNEL_WRITE_AREA, true);
+                       SLAB_AREA_SIZE, B_KERNEL_READ_AREA | 
B_KERNEL_WRITE_AREA,
+                       SLAB_AREA_SIZE);
                if (area == NULL) {
                        mutex_lock(&sLock);
                        return B_NO_MEMORY;

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp     2010-06-08 22:12:20 UTC (rev 
37069)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp     2010-06-09 11:15:43 UTC (rev 
37070)
@@ -3432,7 +3432,7 @@
 
 
 static addr_t
-allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
+allocate_early_virtual(kernel_args* args, size_t size, addr_t alignment)
 {
        size = PAGE_ALIGN(size);
 
@@ -3443,8 +3443,8 @@
                addr_t previousRangeEnd = args->virtual_allocated_range[i - 
1].start
                        + args->virtual_allocated_range[i - 1].size;
 
-               addr_t base = blockAlign
-                       ? ROUNDUP(previousRangeEnd, size) : previousRangeEnd;
+               addr_t base = alignment > 0
+                       ? ROUNDUP(previousRangeEnd, alignment) : 
previousRangeEnd;
 
                if (base >= KERNEL_BASE && base < rangeStart
                                && rangeStart - base >= size) {
@@ -3459,7 +3459,8 @@
        int lastEntryIndex = args->num_virtual_allocated_ranges - 1;
        addr_t lastRangeEnd = 
args->virtual_allocated_range[lastEntryIndex].start
                + args->virtual_allocated_range[lastEntryIndex].size;
-       addr_t base = blockAlign ? ROUNDUP(lastRangeEnd, size) : lastRangeEnd;
+       addr_t base = alignment > 0
+               ? ROUNDUP(lastRangeEnd, alignment) : lastRangeEnd;
        if (KERNEL_BASE + (KERNEL_SIZE - 1) - base >= size) {
                args->virtual_allocated_range[lastEntryIndex].size
                        += base + size - lastRangeEnd;
@@ -3470,8 +3471,8 @@
        addr_t rangeStart = args->virtual_allocated_range[0].start;
        if (rangeStart > KERNEL_BASE && rangeStart - KERNEL_BASE >= size) {
                base = rangeStart - size;
-               if (blockAlign)
-                       base = ROUNDDOWN(base, size);
+               if (alignment > 0)
+                       base = ROUNDDOWN(base, alignment);
 
                if (base >= KERNEL_BASE) {
                        args->virtual_allocated_range[0].start = base;
@@ -3532,13 +3533,13 @@
 */
 addr_t
 vm_allocate_early(kernel_args* args, size_t virtualSize, size_t physicalSize,
-       uint32 attributes, bool blockAlign)
+       uint32 attributes, addr_t alignment)
 {
        if (physicalSize > virtualSize)
                physicalSize = virtualSize;
 
        // find the vaddr to allocate at
-       addr_t virtualBase = allocate_early_virtual(args, virtualSize, 
blockAlign);
+       addr_t virtualBase = allocate_early_virtual(args, virtualSize, 
alignment);
        //dprintf("vm_allocate_early: vaddr 0x%lx\n", virtualAddress);
 
        // map the pages
@@ -3587,7 +3588,7 @@
 #if    !USE_SLAB_ALLOCATOR_FOR_MALLOC
        // map in the new heap and initialize it
        addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
-               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
        TRACE(("heap at 0x%lx\n", heapBase));
        heap_init(heapBase, heapSize);
 #endif

Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-06-08 22:12:20 UTC 
(rev 37069)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-06-09 11:15:43 UTC 
(rev 37070)
@@ -2849,7 +2849,7 @@
 
        // map in the new free page table
        sPages = (vm_page *)vm_allocate_early(args, sNumPages * sizeof(vm_page),
-               ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+               ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
        TRACE(("vm_init: putting free_page_table @ %p, # ents %ld (size 
0x%x)\n",
                sPages, sNumPages, (unsigned int)(sNumPages * 
sizeof(vm_page))));


Other related posts:

  • » [haiku-commits] r37070 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/arch/generic src/system/kernel/arch/m68k src/system/kernel/arch/x86/paging/32bit src/system/kernel/slab ... - ingo_weinhold