[haiku-commits] r35726 - in haiku/trunk: headers/private/kernel/arch/x86 headers/private/kernel/boot/platform/bios_ia32 src/system/boot/platform/bios_ia32 src/system/kernel/arch/x86 src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 2 Mar 2010 19:13:06 +0100 (CET)

Author: bonefish
Date: 2010-03-02 19:13:06 +0100 (Tue, 02 Mar 2010)
New Revision: 35726
Changeset: http://dev.haiku-os.org/changeset/35726/haiku

Modified:
   haiku/trunk/headers/private/kernel/arch/x86/arch_kernel_args.h
   haiku/trunk/headers/private/kernel/boot/platform/bios_ia32/platform_kernel_args.h
   haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
Boot loader (x86 mmu.cpp):
* Made the page table allocation more flexible. Got rid of sMaxVirtualAddress
  and added a new virtual_end address to the architecture-specific kernel args.
* Increased the virtual space we reserve for the kernel to 16 MB. That
  should suffice for quite a while. The previous 2 MB were too tight when
  building the kernel with debug info.
* mmu_init(): The way we were translating the BIOS' extended memory map to
  our physical ranges arrays was broken. Small gaps between usable memory
  ranges would be ignored and instead marked allocated. This worked fine for
  the boot loader and during the early kernel initialization, but after the
  VM has been fully set up, it frees all physical ranges that have not been
  claimed otherwise. So those ranges could be entered into the free pages
  list and would be used later. This could cause all kinds of weird
  problems, probably including ACPI issues. Now we add only the actually
  usable ranges to our list (see the condensed sketch below).
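
  For reference, the fixed translation of one BIOS memory map entry boils
  down to the following (a condensed paraphrase of the mmu_init() hunk
  further below; `entry` is a stand-in for extMemoryBlock[i], everything
  else is the boot loader's own):

	uint64 base = entry.base_addr;
	uint64 end = base + entry.length;

	// only whole pages are usable: round the start up, the end down
	base = ROUNDUP(base, B_PAGE_SIZE);
	end = ROUNDDOWN(end, B_PAGE_SIZE);

	// clamp to 4 GB and skip entries that vanish after rounding; the
	// gaps between entries are simply no longer inserted
	if (end > 0x100000000ULL)
		end = 0x100000000ULL;
	if (end > base)
		insert_physical_memory_range(base, end - base);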

Kernel:
* vm_page_init(): The pages in the gaps between the usable physical memory
  ranges are now marked PAGE_STATE_UNUSED, those in the allocated ranges
  PAGE_STATE_WIRED (see the sketch below).
* unmap_and_free_physical_pages(): Don't free pages marked as unused.
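
A condensed view of the new vm_page_init() logic (paraphrased from the
vm_page.cpp hunk below): gaps between the usable ranges are passed
wired = false (PAGE_STATE_UNUSED), the boot loader's allocations
wired = true (PAGE_STATE_WIRED):

	addr_t previousEnd = 0;
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		addr_t base = args->physical_memory_range[i].start;
		addr_t size = args->physical_memory_range[i].size;
		// everything between the previous range and this one is a gap
		if (base > previousEnd) {
			mark_page_range_in_use(previousEnd / B_PAGE_SIZE,
				(base - previousEnd) / B_PAGE_SIZE, false);
		}
		previousEnd = base + size;
	}

	// the ranges the boot loader allocated stay wired
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		mark_page_range_in_use(
			args->physical_allocated_range[i].start / B_PAGE_SIZE,
			args->physical_allocated_range[i].size / B_PAGE_SIZE, true);
	}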


Modified: haiku/trunk/headers/private/kernel/arch/x86/arch_kernel_args.h
===================================================================
--- haiku/trunk/headers/private/kernel/arch/x86/arch_kernel_args.h	2010-03-02 17:50:21 UTC (rev 35725)
+++ haiku/trunk/headers/private/kernel/arch/x86/arch_kernel_args.h	2010-03-02 18:13:06 UTC (rev 35726)
@@ -25,6 +25,7 @@
        uint32  vir_pgdir;
        uint32  num_pgtables;
        uint32  pgtables[MAX_BOOT_PTABLES];
+       uint32  virtual_end;
        uint32  phys_idt;
        uint32  vir_idt;
        uint32  phys_gdt;

Modified: haiku/trunk/headers/private/kernel/boot/platform/bios_ia32/platform_kernel_args.h
===================================================================
--- haiku/trunk/headers/private/kernel/boot/platform/bios_ia32/platform_kernel_args.h	2010-03-02 17:50:21 UTC (rev 35725)
+++ haiku/trunk/headers/private/kernel/boot/platform/bios_ia32/platform_kernel_args.h	2010-03-02 18:13:06 UTC (rev 35726)
@@ -16,9 +16,9 @@
 
 // must match SMP_MAX_CPUS in arch_smp.h
 #define MAX_BOOT_CPUS 8
-#define MAX_PHYSICAL_MEMORY_RANGE 6
-#define MAX_PHYSICAL_ALLOCATED_RANGE 6
-#define MAX_VIRTUAL_ALLOCATED_RANGE 6
+#define MAX_PHYSICAL_MEMORY_RANGE 8
+#define MAX_PHYSICAL_ALLOCATED_RANGE 8
+#define MAX_VIRTUAL_ALLOCATED_RANGE 8
 
 #define MAX_SERIAL_PORTS 4
 

Modified: haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp
===================================================================
--- haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp	2010-03-02 17:50:21 UTC (rev 35725)
+++ haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp	2010-03-02 18:13:06 UTC (rev 35726)
@@ -72,7 +72,7 @@
 
 
 static const uint32 kDefaultPageTableFlags = 0x07;     // present, user, R/W
-static const size_t kMaxKernelSize = 0x200000;         // 2 MB for the kernel
+static const size_t kMaxKernelSize = 0x1000000;        // 16 MB for the kernel
 
 // working page directory and page table
 static uint32 *sPageDirectory = 0;
@@ -81,7 +81,6 @@
 
 static addr_t sNextPhysicalAddress = 0x112000;
 static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
-static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;
 
 static addr_t sNextPageTableAddress = 0x7d000;
 static const uint32 kPageTableRegionEnd = 0x8b000;
@@ -91,7 +90,6 @@
 
 static addr_t sNextPhysicalAddress = 0x100000;
 static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
-static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;
 
 static addr_t sNextPageTableAddress = 0x90000;
 static const uint32 kPageTableRegionEnd = 0x9e000;
@@ -151,14 +149,17 @@
 
 
 /*!    Adds a new page table for the specified base address */
-static void
+static uint32*
 add_page_table(addr_t base)
 {
+       base = ROUNDDOWN(base, B_PAGE_SIZE * 1024);
+
        // Get new page table and clear it out
        uint32 *pageTable = get_next_page_table();
        if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
                panic("tried to add page table beyond the identity mapped 8 MB "
                        "region\n");
+               return NULL;
        }
 
        TRACE("add_page_table(base = %p), got page: %p\n", (void*)base, pageTable);
@@ -172,6 +173,13 @@
        // put the new page table into the page directory
        sPageDirectory[base / (4 * 1024 * 1024)]
                = (uint32)pageTable | kDefaultPageTableFlags;
+
+       // update the virtual end address in the kernel args
+       base += B_PAGE_SIZE * 1024;
+       if (base > gKernelArgs.arch_args.virtual_end)
+               gKernelArgs.arch_args.virtual_end = base;
+
+       return pageTable;
 }
 
 
@@ -210,22 +218,23 @@
                        (void *)virtualAddress);
        }
 
-       if (virtualAddress >= sMaxVirtualAddress) {
+       uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
+               / (B_PAGE_SIZE * 1024)] & 0xfffff000);
+
+       if (pageTable == NULL) {
                // we need to add a new page table
-               add_page_table(sMaxVirtualAddress);
-               sMaxVirtualAddress += B_PAGE_SIZE * 1024;
+               pageTable = add_page_table(virtualAddress);
 
-               if (virtualAddress >= sMaxVirtualAddress) {
-                       panic("map_page: asked to map a page to %p\n",
-                               (void *)virtualAddress);
+               if (pageTable == NULL) {
+                       panic("map_page: failed to allocate a page table for virtual "
+                               "address %p\n", (void*)virtualAddress);
+                       return;
                }
        }
 
        physicalAddress &= ~(B_PAGE_SIZE - 1);
 
        // map the page to the correct page table
-       uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
-               / (B_PAGE_SIZE * 1024)] & 0xfffff000);
        uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;
 
        TRACE("map_page: inserting pageTable %p, tableEntry %" B_PRIu32
@@ -350,7 +359,6 @@
        sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;
 
        gKernelArgs.arch_args.num_pgtables = 0;
-       add_page_table(KERNEL_BASE);
 
        // switch to the new pgdir and enable paging
        asm("movl %0, %%eax;"
@@ -598,6 +606,8 @@
 {
        TRACE("mmu_init\n");
 
+       gKernelArgs.arch_args.virtual_end = KERNEL_BASE;
+
        gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
        gKernelArgs.physical_allocated_range[0].size = 0;
        gKernelArgs.num_physical_allocated_ranges = 1;
@@ -635,69 +645,42 @@
                for (uint32 i = 0; i < extMemoryCount; i++) {
                        // Type 1 is available memory
                        if (extMemoryBlock[i].type == 1) {
+                               uint64 base = extMemoryBlock[i].base_addr;
+                               uint64 end = base + extMemoryBlock[i].length;
+
                                // round everything up to page boundaries, exclusive of pages
                                // it partially occupies
-                               if ((extMemoryBlock[i].base_addr % B_PAGE_SIZE) != 0) {
-                                       extMemoryBlock[i].length -= B_PAGE_SIZE
-                                               - extMemoryBlock[i].base_addr % B_PAGE_SIZE;
-                               }
-                               extMemoryBlock[i].base_addr
-                                       = ROUNDUP(extMemoryBlock[i].base_addr, B_PAGE_SIZE);
-                               extMemoryBlock[i].length
-                                       = ROUNDDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);
+                               base = ROUNDUP(base, B_PAGE_SIZE);
+                               end = ROUNDDOWN(end, B_PAGE_SIZE);
 
                                // we ignore all memory beyond 4 GB
-                               if (extMemoryBlock[i].base_addr > 0xffffffffULL)
+                               if (end > 0x100000000ULL)
+                                       end = 0x100000000ULL;
+                               if (end <= base)
                                        continue;
 
-                               if (extMemoryBlock[i].base_addr + extMemoryBlock[i].length
-                                               > 0xffffffffULL) {
-                                       extMemoryBlock[i].length
-                                               = 0x100000000ULL - extMemoryBlock[i].base_addr;
+                               if (insert_physical_memory_range(base, end - base) != B_OK) {
+                                       panic("mmu_init(): Failed to add physical memory range "
+                                               "%#" B_PRIx64 " - %#" B_PRIx64 "\n", base, end);
                                }
-
-                               if (gKernelArgs.num_physical_memory_ranges > 0) {
-                                       // we might want to extend a previous hole
-                                       addr_t previousEnd = gKernelArgs.physical_memory_range[
-                                                       gKernelArgs.num_physical_memory_ranges - 1].start
-                                               + gKernelArgs.physical_memory_range[
-                                                       gKernelArgs.num_physical_memory_ranges - 1].size;
-                                       addr_t holeSize = extMemoryBlock[i].base_addr - previousEnd;
-
-                                       // If the hole is smaller than 1 MB, we try to mark the
-                                       // memory as allocated and extend the previous memory range
-                                       if (previousEnd <= extMemoryBlock[i].base_addr
-                                               && holeSize < 0x100000
-                                               && insert_physical_allocated_range(previousEnd,
-                                                       extMemoryBlock[i].base_addr - previousEnd)
-                                                               == B_OK) {
-                                               gKernelArgs.physical_memory_range[
-                                                       gKernelArgs.num_physical_memory_ranges - 1].size
-                                                               += holeSize;
-                                       }
-                               }
-
-                               insert_physical_memory_range(extMemoryBlock[i].base_addr,
-                                       extMemoryBlock[i].length);
                        }
                }
+
+               // sort the ranges
+               sort_addr_range(gKernelArgs.physical_memory_range,
+                       gKernelArgs.num_physical_memory_ranges);
        } else {
                // TODO: for now!
-               dprintf("No extended memory block - using 32 MB (fix me!)\n");
-               uint32 memSize = 32 * 1024 * 1024;
+               dprintf("No extended memory block - using 64 MB (fix me!)\n");
+               uint32 memSize = 64 * 1024 * 1024;
 
                // We dont have an extended map, assume memory is contiguously mapped
-               // at 0x0
+               // at 0x0, but leave out the BIOS range ((640k - 1 page) to 1 MB).
                gKernelArgs.physical_memory_range[0].start = 0;
-               gKernelArgs.physical_memory_range[0].size = memSize;
-               gKernelArgs.num_physical_memory_ranges = 1;
-
-               // mark the bios area allocated
-               uint32 biosRange = gKernelArgs.num_physical_allocated_ranges++;
-
-               gKernelArgs.physical_allocated_range[biosRange].start = 0x9f000;
-                       // 640k - 1 page
-               gKernelArgs.physical_allocated_range[biosRange].size = 0x61000;
+               gKernelArgs.physical_memory_range[0].size = 0x9f000;
+               gKernelArgs.physical_memory_range[1].start = 0x100000;
+               gKernelArgs.physical_memory_range[1].size = memSize;
+               gKernelArgs.num_physical_memory_ranges = 2;
        }
 
        gKernelArgs.arch_args.page_hole = 0xffc00000;

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp	2010-03-02 17:50:21 UTC (rev 35725)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm.cpp	2010-03-02 18:13:06 UTC (rev 35726)
@@ -615,7 +615,7 @@
 
        // throw away anything in the kernel_args.pgtable[] that's not yet mapped
        vm_free_unused_boot_loader_range(KERNEL_BASE,
-               0x400000 * args->arch_args.num_pgtables);
+               args->arch_args.virtual_end - KERNEL_BASE);
 
        return B_OK;
 }

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp	2010-03-02 17:50:21 UTC (rev 35725)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp	2010-03-02 18:13:06 UTC (rev 35726)
@@ -3004,7 +3004,8 @@
                        && (flags & PAGE_PRESENT) != 0) {
                        vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
                        if (page != NULL && page->State() != PAGE_STATE_FREE
-                                        && page->State() != PAGE_STATE_CLEAR) {
+                                        && page->State() != PAGE_STATE_CLEAR
+                                        && page->State() != PAGE_STATE_UNUSED) {
                                DEBUG_PAGE_ACCESS_START(page);
                                vm_page_set_state(page, PAGE_STATE_FREE);
                        }

Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp	2010-03-02 17:50:21 UTC (rev 35725)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp	2010-03-02 18:13:06 UTC (rev 35726)
@@ -1253,6 +1253,63 @@
 }
 
 
+static status_t
+mark_page_range_in_use(addr_t startPage, addr_t length, bool wired)
+{
+       TRACE(("mark_page_range_in_use: start 0x%lx, len 0x%lx\n",
+               startPage, length));
+
+       if (sPhysicalPageOffset > startPage) {
+               TRACE(("mark_page_range_in_use: start page %ld is before free list\n",
+                       startPage));
+               return B_BAD_VALUE;
+       }
+       startPage -= sPhysicalPageOffset;
+       if (startPage + length > sNumPages) {
+               TRACE(("mark_page_range_in_use: range would extend past free list\n"));
+               return B_BAD_VALUE;
+       }
+
+       WriteLocker locker(sFreePageQueuesLock);
+
+       for (addr_t i = 0; i < length; i++) {
+               vm_page *page = &sPages[startPage + i];
+               switch (page->State()) {
+                       case PAGE_STATE_FREE:
+                       case PAGE_STATE_CLEAR:
+                       {
+// TODO: This violates the page reservation policy, since we remove pages from
+// the free/clear queues without having reserved them before. This should happen
+// in the early boot process only, though.
+                               DEBUG_PAGE_ACCESS_START(page);
+                               VMPageQueue& queue = page->State() == PAGE_STATE_FREE
+                                       ? sFreePageQueue : sClearPageQueue;
+                               queue.Remove(page);
+                               page->SetState(wired ? PAGE_STATE_WIRED : PAGE_STATE_UNUSED);
+                               page->busy = false;
+                               atomic_add(&sUnreservedFreePages, -1);
+                               DEBUG_PAGE_ACCESS_END(page);
+                               break;
+                       }
+                       case PAGE_STATE_WIRED:
+                       case PAGE_STATE_UNUSED:
+                               break;
+                       case PAGE_STATE_ACTIVE:
+                       case PAGE_STATE_INACTIVE:
+                       case PAGE_STATE_MODIFIED:
+                       case PAGE_STATE_CACHED:
+                       default:
+                               // uh
+                               dprintf("mark_page_range_in_use: page 0x%lx in non-free state %d!\n",
+                                       startPage + i, page->State());
+                               break;
+               }
+       }
+
+       return B_OK;
+}
+
+
 /*!
        This is a background thread that wakes up every now and then (every 100ms)
        and moves some pages from the free queue over to the clear queue.
@@ -2761,10 +2818,23 @@
 
        TRACE(("initialized table\n"));
 
-       // mark some of the page ranges in use
+       // mark the ranges between usable physical memory unused
+       addr_t previousEnd = 0;
+       for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
+               addr_t base = args->physical_memory_range[i].start;
+               addr_t size = args->physical_memory_range[i].size;
+               if (base > previousEnd) {
+                       mark_page_range_in_use(previousEnd / B_PAGE_SIZE,
+                               (base - previousEnd) / B_PAGE_SIZE, false);
+               }
+               previousEnd = base + size;
+       }
+
+       // mark the allocated physical page ranges wired
        for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
-               vm_mark_page_range_inuse(args->physical_allocated_range[i].start / B_PAGE_SIZE,
-                       args->physical_allocated_range[i].size / B_PAGE_SIZE);
+               mark_page_range_in_use(
+                       args->physical_allocated_range[i].start / B_PAGE_SIZE,
+                       args->physical_allocated_range[i].size / B_PAGE_SIZE, true);
        }
 
        // The target of actually free pages. This must be at least the system
@@ -2873,57 +2943,7 @@
 status_t
 vm_mark_page_range_inuse(addr_t startPage, addr_t length)
 {
-       TRACE(("vm_mark_page_range_inuse: start 0x%lx, len 0x%lx\n",
-               startPage, length));
-
-       if (sPhysicalPageOffset > startPage) {
-               TRACE(("vm_mark_page_range_inuse: start page %ld is before free list\n",
-                       startPage));
-               return B_BAD_VALUE;
-       }
-       startPage -= sPhysicalPageOffset;
-       if (startPage + length > sNumPages) {
-               TRACE(("vm_mark_page_range_inuse: range would extend past free list\n"));
-               return B_BAD_VALUE;
-       }
-
-       WriteLocker locker(sFreePageQueuesLock);
-
-       for (addr_t i = 0; i < length; i++) {
-               vm_page *page = &sPages[startPage + i];
-               switch (page->State()) {
-                       case PAGE_STATE_FREE:
-                       case PAGE_STATE_CLEAR:
-                       {
-// TODO: This violates the page reservation policy, since we remove pages from
-// the free/clear queues without having reserved them before. This should happen
-// in the early boot process only, though.
-                               DEBUG_PAGE_ACCESS_START(page);
-                               VMPageQueue& queue = page->State() == PAGE_STATE_FREE
-                                       ? sFreePageQueue : sClearPageQueue;
-                               queue.Remove(page);
-                               page->SetState(PAGE_STATE_UNUSED);
-                               page->busy = false;
-                               atomic_add(&sUnreservedFreePages, -1);
-                               DEBUG_PAGE_ACCESS_END(page);
-                               break;
-                       }
-                       case PAGE_STATE_WIRED:
-                               break;
-                       case PAGE_STATE_ACTIVE:
-                       case PAGE_STATE_INACTIVE:
-                       case PAGE_STATE_MODIFIED:
-                       case PAGE_STATE_CACHED:
-                       case PAGE_STATE_UNUSED:
-                       default:
-                               // uh
-                               dprintf("vm_mark_page_range_inuse: page 0x%lx in non-free state %d!\n",
-                                       startPage + i, page->State());
-                               break;
-               }
-       }
-
-       return B_OK;
+       return mark_page_range_in_use(startPage, length, false);
 }
 
 

