added 2 changesets to branch 'refs/remotes/xyzzy-github/x86_64' old head: 4988ca58a0b6f12e48286fe17ee651be490e0f24 new head: e70dd7e0af671e37378d68c241a081ebeaf8f659 ---------------------------------------------------------------------------- 950b24e: Begun work on VMTranslationMap implementation for x86_64. * Added empty source files for all the 64-bit paging method code, and a stub implementation of X86PagingMethod64Bit. * arch_vm_translation_map.cpp has been modified to use X86PagingMethod64Bit on x86_64. e70dd7e: Map all physical memory in the long mode paging setup. Since x86_64 has such a large virtual address space all available physical memory can be mapped in to it. The physical page mapper implementation for x86_64 will use this mapping. Also changed the mapping code to map kernel pages with the global flag. [ Alex Smith <alex@xxxxxxxxxxxxxxxx> ] ---------------------------------------------------------------------------- 16 files changed, 358 insertions(+), 78 deletions(-) headers/private/kernel/arch/x86/arch_kernel.h | 18 ++-- src/system/boot/platform/bios_ia32/long.cpp | 68 ++++++++++--- src/system/kernel/arch/x86/64/int.cpp | 1 - src/system/kernel/arch/x86/64/stubs.cpp | 45 --------- src/system/kernel/arch/x86/Jamfile | 25 +++-- .../kernel/arch/x86/arch_vm_translation_map.cpp | 16 ++- .../arch/x86/paging/64bit/X86PagingMethod64Bit.cpp | 82 ++++++++++++++++ .../arch/x86/paging/64bit/X86PagingMethod64Bit.h | 47 +++++++++ .../x86/paging/64bit/X86PagingStructures64Bit.cpp | 17 ++++ .../x86/paging/64bit/X86PagingStructures64Bit.h | 16 +++ .../x86/paging/64bit/X86VMTranslationMap64Bit.cpp | 20 ++++ .../x86/paging/64bit/X86VMTranslationMap64Bit.h | 15 +++ src/system/kernel/arch/x86/paging/64bit/paging.h | 12 +++ .../arch/x86/paging/x86_physical_page_mapper.h | 1 - .../x86/paging/x86_physical_page_mapper_mapped.cpp | 29 ++++++ .../x86/paging/x86_physical_page_mapper_mapped.h | 24 +++++ 
############################################################################ Commit: 950b24e32d8ddbc0d2a4e46de77c0fb4cc18b128 Author: Alex Smith <alex@xxxxxxxxxxxxxxxx> Date: Wed Jul 4 16:02:58 2012 UTC Begun work on VMTranslationMap implementation for x86_64. * Added empty source files for all the 64-bit paging method code, and a stub implementation of X86PagingMethod64Bit. * arch_vm_translation_map.cpp has been modified to use X86PagingMethod64Bit on x86_64. ---------------------------------------------------------------------------- diff --git a/headers/private/kernel/arch/x86/arch_kernel.h b/headers/private/kernel/arch/x86/arch_kernel.h index a30eb94..14413f9 100644 --- a/headers/private/kernel/arch/x86/arch_kernel.h +++ b/headers/private/kernel/arch/x86/arch_kernel.h @@ -36,6 +36,10 @@ #define KERNEL_SIZE 0x8000000000 #define KERNEL_TOP (KERNEL_BASE + (KERNEL_SIZE - 1)) +// Kernel physical memory map area. +#define KERNEL_PMAP_BASE 0xffffff0000000000 +#define KERNEL_PMAP_SIZE 0x8000000000 + // Userspace address space layout. #define USER_BASE 0x0 #define USER_BASE_ANY 0x100000 @@ -51,9 +55,9 @@ // memory layout -#define KERNEL_BASE 0x80000000 -#define KERNEL_SIZE 0x80000000 -#define KERNEL_TOP (KERNEL_BASE + (KERNEL_SIZE - 1)) +#define KERNEL_BASE 0x80000000 +#define KERNEL_SIZE 0x80000000 +#define KERNEL_TOP (KERNEL_BASE + (KERNEL_SIZE - 1)) /* User space layout is a little special: * The user space does not completely cover the space not covered by the @@ -65,10 +69,10 @@ * TODO: introduce the 1Mb lower barrier again - it's only used for vm86 mode, * and this should be moved into the kernel (and address space) completely. 
*/ -#define USER_BASE 0x00 -#define USER_BASE_ANY 0x100000 -#define USER_SIZE (KERNEL_BASE - 0x10000) -#define USER_TOP (USER_BASE + USER_SIZE) +#define USER_BASE 0x00 +#define USER_BASE_ANY 0x100000 +#define USER_SIZE (KERNEL_BASE - 0x10000) +#define USER_TOP (USER_BASE + USER_SIZE) #define KERNEL_USER_DATA_BASE 0x6fff0000 #define USER_STACK_REGION 0x70000000 diff --git a/src/system/kernel/arch/x86/64/int.cpp b/src/system/kernel/arch/x86/64/int.cpp index 04671a5..849777b 100644 --- a/src/system/kernel/arch/x86/64/int.cpp +++ b/src/system/kernel/arch/x86/64/int.cpp @@ -178,7 +178,6 @@ arch_int_init(kernel_args* args) // Set up the legacy PIC. pic_init(); - panic("not implemented\n"); return B_OK; } diff --git a/src/system/kernel/arch/x86/64/stubs.cpp b/src/system/kernel/arch/x86/64/stubs.cpp index 348887c..1802e92 100644 --- a/src/system/kernel/arch/x86/64/stubs.cpp +++ b/src/system/kernel/arch/x86/64/stubs.cpp @@ -479,51 +479,6 @@ x86_init_user_debug() } -status_t -arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map) -{ - return B_ERROR; -} - - -status_t -arch_vm_translation_map_init(kernel_args *args, - VMPhysicalPageMapper** _physicalPageMapper) -{ - return B_OK; -} - - -status_t -arch_vm_translation_map_init_post_sem(kernel_args *args) -{ - return B_OK; -} - - -status_t -arch_vm_translation_map_init_post_area(kernel_args *args) -{ - return B_OK; -} - - -status_t -arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa, - uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *)) -{ - return B_ERROR; -} - - -bool -arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress, - uint32 protection) -{ - return true; -} - - // Currently got generic elf.cpp #ifdef'd out for x86_64, define stub versions here. 
status_t diff --git a/src/system/kernel/arch/x86/Jamfile b/src/system/kernel/arch/x86/Jamfile index 256aaf7..a67e420 100644 --- a/src/system/kernel/arch/x86/Jamfile +++ b/src/system/kernel/arch/x86/Jamfile @@ -11,13 +11,12 @@ UsePrivateKernelHeaders ; UsePrivateHeaders shared ; SEARCH_SOURCE += [ FDirName $(SUBDIR) paging ] ; -SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 32bit ] ; -SEARCH_SOURCE += [ FDirName $(SUBDIR) paging pae ] ; SEARCH_SOURCE += [ FDirName $(SUBDIR) timers ] ; local archSpecificSources ; if $(TARGET_ARCH) = x86_64 { SEARCH_SOURCE += [ FDirName $(SUBDIR) 64 ] ; + SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 64bit ] ; archSpecificSources = arch.S @@ -25,9 +24,19 @@ if $(TARGET_ARCH) = x86_64 { int.cpp interrupts.S stubs.cpp + + # paging + #x86_physical_page_mapper_mapped.cpp + + # paging/64bit + X86PagingMethod64Bit.cpp + X86PagingStructures64Bit.cpp + X86VMTranslationMap64Bit.cpp ; } else { SEARCH_SOURCE += [ FDirName $(SUBDIR) 32 ] ; + SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 32bit ] ; + SEARCH_SOURCE += [ FDirName $(SUBDIR) paging pae ] ; archSpecificSources = arch.S @@ -43,7 +52,6 @@ if $(TARGET_ARCH) = x86_64 { arch_smp.cpp arch_thread.cpp arch_timer.cpp - arch_vm_translation_map.cpp arch_system_info.cpp arch_user_debugger.cpp apic.cpp @@ -59,11 +67,7 @@ if $(TARGET_ARCH) = x86_64 { x86_syscalls.cpp # paging - x86_physical_page_mapper.cpp x86_physical_page_mapper_large_memory.cpp - X86PagingMethod.cpp - X86PagingStructures.cpp - X86VMTranslationMap.cpp # paging/32bit X86PagingMethod32Bit.cpp @@ -87,7 +91,14 @@ local archGenericSources = arch_debug_console.cpp arch_int.cpp arch_vm.cpp + arch_vm_translation_map.cpp pic.cpp + + # paging + x86_physical_page_mapper.cpp + X86PagingMethod.cpp + X86PagingStructures.cpp + X86VMTranslationMap.cpp ; KernelMergeObject kernel_arch_x86.o : diff --git a/src/system/kernel/arch/x86/arch_vm_translation_map.cpp b/src/system/kernel/arch/x86/arch_vm_translation_map.cpp index af3d64b..8362623 100644 --- 
a/src/system/kernel/arch/x86/arch_vm_translation_map.cpp +++ b/src/system/kernel/arch/x86/arch_vm_translation_map.cpp @@ -12,8 +12,12 @@ #include <boot/kernel_args.h> -#include "paging/32bit/X86PagingMethod32Bit.h" -#include "paging/pae/X86PagingMethodPAE.h" +#ifdef __x86_64__ +# include "paging/64bit/X86PagingMethod64Bit.h" +#else +# include "paging/32bit/X86PagingMethod32Bit.h" +# include "paging/pae/X86PagingMethodPAE.h" +#endif //#define TRACE_VM_TMAP @@ -26,10 +30,14 @@ static union { uint64 align; +#ifdef __x86_64__ + char sixty_four[sizeof(X86PagingMethod64Bit)]; +#else char thirty_two[sizeof(X86PagingMethod32Bit)]; #if B_HAIKU_PHYSICAL_BITS == 64 char pae[sizeof(X86PagingMethodPAE)]; #endif +#endif } sPagingMethodBuffer; @@ -74,7 +82,9 @@ arch_vm_translation_map_init(kernel_args *args, } #endif -#if B_HAIKU_PHYSICAL_BITS == 64 +#ifdef __x86_64__ + gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit; +#elif B_HAIKU_PHYSICAL_BITS == 64 bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON); bool paeNeeded = false; for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) { diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp new file mode 100644 index 0000000..88869ed --- /dev/null +++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp @@ -0,0 +1,82 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. 
+ */ + + +#include "paging/64bit/X86PagingMethod64Bit.h" + +#include <stdlib.h> +#include <string.h> + +#include <boot/kernel_args.h> +#include <util/AutoLock.h> +#include <vm/vm.h> +#include <vm/vm_page.h> +#include <vm/VMAddressSpace.h> + +//#include "paging/64bit/X86PagingStructures64Bit.h" +//#include "paging/64bit/X86VMTranslationMap64Bit.h" +#include "paging/x86_physical_page_mapper.h" +#include "paging/x86_physical_page_mapper_mapped.h" + + +#define TRACE_X86_PAGING_METHOD_64BIT +#ifdef TRACE_X86_PAGING_METHOD_64BIT +# define TRACE(x...) dprintf(x) +#else +# define TRACE(x...) ; +#endif + + +// #pragma mark - X86PagingMethod64Bit + + +X86PagingMethod64Bit::X86PagingMethod64Bit() +{ +} + + +X86PagingMethod64Bit::~X86PagingMethod64Bit() +{ +} + + +status_t +X86PagingMethod64Bit::Init(kernel_args* args, + VMPhysicalPageMapper** _physicalPageMapper) +{ + return B_ERROR; +} + + +status_t +X86PagingMethod64Bit::InitPostArea(kernel_args* args) +{ + return B_ERROR; +} + + +status_t +X86PagingMethod64Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map) +{ + return B_ERROR; +} + + +status_t +X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress, + phys_addr_t physicalAddress, uint8 attributes, + phys_addr_t (*get_free_page)(kernel_args*)) +{ + return B_ERROR; +} + + +bool +X86PagingMethod64Bit::IsKernelPageAccessible(addr_t virtualAddress, + uint32 protection) +{ + return true; +} + diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h new file mode 100644 index 0000000..660a0b6 --- /dev/null +++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h @@ -0,0 +1,47 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. 
+ */ +#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H +#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H + + +#include <KernelExport.h> + +#include <lock.h> +#include <vm/vm_types.h> + +#include "paging/64bit/paging.h" +#include "paging/X86PagingMethod.h" +#include "paging/X86PagingStructures.h" + + +class TranslationMapPhysicalPageMapper; +class X86PhysicalPageMapper; + + +class X86PagingMethod64Bit : public X86PagingMethod { +public: + X86PagingMethod64Bit(); + virtual ~X86PagingMethod64Bit(); + + virtual status_t Init(kernel_args* args, + VMPhysicalPageMapper** _physicalPageMapper); + virtual status_t InitPostArea(kernel_args* args); + + virtual status_t CreateTranslationMap(bool kernel, + VMTranslationMap** _map); + + virtual status_t MapEarly(kernel_args* args, + addr_t virtualAddress, + phys_addr_t physicalAddress, + uint8 attributes, + phys_addr_t (*get_free_page)(kernel_args*)); + + virtual bool IsKernelPageAccessible(addr_t virtualAddress, + uint32 protection); + +}; + + +#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingStructures64Bit.cpp b/src/system/kernel/arch/x86/paging/64bit/X86PagingStructures64Bit.cpp new file mode 100644 index 0000000..24ff03a --- /dev/null +++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingStructures64Bit.cpp @@ -0,0 +1,17 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. 
+ */ + + +#include "paging/64bit/X86PagingStructures64Bit.h" + +#include <stdlib.h> +#include <string.h> + +#include <KernelExport.h> + +#include <int.h> + +#include "paging/64bit/X86PagingMethod64Bit.h" + diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingStructures64Bit.h b/src/system/kernel/arch/x86/paging/64bit/X86PagingStructures64Bit.h new file mode 100644 index 0000000..f826c95 --- /dev/null +++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingStructures64Bit.h @@ -0,0 +1,16 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. + */ +#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H +#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H + + +#include "paging/64bit/paging.h" +#include "paging/X86PagingStructures.h" + + + + + +#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H diff --git a/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.cpp b/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.cpp new file mode 100644 index 0000000..090923f --- /dev/null +++ b/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.cpp @@ -0,0 +1,20 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. 
+ */ + + +#include "paging/64bit/X86VMTranslationMap64Bit.h" + +#include <int.h> +#include <slab/Slab.h> +#include <thread.h> +#include <util/AutoLock.h> +#include <vm/vm_page.h> +#include <vm/VMAddressSpace.h> +#include <vm/VMCache.h> + +#include "paging/64bit/X86PagingMethod64Bit.h" +#include "paging/64bit/X86PagingStructures64Bit.h" +#include "paging/x86_physical_page_mapper.h" + diff --git a/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.h b/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.h new file mode 100644 index 0000000..2e0c000 --- /dev/null +++ b/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.h @@ -0,0 +1,15 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. + */ +#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H +#define KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H + + +#include "paging/X86VMTranslationMap.h" + + + + + +#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H diff --git a/src/system/kernel/arch/x86/paging/64bit/paging.h b/src/system/kernel/arch/x86/paging/64bit/paging.h new file mode 100644 index 0000000..d0f077c --- /dev/null +++ b/src/system/kernel/arch/x86/paging/64bit/paging.h @@ -0,0 +1,12 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. 
+ */ +#ifndef KERNEL_ARCH_X86_PAGING_64BIT_PAGING_H +#define KERNEL_ARCH_X86_PAGING_64BIT_PAGING_H + + +#include <OS.h> + + +#endif // KERNEL_ARCH_X86_PAGING_64BIT_PAGING_H diff --git a/src/system/kernel/arch/x86/paging/x86_physical_page_mapper.h b/src/system/kernel/arch/x86/paging/x86_physical_page_mapper.h index 31e4796..a29459b 100644 --- a/src/system/kernel/arch/x86/paging/x86_physical_page_mapper.h +++ b/src/system/kernel/arch/x86/paging/x86_physical_page_mapper.h @@ -10,7 +10,6 @@ struct kernel_args; -struct vm_translation_map_ops; class TranslationMapPhysicalPageMapper { diff --git a/src/system/kernel/arch/x86/paging/x86_physical_page_mapper_mapped.cpp b/src/system/kernel/arch/x86/paging/x86_physical_page_mapper_mapped.cpp new file mode 100644 index 0000000..45f270c --- /dev/null +++ b/src/system/kernel/arch/x86/paging/x86_physical_page_mapper_mapped.cpp @@ -0,0 +1,29 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. + */ + + +/*! Physical page mapper implementation for use where the whole of physical + memory is permanently mapped into the kernel address space. + + This is used on x86_64 where the virtual address space is likely a great + deal larger than the amount of physical memory in the machine, so it can + all be mapped in permanently, which is faster and makes life much easier. 
+*/ + + +#include "paging/x86_physical_page_mapper_mapped.h" + +#include <new> + +#include <cpu.h> +#include <smp.h> +#include <vm/vm.h> +#include <vm/vm_types.h> +#include <vm/VMAddressSpace.h> + +#include "paging/x86_physical_page_mapper.h" +#include "paging/X86PagingStructures.h" +#include "paging/X86VMTranslationMap.h" + diff --git a/src/system/kernel/arch/x86/paging/x86_physical_page_mapper_mapped.h b/src/system/kernel/arch/x86/paging/x86_physical_page_mapper_mapped.h new file mode 100644 index 0000000..d0c4390 --- /dev/null +++ b/src/system/kernel/arch/x86/paging/x86_physical_page_mapper_mapped.h @@ -0,0 +1,24 @@ +/* + * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxxx + * Distributed under the terms of the MIT License. + */ +#ifndef KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H +#define KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H + + +#include <OS.h> + +#include <util/DoublyLinkedList.h> + + +class TranslationMapPhysicalPageMapper; +class X86PhysicalPageMapper; +struct kernel_args; + + +status_t mapped_physical_page_ops_init(kernel_args* args, + X86PhysicalPageMapper*& _pageMapper, + TranslationMapPhysicalPageMapper*& _kernelPageMapper); + + +#endif // KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H ############################################################################ Commit: e70dd7e0af671e37378d68c241a081ebeaf8f659 Author: Alex Smith <alex@xxxxxxxxxxxxxxxx> Date: Wed Jul 4 17:28:50 2012 UTC Map all physical memory in the long mode paging setup. Since x86_64 has such a large virtual address space all available physical memory can be mapped in to it. The physical page mapper implementation for x86_64 will use this mapping. Also changed the mapping code to map kernel pages with the global flag. 
---------------------------------------------------------------------------- diff --git a/src/system/boot/platform/bios_ia32/long.cpp b/src/system/boot/platform/bios_ia32/long.cpp index 567c5f3..43230be 100644 --- a/src/system/boot/platform/bios_ia32/long.cpp +++ b/src/system/boot/platform/bios_ia32/long.cpp @@ -6,6 +6,8 @@ #include "long.h" +#include <algorithm> + #include <KernelExport.h> // Include the x86_64 version of descriptors.h @@ -24,6 +26,12 @@ #include "mmu.h" +static const uint64 kTableMappingFlags = 0x3; +static const uint64 kLargePageMappingFlags = 0x183; +static const uint64 kPageMappingFlags = 0x103; + // Global, R/W, Present + + /*! Convert a 32-bit address to a 64-bit address. */ static inline uint64 fix_address(uint64 address) @@ -87,36 +95,68 @@ long_idt_init() static void long_mmu_init() { + uint64* pml4; + uint64* pdpt; + uint64* pageDir; + uint64* pageTable; addr_t physicalAddress; // Allocate the top level PML4. - uint64* pml4 = (uint64*)mmu_allocate_page(&gKernelArgs.arch_args.phys_pgdir); + pml4 = (uint64*)mmu_allocate_page(&gKernelArgs.arch_args.phys_pgdir); memset(pml4, 0, B_PAGE_SIZE); gKernelArgs.arch_args.vir_pgdir = (uint64)(addr_t)pml4; - // Identity map the first 1GB of memory, do so using large pages. + // Find the highest physical memory address. We map all physical memory + // into the kernel address space, so we want to make sure we map everything + // we have available. + uint64 maxAddress = 0; + for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) { + maxAddress = std::max(maxAddress, + gKernelArgs.physical_memory_range[i].start + + gKernelArgs.physical_memory_range[i].size); + } + + // Want to map at least 4GB, there may be stuff other than usable RAM that + // could be in the first 4GB of physical address space. 
+ maxAddress = std::max(maxAddress, (uint64)0x100000000ll); + maxAddress = ROUNDUP(maxAddress, 0x40000000); - uint64* pdpt = (uint64*)mmu_allocate_page(&physicalAddress); + // Currently only use 1 PDPT (512GB). This will need to change if someone + // wants to use Haiku on a box with more than 512GB of RAM but that's + // probably not going to happen any time soon. + if (maxAddress / 0x40000000 > 512) + panic("Can't currently support more than 512GB of RAM!"); + + // Create page tables for the physical map area. Also map this PDPT + // temporarily at the bottom of the address space so that we are identity + // mapped. + + pdpt = (uint64*)mmu_allocate_page(&physicalAddress); memset(pdpt, 0, B_PAGE_SIZE); - pml4[0] = physicalAddress | 0x3; + pml4[510] = physicalAddress | kTableMappingFlags; + pml4[0] = physicalAddress | kTableMappingFlags; - uint64* pageDir = (uint64*)mmu_allocate_page(&physicalAddress); - memset(pageDir, 0, B_PAGE_SIZE); - pdpt[0] = physicalAddress | 0x3; + for (uint64 i = 0; i < maxAddress; i += 0x40000000) { + dprintf("mapping %llu GB\n", i / 0x40000000); + + pageDir = (uint64*)mmu_allocate_page(&physicalAddress); + memset(pageDir, 0, B_PAGE_SIZE); + pdpt[i / 0x40000000] = physicalAddress | kTableMappingFlags; - for (uint32 i = 0; i < 512; i++) { - pageDir[i] = (i * 0x200000) | 0x83; + for (uint64 j = 0; j < 0x40000000; j += 0x200000) { + pageDir[j / 0x200000] = (i + j) | kLargePageMappingFlags; + } } // Allocate tables for the kernel mappings. pdpt = (uint64*)mmu_allocate_page(&physicalAddress); memset(pdpt, 0, B_PAGE_SIZE); - pml4[511] = physicalAddress | 0x3; + pml4[511] = physicalAddress | kTableMappingFlags; pageDir = (uint64*)mmu_allocate_page(&physicalAddress); memset(pageDir, 0, B_PAGE_SIZE); - pdpt[510] = physicalAddress | 0x3; + pdpt[510] = physicalAddress | kTableMappingFlags; // Store the virtual memory usage information. 
gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE_64BIT; @@ -125,13 +165,13 @@ long_mmu_init() // We can now allocate page tables and duplicate the mappings across from // the 32-bit address space to them. - uint64* pageTable = NULL; + pageTable = NULL; for (uint32 i = 0; i < gKernelArgs.virtual_allocated_range[0].size / B_PAGE_SIZE; i++) { if ((i % 512) == 0) { pageTable = (uint64*)mmu_allocate_page(&physicalAddress); memset(pageTable, 0, B_PAGE_SIZE); - pageDir[i / 512] = physicalAddress | 0x3; + pageDir[i / 512] = physicalAddress | kTableMappingFlags; // Just performed another virtual allocation, account for it. gKernelArgs.virtual_allocated_range[0].size += B_PAGE_SIZE; @@ -142,7 +182,7 @@ long_mmu_init() &physicalAddress)) continue; - pageTable[i % 512] = physicalAddress | 0x3; + pageTable[i % 512] = physicalAddress | kPageMappingFlags; } gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_BASE_64BIT