[haiku-commits] r39206 - in haiku/trunk: headers/private/kernel/arch/arm src/system/kernel/arch/arm src/system/kernel/arch/arm/paging src/system/kernel/arch/arm/paging/32bit

Author: ithamar
Date: 2010-10-30 16:10:30 +0200 (Sat, 30 Oct 2010)
New Revision: 39206
Changeset: http://dev.haiku-os.org/changeset/39206

Added:
   haiku/trunk/src/system/kernel/arch/arm/paging/
   haiku/trunk/src/system/kernel/arch/arm/paging/32bit/
   haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.cpp
   haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.h
   
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingStructures32Bit.cpp
   
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingStructures32Bit.h
   
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMVMTranslationMap32Bit.cpp
   
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMVMTranslationMap32Bit.h
   haiku/trunk/src/system/kernel/arch/arm/paging/32bit/paging.h
   haiku/trunk/src/system/kernel/arch/arm/paging/ARMPagingMethod.cpp
   haiku/trunk/src/system/kernel/arch/arm/paging/ARMPagingMethod.h
   haiku/trunk/src/system/kernel/arch/arm/paging/ARMPagingStructures.cpp
   haiku/trunk/src/system/kernel/arch/arm/paging/ARMPagingStructures.h
   haiku/trunk/src/system/kernel/arch/arm/paging/ARMVMTranslationMap.cpp
   haiku/trunk/src/system/kernel/arch/arm/paging/ARMVMTranslationMap.h
   haiku/trunk/src/system/kernel/arch/arm/paging/arm_physical_page_mapper.cpp
   haiku/trunk/src/system/kernel/arch/arm/paging/arm_physical_page_mapper.h
   
haiku/trunk/src/system/kernel/arch/arm/paging/arm_physical_page_mapper_large_memory.cpp
   
haiku/trunk/src/system/kernel/arch/arm/paging/arm_physical_page_mapper_large_memory.h
Modified:
   haiku/trunk/headers/private/kernel/arch/arm/arch_vm_translation_map.h
   haiku/trunk/src/system/kernel/arch/arm/Jamfile
   haiku/trunk/src/system/kernel/arch/arm/arch_vm_translation_map.cpp
Log:
* ARM: Major VM work
        - This is mostly a copy of the x86 32bit paging method and 
infrastructure, this was copied for two reasons:
                1) It is the most complete VM arch
                2) The first ARM PAE patches have landed on alkml, so we will 
have to deal with it in the future as well,
                   and this infrastructure has proven to be ready ;)
        - No protection features, or dirty/accessed tracking yet
        - Lots of #if 0

        but....

        It boots all the way up to init_modules() now, and then dies because of 
a lack of (ARM) ELF relocation implementation!

        Since at this point the VM can be fully initialised, I'm going to focus 
on CPU exceptions next, so we can get KDL to trigger
        when it happens, and I can actually debug from there ;)


Modified: haiku/trunk/headers/private/kernel/arch/arm/arch_vm_translation_map.h
===================================================================
--- haiku/trunk/headers/private/kernel/arch/arm/arch_vm_translation_map.h       
2010-10-30 13:57:32 UTC (rev 39205)
+++ haiku/trunk/headers/private/kernel/arch/arm/arch_vm_translation_map.h       
2010-10-30 14:10:30 UTC (rev 39206)
@@ -5,20 +5,4 @@
 #ifndef _KERNEL_ARCH_ARM_VM_TRANSLATION_MAP_H
 #define _KERNEL_ARCH_ARM_VM_TRANSLATION_MAP_H
 
-#include <arch/vm_translation_map.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-status_t arm_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
-       size_t size);
-void arm_unmap_address_range(addr_t virtualAddress, size_t size);
-status_t arm_remap_address_range(addr_t *virtualAddress, size_t size,
-       bool unmap);
-
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* _KERNEL_ARCH_ARM_VM_TRANSLATION_MAP_H */

Modified: haiku/trunk/src/system/kernel/arch/arm/Jamfile
===================================================================
--- haiku/trunk/src/system/kernel/arch/arm/Jamfile      2010-10-30 13:57:32 UTC 
(rev 39205)
+++ haiku/trunk/src/system/kernel/arch/arm/Jamfile      2010-10-30 14:10:30 UTC 
(rev 39206)
@@ -5,9 +5,9 @@
 UsePrivateHeaders [ FDirName kernel arch $(TARGET_ARCH) board 
$(TARGET_BOOT_BOARD) ] ;
 
 
-SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
+SEARCH_SOURCE += [ FDirName $(SUBDIR) paging ] ;
+SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 32bit ] ;
 
-
 KernelMergeObject kernel_arch_arm.o :
 #      arch_atomic.c
        arch_commpage.cpp
@@ -29,9 +29,19 @@
        arch_vm_translation_map.cpp
        arch_asm.S
        uart.cpp
-       generic_vm_physical_page_mapper.cpp
-       generic_vm_physical_page_ops.cpp
-#
+
+       # paging
+       arm_physical_page_mapper.cpp
+       arm_physical_page_mapper_large_memory.cpp
+       ARMPagingMethod.cpp
+       ARMPagingStructures.cpp
+       ARMVMTranslationMap.cpp
+
+       # paging/32bit
+       ARMPagingMethod32Bit.cpp
+       ARMPagingStructures32Bit.cpp
+       ARMVMTranslationMap32Bit.cpp
+
        :
        $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
        :

Modified: haiku/trunk/src/system/kernel/arch/arm/arch_vm_translation_map.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/arm/arch_vm_translation_map.cpp  
2010-10-30 13:57:32 UTC (rev 39205)
+++ haiku/trunk/src/system/kernel/arch/arm/arch_vm_translation_map.cpp  
2010-10-30 14:10:30 UTC (rev 39206)
@@ -1,136 +1,149 @@
 /*
- * Copyright 2007, François Revol, revol@xxxxxxxx
+ * Copyright 2010, Ithamar R. Adema, ithamar.adema@xxxxxxxxxxxxxxxx
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2002-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx All rights 
reserved.
  * Distributed under the terms of the MIT License.
  *
- * Copyright 2003-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx
- * Distributed under the terms of the MIT License.
- *
- * Copyright 2001, Travis Geiselbrecht. All rights reserved.
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
  * Distributed under the terms of the NewOS License.
  */
 
-#include <KernelExport.h>
-#include <kernel.h>
-#include <vm/vm.h>
-#include <vm/vm_priv.h>
-#include <vm/VMAddressSpace.h>
-#include <int.h>
-#include <boot/kernel_args.h>
+
 #include <arch/vm_translation_map.h>
-#include <arch/cpu.h>
-//#include <arch_mmu.h>
-#include <stdlib.h>
 
-#include "generic_vm_physical_page_mapper.h"
+#include <boot/kernel_args.h>
 
+#include "paging/32bit/ARMPagingMethod32Bit.h"
+//#include "paging/pae/ARMPagingMethodPAE.h"
 
-void *
-m68k_translation_map_get_pgdir(VMTranslationMap *map)
-{
-       return NULL;
-#warning ARM:WRITEME
-//get_vm_ops()->m68k_translation_map_get_pgdir(map);
-}
 
-//  #pragma mark -
-//  VM API
+#define TRACE_VM_TMAP
+#ifdef TRACE_VM_TMAP
+#      define TRACE(x...) dprintf(x)
+#else
+#      define TRACE(x...) ;
+#endif
 
 
-status_t
-arch_vm_translation_map_init(kernel_args *args,
-        VMPhysicalPageMapper** _physicalPageMapper)
-{
-       return NULL;
-#warning ARM:WRITEME
+static union {
+       uint64  align;
+       char    thirty_two[sizeof(ARMPagingMethod32Bit)];
+#if B_HAIKU_PHYSICAL_BITS == 64
+       char    pae[sizeof(ARMPagingMethodPAE)];
+#endif
+} sPagingMethodBuffer;
 
-//get_vm_ops()->arch_vm_translation_map_init_map(map, kernel);
-}
 
+// #pragma mark - VM API
+
+
 status_t
 arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
 {
-       return NULL;
-#warning ARM:WRITEME
+       return gARMPagingMethod->CreateTranslationMap(kernel, _map);
 }
 
+
 status_t
-arch_vm_translation_map_init_kernel_map_post_sem(VMTranslationMap *map)
+arch_vm_translation_map_init(kernel_args *args,
+       VMPhysicalPageMapper** _physicalPageMapper)
 {
-       return NULL;
-#warning ARM:WRITEME
+       TRACE("vm_translation_map_init: entry\n");
 
-//get_vm_ops()->arch_vm_translation_map_init_kernel_map_post_sem(map);
-}
+#ifdef TRACE_VM_TMAP
+       TRACE("physical memory ranges:\n");
+       for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
+               phys_addr_t start = args->physical_memory_range[i].start;
+               phys_addr_t end = start + args->physical_memory_range[i].size;
+               TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", 
start,
+                       end);
+       }
 
+       TRACE("allocated physical ranges:\n");
+       for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
+               phys_addr_t start = args->physical_allocated_range[i].start;
+               phys_addr_t end = start + 
args->physical_allocated_range[i].size;
+               TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", 
start,
+                       end);
+       }
 
-status_t
-arch_vm_translation_map_init(kernel_args *args)
-{
-       return NULL;
-#warning ARM:WRITEME
+       TRACE("allocated virtual ranges:\n");
+       for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
+               addr_t start = args->virtual_allocated_range[i].start;
+               addr_t end = start + args->virtual_allocated_range[i].size;
+               TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, 
end);
+       }
+#endif
 
-//get_vm_ops()->arch_vm_translation_map_init(args);
-}
+#if B_HAIKU_PHYSICAL_BITS == 64 //IRA: Check all 64 bit code and adjust for ARM
+       bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
+       bool paeNeeded = false;
+       for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
+               phys_addr_t end = args->physical_memory_range[i].start
+                       + args->physical_memory_range[i].size;
+               if (end > 0x100000000LL) {
+                       paeNeeded = true;
+                       break;
+               }
+       }
 
+       if (paeAvailable && paeNeeded) {
+               dprintf("using PAE paging\n");
+               gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethodPAE;
+       } else {
+               dprintf("using 32 bit paging (PAE not %s)\n",
+                       paeNeeded ? "available" : "needed");
+               gARMPagingMethod = new(&sPagingMethodBuffer) 
ARMPagingMethod32Bit;
+       }
+#else
+       gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethod32Bit;
+#endif
 
-status_t
-arch_vm_translation_map_init_post_area(kernel_args *args)
-{
-       return NULL;
-#warning ARM:WRITEME
-
-//get_vm_ops()->arch_vm_translation_map_init_post_area(args);
+       return gARMPagingMethod->Init(args, _physicalPageMapper);
 }
 
 
 status_t
 arch_vm_translation_map_init_post_sem(kernel_args *args)
 {
-       return NULL;
-#warning ARM:WRITEME
-
-//get_vm_ops()->arch_vm_translation_map_init_post_sem(args);
+       return B_OK;
 }
 
 
-/**    Directly maps a page without having knowledge of any kernel structures.
- *     Used only during VM setup.
- *     It currently ignores the "attributes" parameter and sets all pages
- *     read/write.
- */
-
 status_t
-arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress,
-       phys_addr_t physicalAddress, uint8 attributes,
-       phys_addr_t (*get_free_page)(kernel_args *))
+arch_vm_translation_map_init_post_area(kernel_args *args)
 {
-       return NULL;
-#warning ARM:WRITEME
+       TRACE("vm_translation_map_init_post_area: entry\n");
 
-//get_vm_ops()->arch_vm_translation_map_early_map(ka, virtualAddress, 
physicalAddress,
-//             attributes, get_free_page);
+       return gARMPagingMethod->InitPostArea(args);
 }
 
 
-// XXX currently assumes this translation map is active
-
 status_t
-arch_vm_translation_map_early_query(addr_t va, phys_addr_t *out_physical)
+arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
+       uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
 {
-       return NULL;
-#warning ARM:WRITEME
+       TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
 
-//get_vm_ops()->arch_vm_translation_map_early_query(va, out_physical);
+       return gARMPagingMethod->MapEarly(args, va, pa, attributes, 
get_free_page);
 }
 
 
+/*!    Verifies that the page at the given virtual address can be accessed in 
the
+       current context.
+
+       This function is invoked in the kernel debugger. Paranoid checking is in
+       order.
+
+       \param virtualAddress The virtual address to be checked.
+       \param protection The area protection for which to check. Valid is a 
bitwise
+               or of one or more of \c B_KERNEL_READ_AREA or \c 
B_KERNEL_WRITE_AREA.
+       \return \c true, if the address can be accessed in all ways specified by
+               \a protection, \c false otherwise.
+*/
 bool
 arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
-        uint32 protection)
+       uint32 protection)
 {
-#warning ARM:WRITEME
-        return TRUE;
-//get_vm_ops()-arch_vm_translation_map_is_kernel_page_accessible(virtualAddress,
-  //              protection);
+       return gARMPagingMethod->IsKernelPageAccessible(virtualAddress, 
protection);
 }
-

Added: 
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.cpp
===================================================================
--- 
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.cpp    
                            (rev 0)
+++ 
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.cpp    
    2010-10-30 14:10:30 UTC (rev 39206)
@@ -0,0 +1,552 @@
+/*
+ * Copyright 2010, Ithamar R. Adema, ithamar.adema@xxxxxxxxxxxxxxxx
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2002-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx All rights 
reserved.
+ * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
+ */
+
+
+#include "paging/32bit/ARMPagingMethod32Bit.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <AutoDeleter.h>
+
+#include <arch_system_info.h>
+#include <boot/kernel_args.h>
+#include <int.h>
+#include <thread.h>
+#include <vm/vm.h>
+#include <vm/VMAddressSpace.h>
+#include <arm_mmu.h>
+
+#include "paging/32bit/ARMPagingStructures32Bit.h"
+#include "paging/32bit/ARMVMTranslationMap32Bit.h"
+#include "paging/arm_physical_page_mapper.h"
+#include "paging/arm_physical_page_mapper_large_memory.h"
+
+
+#define TRACE_ARM_PAGING_METHOD_32_BIT
+#ifdef TRACE_ARM_PAGING_METHOD_32_BIT
+#      define TRACE(x...) dprintf(x)
+#else
+#      define TRACE(x...) ;
+#endif
+
+
+using ARMLargePhysicalPageMapper::PhysicalPageSlot;
+
+
+// #pragma mark - ARMPagingMethod32Bit::PhysicalPageSlotPool
+
+
+struct ARMPagingMethod32Bit::PhysicalPageSlotPool
+       : ARMLargePhysicalPageMapper::PhysicalPageSlotPool {
+public:
+       virtual                                         ~PhysicalPageSlotPool();
+
+                       status_t                        
InitInitial(kernel_args* args);
+                       status_t                        
InitInitialPostArea(kernel_args* args);
+
+                       void                            Init(area_id dataArea, 
void* data,
+                                                                       area_id 
virtualArea, addr_t virtualBase);
+
+       virtual status_t                        AllocatePool(
+                                                                       
ARMLargePhysicalPageMapper
+                                                                               
::PhysicalPageSlotPool*& _pool);
+       virtual void                            Map(phys_addr_t physicalAddress,
+                                                                       addr_t 
virtualAddress);
+
+public:
+       static  PhysicalPageSlotPool sInitialPhysicalPagePool;
+
+private:
+       area_id                                 fDataArea;
+       area_id                                 fVirtualArea;
+       addr_t                                  fVirtualBase;
+       page_table_entry*               fPageTable;
+};
+
+
+ARMPagingMethod32Bit::PhysicalPageSlotPool
+       ARMPagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool;
+
+
+ARMPagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
+{
+}
+
+
+status_t
+ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
+{
+       // allocate a virtual address range for the pages to be mapped into
+       addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
+               kPageTableAlignment);
+       if (virtualBase == 0) {
+               panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve 
"
+                       "physical page pool space in virtual address space!");
+               return B_ERROR;
+       }
+
+       // allocate memory for the page table and data
+       size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
+       page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
+               areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
+
+       // prepare the page table
+       _EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);
+
+       // init the pool structure and add the initial pool
+       Init(-1, pageTable, -1, (addr_t)virtualBase);
+
+       return B_OK;
+}
+
+
+status_t
+ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitialPostArea(
+       kernel_args* args)
+{
+       // create an area for the (already allocated) data
+       size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
+       void* temp = fPageTable;
+       area_id area = create_area("physical page pool", &temp,
+               B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+       if (area < B_OK) {
+               panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to 
"
+                       "create area for physical page pool.");
+               return area;
+       }
+       fDataArea = area;
+
+       // create an area for the virtual address space
+       temp = (void*)fVirtualBase;
+       area = vm_create_null_area(VMAddressSpace::KernelID(),
+               "physical page pool space", &temp, B_EXACT_ADDRESS,
+               1024 * B_PAGE_SIZE, 0);
+       if (area < B_OK) {
+               panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to 
"
+                       "create area for physical page pool space.");
+               return area;
+       }
+       fVirtualArea = area;
+
+       return B_OK;
+}
+
+
+void
+ARMPagingMethod32Bit::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
+       area_id virtualArea, addr_t virtualBase)
+{
+       fDataArea = dataArea;
+       fVirtualArea = virtualArea;
+       fVirtualBase = virtualBase;
+       fPageTable = (page_table_entry*)data;
+
+       // init slot list
+       fSlots = (PhysicalPageSlot*)(fPageTable + 1024);
+       addr_t slotAddress = virtualBase;
+       for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
+               PhysicalPageSlot* slot = &fSlots[i];
+               slot->next = slot + 1;
+               slot->pool = this;
+               slot->address = slotAddress;
+       }
+
+       fSlots[1023].next = NULL;
+               // terminate list
+}
+
+
+void
+ARMPagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
+       addr_t virtualAddress)
+{
+       page_table_entry& pte = fPageTable[(virtualAddress - fVirtualBase) / 
B_PAGE_SIZE];
+       pte = (physicalAddress & ARM_PTE_ADDRESS_MASK)
+               | ARM_PTE_TYPE_SMALL_PAGE;
+
+       arch_cpu_invalidate_TLB_range(virtualAddress, virtualAddress + 
B_PAGE_SIZE);
+//     invalidate_TLB(virtualAddress);
+}
+
+
+status_t
+ARMPagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
+       ARMLargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
+{
+       // create the pool structure
+       PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
+       if (pool == NULL)
+               return B_NO_MEMORY;
+       ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);
+
+       // create an area that can contain the page table and the slot
+       // structures
+       size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
+       void* data;
+       virtual_address_restrictions virtualRestrictions = {};
+       virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
+       physical_address_restrictions physicalRestrictions = {};
+       area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
+               PAGE_ALIGN(areaSize), B_FULL_LOCK,
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
+               &virtualRestrictions, &physicalRestrictions, &data);
+       if (dataArea < 0)
+               return dataArea;
+
+       // create the null area for the virtual address space
+       void* virtualBase;
+       area_id virtualArea = vm_create_null_area(
+               VMAddressSpace::KernelID(), "physical page pool space",
+               &virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
+               CREATE_AREA_PRIORITY_VIP);
+       if (virtualArea < 0) {
+               delete_area(dataArea);
+               return virtualArea;
+       }
+
+       // prepare the page table
+       memset(data, 0, B_PAGE_SIZE);
+
+       // get the page table's physical address
+       phys_addr_t physicalTable;
+       ARMVMTranslationMap32Bit* map = static_cast<ARMVMTranslationMap32Bit*>(
+               VMAddressSpace::Kernel()->TranslationMap());
+       uint32 dummyFlags;
+       cpu_status state = disable_interrupts();
+       map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
+       restore_interrupts(state);
+
+       // put the page table into the page directory
+       int32 index = VADDR_TO_PDENT((addr_t)virtualBase);
+       page_directory_entry* entry
+               = &map->PagingStructures32Bit()->pgdir_virt[index];
+       PutPageTableInPageDir(entry, physicalTable,
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+       ARMPagingStructures32Bit::UpdateAllPageDirs(index, *entry);
+
+       // init the pool structure
+       pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
+       poolDeleter.Detach();
+       _pool = pool;
+       return B_OK;
+}
+
+
+// #pragma mark - ARMPagingMethod32Bit
+
+
+ARMPagingMethod32Bit::ARMPagingMethod32Bit()
+       :
+       fKernelPhysicalPageDirectory(0),
+       fKernelVirtualPageDirectory(NULL),
+       fPhysicalPageMapper(NULL),
+       fKernelPhysicalPageMapper(NULL)
+{
+}
+
+
+ARMPagingMethod32Bit::~ARMPagingMethod32Bit()
+{
+}
+
+
+status_t
+ARMPagingMethod32Bit::Init(kernel_args* args,
+       VMPhysicalPageMapper** _physicalPageMapper)
+{
+       TRACE("vm_translation_map_init: entry\n");
+
+       fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
+       fKernelVirtualPageDirectory = (page_directory_entry*)
+               args->arch_args.vir_pgdir;
+
+       TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
+               fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
+
+       ARMPagingStructures32Bit::StaticInit();
+
+       // create the initial pool for the physical page mapper
+       PhysicalPageSlotPool* pool
+               = new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
+                       PhysicalPageSlotPool;
+       status_t error = pool->InitInitial(args);
+       if (error != B_OK) {
+               panic("ARMPagingMethod32Bit::Init(): Failed to create initial 
pool "
+                       "for physical page mapper!");
+               return error;
+       }
+
+       // create physical page mapper
+       large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
+               fKernelPhysicalPageMapper);
+               // TODO: Select the best page mapper!
+
+       // enable global page feature if available
+#if 0 //IRA: check for ARMv6!!
+       if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
+               // this prevents kernel pages from being flushed from TLB on
+               // context-switch
+               x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
+       }
+#endif
+       TRACE("vm_translation_map_init: done\n");
+
+       *_physicalPageMapper = fPhysicalPageMapper;
+       return B_OK;
+}
+
+
+status_t
+ARMPagingMethod32Bit::InitPostArea(kernel_args* args)
+{
+       void *temp;
+       status_t error;
+       area_id area;
+
+       temp = (void*)fKernelVirtualPageDirectory;
+       area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, 
MMU_L1_TABLE_SIZE,
+               B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+       if (area < B_OK)
+               return area;
+
+       error = PhysicalPageSlotPool::sInitialPhysicalPagePool
+               .InitInitialPostArea(args);
+       if (error != B_OK)
+               return error;
+
+       return B_OK;
+}
+
+
+status_t
+ARMPagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** 
_map)
+{
+       ARMVMTranslationMap32Bit* map = new(std::nothrow) 
ARMVMTranslationMap32Bit;
+       if (map == NULL)
+               return B_NO_MEMORY;
+
+       status_t error = map->Init(kernel);
+       if (error != B_OK) {
+               delete map;
+               return error;
+       }
+
+       *_map = map;
+       return B_OK;
+}
+
+
+status_t
+ARMPagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
+       phys_addr_t physicalAddress, uint8 attributes,
+       phys_addr_t (*get_free_page)(kernel_args*))
+{
+       // check to see if a page table exists for this range
+       int index = VADDR_TO_PDENT(virtualAddress);
+       if ((fKernelVirtualPageDirectory[index] & ARM_PDE_TYPE_MASK) == 0) {
+               phys_addr_t pgtable;
+               page_directory_entry *e;
+               // we need to allocate a pgtable
+               pgtable = get_free_page(args);
+               // pgtable is in pages, convert to physical address
+               pgtable *= B_PAGE_SIZE;
+
+               TRACE("ARMPagingMethod32Bit::MapEarly(): asked for free page 
for "
+                       "pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);
+
+               // put it in the pgdir
+               e = &fKernelVirtualPageDirectory[index];
+               PutPageTableInPageDir(e, pgtable, attributes);
+
+               // zero it out in its new mapping
+               memset((void*)pgtable, 0, B_PAGE_SIZE);
+       }
+
+       page_table_entry *ptEntry = (page_table_entry*)
+               (fKernelVirtualPageDirectory[index] & ARM_PDE_ADDRESS_MASK);
+       ptEntry += VADDR_TO_PTENT(virtualAddress);
+
+       ASSERT_PRINT(
+               (*ptEntry & ARM_PTE_TYPE_MASK) == 0,
+               "virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
+               ", existing pte: %#" B_PRIx32, virtualAddress, 
fKernelVirtualPageDirectory[index],
+               *ptEntry);
+
+       // now, fill in the pentry
+       PutPageTableEntryInTable(ptEntry,
+               physicalAddress, attributes, 0, 
IS_KERNEL_ADDRESS(virtualAddress));
+
+       return B_OK;
+}
+
+
+bool
+ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
+       uint32 protection)
+{
+#if 0
+       // We only trust the kernel team's page directory. So switch to it 
first.
+       // Always set it to make sure the TLBs don't contain obsolete data.
+       uint32 physicalPageDirectory;
+       read_cr3(physicalPageDirectory);
+       write_cr3(fKernelPhysicalPageDirectory);
+
+       // get the page directory entry for the address
+       page_directory_entry pageDirectoryEntry;
+       uint32 index = VADDR_TO_PDENT(virtualAddress);
+
+       if (physicalPageDirectory == fKernelPhysicalPageDirectory) {
+               pageDirectoryEntry = fKernelVirtualPageDirectory[index];
+       } else if (fPhysicalPageMapper != NULL) {
+               // map the original page directory and get the entry
+               void* handle;
+               addr_t virtualPageDirectory;
+               status_t error = fPhysicalPageMapper->GetPageDebug(
+                       physicalPageDirectory, &virtualPageDirectory, &handle);
+               if (error == B_OK) {
+                       pageDirectoryEntry
+                               = 
((page_directory_entry*)virtualPageDirectory)[index];
+                       fPhysicalPageMapper->PutPageDebug(virtualPageDirectory, 
handle);
+               } else
+                       pageDirectoryEntry = 0;
+       } else
+               pageDirectoryEntry = 0;
+
+       // map the page table and get the entry
+       page_table_entry pageTableEntry;
+       index = VADDR_TO_PTENT(virtualAddress);
+
+       if ((pageDirectoryEntry & ARM_PDE_PRESENT) != 0
+                       && fPhysicalPageMapper != NULL) {
+               void* handle;
+               addr_t virtualPageTable;
+               status_t error = fPhysicalPageMapper->GetPageDebug(
+                       pageDirectoryEntry & ARM_PDE_ADDRESS_MASK, 
&virtualPageTable,
+                       &handle);
+               if (error == B_OK) {
+                       pageTableEntry = 
((page_table_entry*)virtualPageTable)[index];
+                       fPhysicalPageMapper->PutPageDebug(virtualPageTable, 
handle);
+               } else
+                       pageTableEntry = 0;
+       } else
+               pageTableEntry = 0;
+
+       // switch back to the original page directory
+       if (physicalPageDirectory != fKernelPhysicalPageDirectory)
+               write_cr3(physicalPageDirectory);
+
+       if ((pageTableEntry & ARM_PTE_PRESENT) == 0)
+               return false;
+
+       // present means kernel-readable, so check for writable
+       return (protection & B_KERNEL_WRITE_AREA) == 0
+               || (pageTableEntry & ARM_PTE_WRITABLE) != 0;
+#endif
+       //IRA: fix the above!
+       return true;
+}
+
+
+/*static*/ void
+ARMPagingMethod32Bit::PutPageTableInPageDir(page_directory_entry* entry,
+       phys_addr_t pgtablePhysical, uint32 attributes)
+{
+       *entry = (pgtablePhysical & ARM_PDE_ADDRESS_MASK)
+               | ARM_PDE_TYPE_COARSE_L2_PAGE_TABLE;
+               // TODO: we ignore the attributes of the page table - for 
compatibility
+               // with BeOS we allow having user accessible areas in the 
kernel address
+               // space. This is currently being used by some drivers, mainly 
for the
+               // frame buffer. Our current real time data implementation 
makes use of
+               // this fact, too.
+               // We might want to get rid of this possibility one day, 
especially if
+               // we intend to port it to a platform that does not support 
this.
+}
+
+
+/*static*/ void
+ARMPagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
+       phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
+       bool globalPage)
+{
+       page_table_entry page = (physicalAddress & ARM_PTE_ADDRESS_MASK)
+               | ARM_PTE_TYPE_SMALL_PAGE;
+#if 0 //IRA
+               | ARM_PTE_PRESENT | (globalPage ? ARM_PTE_GLOBAL : 0)
+               | MemoryTypeToPageTableEntryFlags(memoryType);
+
+       // if the page is user accessible, it's automatically
+       // accessible in kernel space, too (but with the same
+       // protection)
+       if ((attributes & B_USER_PROTECTION) != 0) {
+               page |= ARM_PTE_USER;
+               if ((attributes & B_WRITE_AREA) != 0)
+                       page |= ARM_PTE_WRITABLE;
+       } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
+               page |= ARM_PTE_WRITABLE;
+#endif
+       // put it in the page table
+       *(volatile page_table_entry*)entry = page;
+}
+
+
+/*static*/ void
+ARMPagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
+       addr_t address, size_t size)
+{
+       ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
+       memset(pageTables, 0, 256 * (size / (B_PAGE_SIZE * 256)));
+
+       // put the array of pgtables directly into the kernel pagedir
+       // these will be wired and kept mapped into virtual space to be easy to 
get
+       // to
+       {
+               addr_t virtualTable = (addr_t)pageTables;
+
+               for (size_t i = 0; i < (size / (B_PAGE_SIZE * 256));
+                               i++, virtualTable += 
256*sizeof(page_directory_entry)) {
+                       phys_addr_t physicalTable = 0;
+                       _EarlyQuery(virtualTable, &physicalTable);
+                       page_directory_entry* entry = 
method->KernelVirtualPageDirectory()
+                               + VADDR_TO_PDENT(address) + i;
+                       PutPageTableInPageDir(entry, physicalTable,
+                               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+               }
+       }
+}
+
+
+//! TODO: currently assumes this translation map is active
+/*static*/ status_t
+ARMPagingMethod32Bit::_EarlyQuery(addr_t virtualAddress,
+       phys_addr_t *_physicalAddress)
+{
+       ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
+       int index = VADDR_TO_PDENT(virtualAddress);
+       if ((method->KernelVirtualPageDirectory()[index] & ARM_PDE_TYPE_MASK) 
== 0) {
+               // no pagetable here
+               return B_ERROR;
+       }
+
+       page_table_entry* entry = (page_table_entry*)
+               (method->KernelVirtualPageDirectory()[index] & 
ARM_PDE_ADDRESS_MASK);
+       entry += VADDR_TO_PTENT(virtualAddress);
+
+       if ((*entry & ARM_PTE_TYPE_MASK) == 0) {
+               // page mapping not valid
+               return B_ERROR;
+       }
+
+       *_physicalAddress = (*entry & ARM_PTE_ADDRESS_MASK)
+               | VADDR_TO_PGOFF(virtualAddress);
+
+       return B_OK;
+}

Added: 
haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.h  
                        (rev 0)
+++ haiku/trunk/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.h  
2010-10-30 14:10:30 UTC (rev 39206)
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2010, Ithamar R. Adema, ithamar.adema@xxxxxxxxxxxxxxxx
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_METHOD_32_BIT_H
+#define KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_METHOD_32_BIT_H
+
+
+#include "paging/32bit/paging.h"
+#include "paging/ARMPagingMethod.h"
+#include "paging/ARMPagingStructures.h"
+
+
+class TranslationMapPhysicalPageMapper;
+class ARMPhysicalPageMapper;
+
+
+class ARMPagingMethod32Bit : public ARMPagingMethod {
+public:
+                                                               
ARMPagingMethod32Bit();
+       virtual                                         ~ARMPagingMethod32Bit();
+
+       virtual status_t                        Init(kernel_args* args,
+                                                                       
VMPhysicalPageMapper** _physicalPageMapper);
+       virtual status_t                        InitPostArea(kernel_args* args);
+
+       virtual status_t                        CreateTranslationMap(bool 
kernel,
+                                                                       
VMTranslationMap** _map);
+
+       virtual status_t                        MapEarly(kernel_args* args,
+                                                                       addr_t 
virtualAddress,
+                                                                       
phys_addr_t physicalAddress,
+                                                                       uint8 
attributes,
+                                                                       
phys_addr_t (*get_free_page)(kernel_args*));
+
+       virtual bool                            IsKernelPageAccessible(addr_t 
virtualAddress,
+                                                                       uint32 
protection);
+
+       inline  uint32                          KernelPhysicalPageDirectory() 
const
+                                                                       { 
return fKernelPhysicalPageDirectory; }
+       inline  page_directory_entry* KernelVirtualPageDirectory() const
+                                                                       { 
return fKernelVirtualPageDirectory; }
+       inline  ARMPhysicalPageMapper* PhysicalPageMapper() const
+                                                                       { 
return fPhysicalPageMapper; }
+       inline  TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() 
const
+                                                                       { 
return fKernelPhysicalPageMapper; }
+
+       static  ARMPagingMethod32Bit* Method();
+
+       static  void                            PutPageTableInPageDir(
+                                                                       
page_directory_entry* entry,
+                                                                       
phys_addr_t pgtablePhysical,
+                                                                       uint32 
attributes);
+       static  void                            PutPageTableEntryInTable(
+                                                                       
page_table_entry* entry,
+                                                                       
phys_addr_t physicalAddress,
+                                                                       uint32 
attributes, uint32 memoryType,
+                                                                       bool 
globalPage);
+       static  page_table_entry        SetPageTableEntry(page_table_entry* 
entry,
+                                                                       
page_table_entry newEntry);
+       static  page_table_entry        
SetPageTableEntryFlags(page_table_entry* entry,
+                                                                       uint32 
flags);
+       static  page_table_entry        TestAndSetPageTableEntry(
+                                                                       
page_table_entry* entry,
+                                                                       
page_table_entry newEntry,
+                                                                       
page_table_entry oldEntry);
+       static  page_table_entry        ClearPageTableEntry(page_table_entry* 
entry);
+       static  page_table_entry        ClearPageTableEntryFlags(
+                                                                       
page_table_entry* entry, uint32 flags);
+
+       static  uint32                          MemoryTypeToPageTableEntryFlags(
+                                                                       uint32 
memoryType);
+
+private:
+                       struct PhysicalPageSlotPool;
+                       friend struct PhysicalPageSlotPool;
+
+private:
+       static  void                            _EarlyPreparePageTables(
+                                                                       
page_table_entry* pageTables,
+                                                                       addr_t 
address, size_t size);
+       static  status_t                        _EarlyQuery(addr_t 
virtualAddress,
+                                                                       
phys_addr_t *_physicalAddress);
+
+private:
+                       uint32                          
fKernelPhysicalPageDirectory;
+                       page_directory_entry* fKernelVirtualPageDirectory;
+
+                       ARMPhysicalPageMapper* fPhysicalPageMapper;
+                       TranslationMapPhysicalPageMapper* 
fKernelPhysicalPageMapper;
+};
+
+
+/*static*/ inline ARMPagingMethod32Bit*
+ARMPagingMethod32Bit::Method()
+{
+       return static_cast<ARMPagingMethod32Bit*>(gARMPagingMethod);
+}
+
+
+/*static*/ inline page_table_entry
+ARMPagingMethod32Bit::SetPageTableEntry(page_table_entry* entry,
+       page_table_entry newEntry)
+{
+       return atomic_set((int32*)entry, newEntry);
+}
+
+
+/*static*/ inline page_table_entry
+ARMPagingMethod32Bit::SetPageTableEntryFlags(page_table_entry* entry,
+       uint32 flags)
+{
+       return atomic_or((int32*)entry, flags);
+}
+
+
+/*static*/ inline page_table_entry
+ARMPagingMethod32Bit::TestAndSetPageTableEntry(page_table_entry* entry,
+       page_table_entry newEntry, page_table_entry oldEntry)
+{
+       return atomic_test_and_set((int32*)entry, newEntry, oldEntry);
+}
+
+
+/*static*/ inline page_table_entry
+ARMPagingMethod32Bit::ClearPageTableEntry(page_table_entry* entry)
+{
+       return SetPageTableEntry(entry, 0);
+}
+
+
+/*static*/ inline page_table_entry

[... truncated: 2490 lines follow ...]

Other related posts: