[haiku-commits] r37085 - haiku/trunk/src/system/kernel/arch/x86/paging/pae

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Thu, 10 Jun 2010 15:25:36 +0200 (CEST)

Author: bonefish
Date: 2010-06-10 15:25:36 +0200 (Thu, 10 Jun 2010)
New Revision: 37085
Changeset: http://dev.haiku-os.org/changeset/37085/haiku

Modified:
   haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp
   haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h
   haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.cpp
   haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.h
   haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp
Log:
Fleshed out most of the unimplemented methods. The kernel now boots up to
the creation of the initial shell.
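
The page table entry helpers added to X86PagingMethodPAE.h below wrap 64-bit
atomic operations, since PAE entries are 64 bits wide and need to be updated
atomically even on a 32-bit CPU. A minimal stand-alone sketch of the same
pattern, using GCC/Clang __atomic builtins in place of the kernel's
atomic_*64() routines (the PTE flag values here are illustrative, not the
kernel's X86_PAE_PTE_* constants):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pae_pte;

/* illustrative flag values only */
#define PTE_PRESENT   0x01ull
#define PTE_WRITABLE  0x02ull
#define PTE_ACCESSED  0x20ull

/* like SetPageTableEntry(): atomically store newEntry, return the old value */
static pae_pte
set_entry(pae_pte* entry, pae_pte newEntry)
{
	return __atomic_exchange_n(entry, newEntry, __ATOMIC_SEQ_CST);
}

/* like ClearPageTableEntryFlags(): atomically clear flags, return the old value */
static pae_pte
clear_entry_flags(pae_pte* entry, uint64_t flags)
{
	return __atomic_fetch_and(entry, ~flags, __ATOMIC_SEQ_CST);
}

/* like TestAndSetPageTableEntry(): compare-and-swap, return the value found */
static pae_pte
test_and_set_entry(pae_pte* entry, pae_pte newEntry, pae_pte oldEntry)
{
	__atomic_compare_exchange_n(entry, &oldEntry, newEntry, 0,
		__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return oldEntry;
}

int
main(void)
{
	pae_pte pte = 0;
	set_entry(&pte, 0x1000 | PTE_PRESENT | PTE_WRITABLE | PTE_ACCESSED);
	pae_pte old = clear_entry_flags(&pte, PTE_ACCESSED);
	printf("old %#llx -> new %#llx\n", (unsigned long long)old,
		(unsigned long long)pte);
	test_and_set_entry(&pte, 0, pte);	/* unmap only if unchanged since the read */
	return 0;
}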


Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp    2010-06-10 13:22:44 UTC (rev 37084)
+++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp    2010-06-10 13:25:36 UTC (rev 37085)
@@ -1,6 +1,10 @@
 /*
- * Copyright 2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2002-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx All rights reserved.
  * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
  */
 
 
@@ -57,9 +61,11 @@
                        fPageHolePageDir, fPhysicalPageDir);
        }
 
-       void Switch(void*& _pageStructures, size_t& _pageStructuresSize,
-               pae_page_directory_entry** pageDirs, phys_addr_t* physicalPageDirs,
-               addr_t& _freeVirtualSlot, pae_page_table_entry*& _freeVirtualSlotPTE)
+       void Switch(pae_page_directory_pointer_table_entry*& _virtualPDPT,
+               phys_addr_t& _physicalPDPT, void*& _pageStructures,
+               size_t& _pageStructuresSize, pae_page_directory_entry** pageDirs,
+               phys_addr_t* physicalPageDirs, addr_t& _freeVirtualSlot,
+               pae_page_table_entry*& _freeVirtualSlotPTE)
        {
                // count the page tables we have to translate
                uint32 pageTableCount = 0;
@@ -134,6 +140,8 @@
                x86_write_cr4(x86_read_cr4() | IA32_CR4_PAE | IA32_CR4_GLOBAL_PAGES);
 
                // set return values
+               _virtualPDPT = pdpt;
+               _physicalPDPT = physicalPDPT;
                _pageStructures = fAllocatedPages;
                _pageStructuresSize = (size_t)fUsedPagesCount * B_PAGE_SIZE;
                memcpy(pageDirs, fPageDirs, sizeof(fPageDirs));
@@ -439,8 +447,12 @@
 X86PagingMethodPAE::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
        addr_t virtualAddress)
 {
-// TODO: Implement!
-       panic("X86PagingMethodPAE::PhysicalPageSlotPool::Map(): not implemented");
+       pae_page_table_entry& pte = fPageTable[
+               (virtualAddress - fVirtualBase) / B_PAGE_SIZE];
+       pte = (physicalAddress & X86_PAE_PTE_ADDRESS_MASK)
+               | X86_PAE_PTE_WRITABLE | X86_PAE_PTE_GLOBAL | X86_PAE_PTE_PRESENT;
+
+       invalidate_TLB(virtualAddress);
 }
 
 
@@ -475,9 +487,10 @@
        VMPhysicalPageMapper** _physicalPageMapper)
 {
        // switch to PAE
-       ToPAESwitcher(args).Switch(fEarlyPageStructures, fEarlyPageStructuresSize,
-               fKernelVirtualPageDirs, fKernelPhysicalPageDirs, fFreeVirtualSlot,
-               fFreeVirtualSlotPTE);
+       ToPAESwitcher(args).Switch(fKernelVirtualPageDirPointerTable,
+               fKernelPhysicalPageDirPointerTable, fEarlyPageStructures,
+               fEarlyPageStructuresSize, fKernelVirtualPageDirs,
+               fKernelPhysicalPageDirs, fFreeVirtualSlot, fFreeVirtualSlotPTE);
 
        // create the initial pool for the physical page mapper
        PhysicalPageSlotPool* pool
@@ -502,18 +515,38 @@
 status_t
 X86PagingMethodPAE::InitPostArea(kernel_args* args)
 {
-// TODO: Implement!
-       panic("X86PagingMethodPAE::InitPostArea(): not implemented");
-       return B_UNSUPPORTED;
+       // wrap the kernel paging structures in an area
+       area_id area = create_area("kernel paging structs", &fEarlyPageStructures,
+               B_EXACT_ADDRESS, fEarlyPageStructuresSize, B_ALREADY_WIRED,
+               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+       if (area < B_OK)
+               return area;
+
+       // let the initial page pool create areas for its structures
+       status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool
+               .InitInitialPostArea(args);
+       if (error != B_OK)
+               return error;
+
+       return B_OK;
 }
 
 
 status_t
 X86PagingMethodPAE::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
 {
-// TODO: Implement!
-       panic("X86PagingMethodPAE::CreateTranslationMap(): not implemented");
-       return B_UNSUPPORTED;
+       X86VMTranslationMapPAE* map = new(std::nothrow) X86VMTranslationMapPAE;
+       if (map == NULL)
+               return B_NO_MEMORY;
+
+       status_t error = map->Init(kernel);
+       if (error != B_OK) {
+               delete map;
+               return error;
+       }
+
+       *_map = map;
+       return B_OK;
 }
 
 
@@ -533,7 +566,7 @@
                TRACE("X86PagingMethodPAE::MapEarly(): asked for free page for "
                        "page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);
 
-               // put it in the pgdir
+               // put it in the page dir
                PutPageTableInPageDir(pageDirEntry, physicalPageTable, attributes);
 
                // zero it out

Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h      2010-06-10 13:22:44 UTC (rev 37084)
+++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h      2010-06-10 13:25:36 UTC (rev 37085)
@@ -46,8 +46,13 @@
        inline  TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
                                                                        { return fKernelPhysicalPageMapper; }
 
+       inline  pae_page_directory_pointer_table_entry*
+                                                                       KernelVirtualPageDirPointerTable() const;
+       inline  phys_addr_t                     KernelPhysicalPageDirPointerTable() const;
        inline  pae_page_directory_entry* const* KernelVirtualPageDirs() const
                                                                        { return fKernelVirtualPageDirs; }
+       inline  const phys_addr_t*      KernelPhysicalPageDirs() const
+                                                                       { return fKernelPhysicalPageDirs; }
 
        static  X86PagingMethodPAE*     Method();
 
@@ -60,6 +65,18 @@
                                                                        phys_addr_t physicalAddress,
                                                                        uint32 attributes, uint32 memoryType,
                                                                        bool globalPage);
+       static  pae_page_table_entry SetPageTableEntry(pae_page_table_entry* entry,
+                                                                       pae_page_table_entry newEntry);
+       static  pae_page_table_entry SetPageTableEntryFlags(
+                                                                       pae_page_table_entry* entry, uint64 flags);
+       static  pae_page_table_entry TestAndSetPageTableEntry(
+                                                                       pae_page_table_entry* entry,
+                                                                       pae_page_table_entry newEntry,
+                                                                       pae_page_table_entry oldEntry);
+       static  pae_page_table_entry ClearPageTableEntry(
+                                                                       pae_page_table_entry* entry);
+       static  pae_page_table_entry ClearPageTableEntryFlags(
+                                                                       pae_page_table_entry* entry, uint64 flags);
 
        static  pae_page_directory_entry* PageDirEntryForAddress(
                                                                        pae_page_directory_entry* const* pdpt,
@@ -84,6 +101,9 @@
 
                        void*                           fEarlyPageStructures;
                        size_t                          fEarlyPageStructuresSize;
+                       pae_page_directory_pointer_table_entry*
+                                                                       fKernelVirtualPageDirPointerTable;
+                       phys_addr_t                     fKernelPhysicalPageDirPointerTable;
                        pae_page_directory_entry* fKernelVirtualPageDirs[4];
                        phys_addr_t                     fKernelPhysicalPageDirs[4];
                        addr_t                          fFreeVirtualSlot;
@@ -91,6 +111,20 @@
 };
 
 
+pae_page_directory_pointer_table_entry*
+X86PagingMethodPAE::KernelVirtualPageDirPointerTable() const
+{
+       return fKernelVirtualPageDirPointerTable;
+}
+
+
+phys_addr_t
+X86PagingMethodPAE::KernelPhysicalPageDirPointerTable() const
+{
+       return fKernelPhysicalPageDirPointerTable;
+}
+
+
 /*static*/ inline X86PagingMethodPAE*
 X86PagingMethodPAE::Method()
 {
@@ -107,6 +141,45 @@
 }
 
 
+/*static*/ inline pae_page_table_entry
+X86PagingMethodPAE::SetPageTableEntry(pae_page_table_entry* entry,
+       pae_page_table_entry newEntry)
+{
+       return atomic_set64((int64*)entry, newEntry);
+}
+
+
+/*static*/ inline pae_page_table_entry
+X86PagingMethodPAE::SetPageTableEntryFlags(pae_page_table_entry* entry,
+       uint64 flags)
+{
+       return atomic_or64((int64*)entry, flags);
+}
+
+
+/*static*/ inline pae_page_table_entry
+X86PagingMethodPAE::TestAndSetPageTableEntry(pae_page_table_entry* entry,
+       pae_page_table_entry newEntry, pae_page_table_entry oldEntry)
+{
+       return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
+}
+
+
+/*static*/ inline pae_page_table_entry
+X86PagingMethodPAE::ClearPageTableEntry(pae_page_table_entry* entry)
+{
+       return SetPageTableEntry(entry, 0);
+}
+
+
+/*static*/ inline pae_page_table_entry
+X86PagingMethodPAE::ClearPageTableEntryFlags(pae_page_table_entry* entry,
+       uint64 flags)
+{
+       return atomic_and64((int64*)entry, ~flags);
+}
+
+
 /*static*/ inline uint32
 X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
 {

Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.cpp        2010-06-10 13:22:44 UTC (rev 37084)
+++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.cpp        2010-06-10 13:25:36 UTC (rev 37085)
@@ -6,6 +6,8 @@
 
 #include "paging/pae/X86PagingStructuresPAE.h"
 
+#include <string.h>
+
 #include <KernelExport.h>
 
 
@@ -19,14 +21,21 @@
 
 X86PagingStructuresPAE::~X86PagingStructuresPAE()
 {
+// TODO: Implement!
+       panic("X86PagingStructuresPAE::~X86PagingStructuresPAE(): not implemented");
 }
 
 
 void
-X86PagingStructuresPAE::Init()
+X86PagingStructuresPAE::Init(
+       pae_page_directory_pointer_table_entry* virtualPDPT,
+       phys_addr_t physicalPDPT, pae_page_directory_entry* const* virtualPageDirs,
+       const phys_addr_t* physicalPageDirs)
 {
-// TODO: Implement!
-       panic("X86PagingStructuresPAE::Init(): not implemented");
+       fPageDirPointerTable = virtualPDPT;
+       pgdir_phys = physicalPDPT;
+       memcpy(fVirtualPageDirs, virtualPageDirs, sizeof(fVirtualPageDirs));
+       memcpy(fPhysicalPageDirs, physicalPageDirs, sizeof(fPhysicalPageDirs));
 }
 
 
@@ -38,12 +47,4 @@
 }
 
 
-/*static*/ void
-X86PagingStructuresPAE::StaticInit()
-{
-// TODO: Implement!
-       panic("X86PagingStructuresPAE::StaticInit(): not implemented");
-}
-
-
 #endif // B_HAIKU_PHYSICAL_BITS == 64

Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.h  2010-06-10 13:22:44 UTC (rev 37084)
+++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.h  2010-06-10 13:25:36 UTC (rev 37085)
@@ -17,11 +17,22 @@
                                                                X86PagingStructuresPAE();
        virtual                                         ~X86PagingStructuresPAE();
 
-                       void                            Init();
+                       void                            Init(pae_page_directory_pointer_table_entry*
+                                                                               virtualPDPT,
+                                                                        phys_addr_t physicalPDPT,
+                                                                        pae_page_directory_entry* const*
+                                                                               virtualPageDirs,
+                                                                        const phys_addr_t* physicalPageDirs);
 
        virtual void                            Delete();
 
-       static  void                            StaticInit();
+                       pae_page_directory_entry* const* VirtualPageDirs() const
+                                                                       { return fVirtualPageDirs; }
+
+private:
+                       pae_page_directory_pointer_table_entry* fPageDirPointerTable;
+                       pae_page_directory_entry* fVirtualPageDirs[4];
+                       phys_addr_t                     fPhysicalPageDirs[4];
 };
 
 

Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp        2010-06-10 13:22:44 UTC (rev 37084)
+++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp        2010-06-10 13:25:36 UTC (rev 37085)
@@ -1,11 +1,20 @@
 /*
- * Copyright 2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2002-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx All rights reserved.
  * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
  */
 
 
 #include "paging/pae/X86VMTranslationMapPAE.h"
 
+#include <int.h>
+#include <slab/Slab.h>
+#include <thread.h>
+#include <util/AutoLock.h>
+#include <vm/vm_page.h>
 #include <vm/VMAddressSpace.h>
 #include <vm/VMCache.h>
 
@@ -45,37 +54,184 @@
 
        X86VMTranslationMap::Init(kernel);
 
-// TODO: Implement!
-       panic("unsupported");
-       return B_UNSUPPORTED;
+       fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
+       if (fPagingStructures == NULL)
+               return B_NO_MEMORY;
+
+       X86PagingMethodPAE* method = X86PagingMethodPAE::Method();
+
+       if (kernel) {
+               // kernel
+               // get the physical page mapper
+               fPageMapper = method->KernelPhysicalPageMapper();
+
+               // we already know the kernel pgdir mapping
+               fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
+                       method->KernelPhysicalPageDirPointerTable(),
+                       method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
+       } else {
+panic("X86VMTranslationMapPAE::Init(): user init not implemented");
+#if 0
+               // user
+               // allocate a physical page mapper
+               status_t error = method->PhysicalPageMapper()
+                       ->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
+               if (error != B_OK)
+                       return error;
+
+               // allocate the page directory
+               page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
+                       B_PAGE_SIZE, B_PAGE_SIZE);
+               if (virtualPageDir == NULL)
+                       return B_NO_MEMORY;
+
+               // look up the page directory's physical address
+               phys_addr_t physicalPageDir;
+               vm_get_page_mapping(VMAddressSpace::KernelID(),
+                       (addr_t)virtualPageDir, &physicalPageDir);
+
+               fPagingStructures->Init(virtualPageDir, physicalPageDir,
+                       method->KernelVirtualPageDirectory());
+#endif
+       }
+
+       return B_OK;
 }
 
 
 size_t
 X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
 {
-// TODO: Implement!
-       panic("unsupported");
-       return 0;
+       // If start == 0, the actual base address is not yet known to the caller and
+       // we shall assume the worst case.
+       if (start == 0) {
+               // offset the range so it has the worst possible alignment
+               start = kPAEPageTableRange - B_PAGE_SIZE;
+               end += kPAEPageTableRange - B_PAGE_SIZE;
+       }
+
+       return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
 }
 
 
 status_t
-X86VMTranslationMapPAE::Map(addr_t va, phys_addr_t pa, uint32 attributes,
-       uint32 memoryType, vm_page_reservation* reservation)
+X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
+       uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
 {
-// TODO: Implement!
-       panic("unsupported");
-       return B_UNSUPPORTED;
+       TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
+               "\n", virtualAddress, physicalAddress);
+
+       // check to see if a page table exists for this range
+       pae_page_directory_entry* pageDirEntry
+               = X86PagingMethodPAE::PageDirEntryForAddress(
+                       fPagingStructures->VirtualPageDirs(), virtualAddress);
+       if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
+               // we need to allocate a page table
+               vm_page *page = vm_page_allocate_page(reservation,
+                       PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
+
+               DEBUG_PAGE_ACCESS_END(page);
+
+               phys_addr_t physicalPageTable
+                       = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
+
+               TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
+                       "page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);
+
+               // put it in the page dir
+               X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
+                       physicalPageTable,
+                       attributes
+                               | ((attributes & B_USER_PROTECTION) != 0
+                                               ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
+
+               fMapCount++;
+       }
+
+       // now, fill in the page table entry
+       struct thread* thread = thread_get_current_thread();
+       ThreadCPUPinner pinner(thread);
+
+       pae_page_table_entry* pageTable
+               = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                       *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
+       pae_page_table_entry* entry = pageTable
+               + virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
+
+       ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
+               "virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
+               virtualAddress, *entry);
+
+       X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
+               attributes, memoryType, fIsKernelMap);
+
+       pinner.Unlock();
+
+       // Note: We don't need to invalidate the TLB for this address, as previously
+       // the entry was not present and the TLB doesn't cache those entries.
+
+       fMapCount++;
+
+       return 0;
 }
 
 
 status_t
 X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
 {
-// TODO: Implement!
-       panic("unsupported");
-       return B_UNSUPPORTED;
+       start = ROUNDDOWN(start, B_PAGE_SIZE);
+       if (start >= end)
+               return B_OK;
+
+       TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
+               "\n", start, end);
+
+       do {
+               pae_page_directory_entry* pageDirEntry
+                       = X86PagingMethodPAE::PageDirEntryForAddress(
+                               fPagingStructures->VirtualPageDirs(), start);
+               if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
+                       // no page table here, move the start up to access the next page
+                       // table
+                       start = ROUNDUP(start + 1, kPAEPageTableRange);
+                       continue;
+               }
+
+               struct thread* thread = thread_get_current_thread();
+               ThreadCPUPinner pinner(thread);
+
+               pae_page_table_entry* pageTable
+                       = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                               *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
+
+               uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
+               for (; index < kPAEPageTableEntryCount && start < end;
+                               index++, start += B_PAGE_SIZE) {
+                       if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
+                               // page mapping not valid
+                               continue;
+                       }
+
+                       TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
+                               B_PRIxADDR "\n", start);
+
+                       pae_page_table_entry oldEntry
+                               = X86PagingMethodPAE::ClearPageTableEntryFlags(
+                                       &pageTable[index], X86_PAE_PTE_PRESENT);
+                       fMapCount--;
+
+                       if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
+                               // Note, that we only need to invalidate the address, if the
+                               // accessed flags was set, since only then the entry could have
+                               // been in any TLB.
+                               InvalidatePage(start);
+                       }
+               }
+
+               pinner.Unlock();
+       } while (start != 0 && start < end);
+
+       return B_OK;
 }
 
 
@@ -86,9 +242,113 @@
 X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
        bool updatePageQueue)
 {
-// TODO: Implement!
-       panic("unsupported");
-       return B_UNSUPPORTED;
+       ASSERT(address % B_PAGE_SIZE == 0);
+
+       pae_page_directory_entry* pageDirEntry
+               = X86PagingMethodPAE::PageDirEntryForAddress(
+                       fPagingStructures->VirtualPageDirs(), address);
+
+       TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);
+
+       RecursiveLocker locker(fLock);
+
+       if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
+               return B_ENTRY_NOT_FOUND;
+
+       ThreadCPUPinner pinner(thread_get_current_thread());
+
+       pae_page_table_entry* pageTable
+               = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                       *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
+
+       pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearPageTableEntry(
+               &pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);
+
+       pinner.Unlock();
+
+       if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
+               // page mapping not valid
+               return B_ENTRY_NOT_FOUND;
+       }
+
+       fMapCount--;
+
+       if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
+               // Note, that we only need to invalidate the address, if the
+               // accessed flags was set, since only then the entry could have been
+               // in any TLB.
+               InvalidatePage(address);
+
+               Flush();
+
+               // NOTE: Between clearing the page table entry and Flush() other
+               // processors (actually even this processor with another thread of the
+               // same team) could still access the page in question via their cached
+               // entry. We can obviously lose a modified flag in this case, with the
+               // effect that the page looks unmodified (and might thus be recycled),
+               // but is actually modified.
+               // In most cases this is harmless, but for vm_remove_all_page_mappings()
+               // this is actually a problem.
+               // Interestingly FreeBSD seems to ignore this problem as well
+               // (cf. pmap_remove_all()), unless I've missed something.
+       }
+
+       if (area->cache_type == CACHE_TYPE_DEVICE)
+               return B_OK;
+
+       // get the page
+       vm_page* page = vm_lookup_page(
+               (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
+       ASSERT(page != NULL);
+
+       // transfer the accessed/dirty flags to the page
+       if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
+               page->accessed = true;
+       if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
+               page->modified = true;
+
+       // TODO: Here comes a lot of paging method and even architecture independent
+       // code. Refactor!
+
+       // remove the mapping object/decrement the wired_count of the page
+       vm_page_mapping* mapping = NULL;
+       if (area->wiring == B_NO_LOCK) {
+               vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
+               while ((mapping = iterator.Next()) != NULL) {
+                       if (mapping->area == area) {
+                               area->mappings.Remove(mapping);
+                               page->mappings.Remove(mapping);
+                               break;
+                       }
+               }
+
+               ASSERT(mapping != NULL);
+       } else
+               page->wired_count--;
+
+       locker.Unlock();
+
+       if (page->wired_count == 0 && page->mappings.IsEmpty()) {
+               atomic_add(&gMappedPagesCount, -1);
+
+               if (updatePageQueue) {
+                       if (page->Cache()->temporary)
+                               vm_page_set_state(page, PAGE_STATE_INACTIVE);
+                       else if (page->modified)
+                               vm_page_set_state(page, PAGE_STATE_MODIFIED);
+                       else
+                               vm_page_set_state(page, PAGE_STATE_CACHED);
+               }
+       }
+
+       if (mapping != NULL) {
+               bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
+               object_cache_free(gPageMappingsObjectCache, mapping,
+                       CACHE_DONT_WAIT_FOR_MEMORY
+                               | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
+       }
+
+       return B_OK;
 }
 
 
@@ -96,8 +356,8 @@
 X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
        bool updatePageQueue)
 {
-// TODO: Implement!
-       panic("unsupported");
+// TODO: Implement for real!
+       X86VMTranslationMap::UnmapPages(area, base, size, updatePageQueue);
 }
 
 
@@ -105,18 +365,59 @@
 X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
        bool ignoreTopCachePageFlags)
 {
-// TODO: Implement!
-       panic("unsupported");
+// TODO: Implement for real!
+       X86VMTranslationMap::UnmapArea(area, deletingAddressSpace,
+               ignoreTopCachePageFlags);
 }
 
 
 status_t
-X86VMTranslationMapPAE::Query(addr_t va, phys_addr_t *_physical,
-       uint32 *_flags)
+X86VMTranslationMapPAE::Query(addr_t virtualAddress,
+       phys_addr_t* _physicalAddress, uint32* _flags)
 {
-// TODO: Implement!
-       panic("unsupported");
-       return B_UNSUPPORTED;
+       // default the flags to not present
+       *_flags = 0;
+       *_physicalAddress = 0;
+
+       // get the page directory entry
+       pae_page_directory_entry* pageDirEntry
+               = X86PagingMethodPAE::PageDirEntryForAddress(
+                       fPagingStructures->VirtualPageDirs(), virtualAddress);
+       if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
+               // no pagetable here
+               return B_OK;
+       }
+
+       // get the page table entry
+       struct thread* thread = thread_get_current_thread();
+       ThreadCPUPinner pinner(thread);
+
+       pae_page_table_entry* pageTable
+               = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                       *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
+       pae_page_table_entry entry
+               = pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
+
+       pinner.Unlock();
+
+       *_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;
+
+       // translate the page state flags
+       if ((entry & X86_PAE_PTE_USER) != 0) {
+               *_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
+                       | B_READ_AREA;
+       }
+
+       *_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
+               | B_KERNEL_READ_AREA
+               | ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
+               | ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
+               | ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
+
+       TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
+               B_PRIxPHYSADDR ":\n", *_physicalAddress, virtualAddress);
+
+       return B_OK;
 }
 
 
@@ -125,7 +426,7 @@
        uint32 *_flags)
 {
 // TODO: Implement!
-       panic("unsupported");
+       panic("X86VMTranslationMapPAE::QueryInterrupt(): not implemented");
        return B_UNSUPPORTED;
 }
 
@@ -134,18 +435,116 @@
 X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
        uint32 memoryType)
 {
-// TODO: Implement!
-       panic("unsupported");
-       return B_UNSUPPORTED;
+       start = ROUNDDOWN(start, B_PAGE_SIZE);
+       if (start >= end)
+               return B_OK;
+
+       TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
+               ", attributes: %#" B_PRIx32 "\n", start, end, attributes);
+
+       // compute protection flags
+       uint64 newProtectionFlags = 0;
+       if ((attributes & B_USER_PROTECTION) != 0) {
+               newProtectionFlags = X86_PAE_PTE_USER;
+               if ((attributes & B_WRITE_AREA) != 0)
+                       newProtectionFlags |= X86_PAE_PTE_WRITABLE;
+       } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
+               newProtectionFlags = X86_PAE_PTE_WRITABLE;
+
+       do {
+               pae_page_directory_entry* pageDirEntry
+                       = X86PagingMethodPAE::PageDirEntryForAddress(
+                               fPagingStructures->VirtualPageDirs(), start);
+               if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
+                       // no page table here, move the start up to access the next page
+                       // table
+                       start = ROUNDUP(start + 1, kPAEPageTableRange);
+                       continue;
+               }
+
+               struct thread* thread = thread_get_current_thread();
+               ThreadCPUPinner pinner(thread);
+
+               pae_page_table_entry* pageTable
+                       = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                               *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
+
+               uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
+               for (; index < kPAEPageTableEntryCount && start < end;
+                               index++, start += B_PAGE_SIZE) {
+                       pae_page_table_entry entry = pageTable[index];
+                       if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
+                               // page mapping not valid
+                               continue;
+                       }
+
+                       TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
+                               B_PRIxADDR "\n", start);
+
+                       // set the new protection flags -- we want to do that atomically,
+                       // without changing the accessed or dirty flag
+                       pae_page_table_entry oldEntry;
+                       while (true) {
+                               oldEntry = X86PagingMethodPAE::TestAndSetPageTableEntry(
+                                       &pageTable[index],
+                                       (entry & ~(X86_PAE_PTE_PROTECTION_MASK
+                                               | X86_PAE_PTE_MEMORY_TYPE_MASK))
+                                               | newProtectionFlags
+                                               | X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(
+                                                       memoryType),
+                                       entry);
+                               if (oldEntry == entry)
+                                       break;
+                               entry = oldEntry;
+                       }
+
+                       if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
+                               // Note, that we only need to invalidate the address, if the
+                               // accessed flag was set, since only then the entry could have been
+                               // in any TLB.
+                               InvalidatePage(start);
+                       }
+               }
+
+               pinner.Unlock();
+       } while (start != 0 && start < end);
+
+       return B_OK;
 }
 
 
 status_t
-X86VMTranslationMapPAE::ClearFlags(addr_t va, uint32 flags)
+X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
 {
-// TODO: Implement!
-       panic("unsupported");
-       return B_UNSUPPORTED;
+       pae_page_directory_entry* pageDirEntry
+               = X86PagingMethodPAE::PageDirEntryForAddress(
+                       fPagingStructures->VirtualPageDirs(), address);
+       if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
+               // no pagetable here
+               return B_OK;
+       }
+
+       uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
+               | ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);
+
+       struct thread* thread = thread_get_current_thread();
+       ThreadCPUPinner pinner(thread);
+
+       pae_page_table_entry* entry
+               = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                               *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
+                       + address / B_PAGE_SIZE % kPAEPageTableEntryCount;
+
+       // clear out the flags we've been requested to clear
+       pae_page_table_entry oldEntry
+               = X86PagingMethodPAE::ClearPageTableEntryFlags(entry, flagsToClear);
+
+       pinner.Unlock();
+
+       if ((oldEntry & flagsToClear) != 0)
+               InvalidatePage(address);
+
+       return B_OK;
 }
 
 
@@ -153,8 +552,118 @@
 X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
        bool unmapIfUnaccessed, bool& _modified)
 {
-// TODO: Implement!
-       panic("unsupported");
+       ASSERT(address % B_PAGE_SIZE == 0);
+
+       TRACE("X86VMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
+               ")\n", address);
+
+       pae_page_directory_entry* pageDirEntry
+               = X86PagingMethodPAE::PageDirEntryForAddress(
+                       fPagingStructures->VirtualPageDirs(), address);
+
+       RecursiveLocker locker(fLock);
+
+       if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
+               return false;
+
+       ThreadCPUPinner pinner(thread_get_current_thread());
+
+       pae_page_table_entry* entry
+               = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                               *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
+                       + address / B_PAGE_SIZE % kPAEPageTableEntryCount;
+
+       // perform the deed
+       pae_page_table_entry oldEntry;
+
+       if (unmapIfUnaccessed) {
+               while (true) {
+                       oldEntry = *entry;
+                       if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
+                               // page mapping not valid
+                               return false;
+                       }
+
+                       if (oldEntry & X86_PAE_PTE_ACCESSED) {
+                               // page was accessed -- just clear the flags
+                               oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
+                                       X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
+                               break;
+                       }
+
+                       // page hasn't been accessed -- unmap it
+                       if (X86PagingMethodPAE::TestAndSetPageTableEntry(entry, 0, oldEntry)
+                                       == oldEntry) {
+                               break;
+                       }
+
+                       // something changed -- check again
+               }
+       } else {
+               oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
+                       X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
+       }
+
+       pinner.Unlock();
+
+       _modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;
+
+       if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
+               // Note, that we only need to invalidate the address, if the
+               // accessed flags was set, since only then the entry could have been
+               // in any TLB.
+               InvalidatePage(address);
+               Flush();
+
+               return true;
+       }
+
+       if (!unmapIfUnaccessed)
+               return false;
+
+       // We have unmapped the address. Do the "high level" stuff.
+
+       fMapCount--;
+
+       if (area->cache_type == CACHE_TYPE_DEVICE)
+               return false;
+
+       // get the page
+       vm_page* page = vm_lookup_page(
+               (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
+       ASSERT(page != NULL);
+
+       // TODO: Here comes a lot of paging method and even architecture independent
+       // code. Refactor!
+
+       // remove the mapping object/decrement the wired_count of the page
+       vm_page_mapping* mapping = NULL;
+       if (area->wiring == B_NO_LOCK) {
+               vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
+               while ((mapping = iterator.Next()) != NULL) {
+                       if (mapping->area == area) {
+                               area->mappings.Remove(mapping);
+                               page->mappings.Remove(mapping);
+                               break;
+                       }
+               }
+
+               ASSERT(mapping != NULL);
+       } else
+               page->wired_count--;
+
+       locker.Unlock();
+
+       if (page->wired_count == 0 && page->mappings.IsEmpty())
+               atomic_add(&gMappedPagesCount, -1);
+
+       if (mapping != NULL) {
+               object_cache_free(gPageMappingsObjectCache, mapping,
+                       CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
+                       // Since this is called by the page daemon, we never want to lock
+                       // the kernel address space.
+       }
+
        return false;
 }
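
As a reference for the index arithmetic used throughout the diff
(PageDirEntryForAddress(), the "virtualAddress / B_PAGE_SIZE %
kPAEPageTableEntryCount" expressions, and kPAEPageTableRange), here is a small
stand-alone sketch of how a 32-bit virtual address splits up under PAE. The
constants follow the standard PAE layout (4 PDPT entries, 512 page directory
entries, 512 page table entries, 4 KiB pages); the kernel's actual constants
are defined elsewhere in the paging code and are assumed to match.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum {
	kPageSize               = 4096,	/* B_PAGE_SIZE on x86 */
	kPAEPageTableEntryCount = 512,	/* one page table maps 512 * 4 KiB = 2 MiB */
	kPAEPageDirEntryCount   = 512,
	kPAEPageTableRange      = kPageSize * kPAEPageTableEntryCount
};

int
main(void)
{
	uint32_t virtualAddress = 0x8004a123;

	uint32_t pdptIndex      = virtualAddress >> 30;                           /* bits 31-30 */
	uint32_t pageDirIndex   = (virtualAddress >> 21) % kPAEPageDirEntryCount; /* bits 29-21 */
	uint32_t pageTableIndex = (virtualAddress / kPageSize)
		% kPAEPageTableEntryCount;                                            /* bits 20-12 */
	uint32_t pageOffset     = virtualAddress % kPageSize;                     /* bits 11-0 */

	printf("pdpt %" PRIu32 ", page dir %" PRIu32 ", page table %" PRIu32
		", offset %#" PRIx32 "\n",
		pdptIndex, pageDirIndex, pageTableIndex, pageOffset);
	return 0;
}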
 

