[haiku-commits] r37064 - in haiku/trunk/src/system/kernel/arch/x86: . paging/32bit

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 8 Jun 2010 22:35:53 +0200 (CEST)

Author: bonefish
Date: 2010-06-08 22:35:53 +0200 (Tue, 08 Jun 2010)
New Revision: 37064
Changeset: http://dev.haiku-os.org/changeset/37064/haiku

Added:
   haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
Modified:
   haiku/trunk/src/system/kernel/arch/x86/Jamfile
   haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp
   haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.h
   haiku/trunk/src/system/kernel/arch/x86/paging/32bit/paging.h
Log:
* Moved X86VMTranslationMap32Bit to its own source file.
* Made all helper functions operating on paging structures static methods of
  X86PagingMethod32Bit.

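A note on the second log item, for readers following along: helpers like
memory_type_to_pte_flags() and put_page_table_entry_in_pgtable() below were
file-scope statics in X86PagingMethod32Bit.cpp, so once the translation map
moved to its own source file it could no longer reach them. Promoting them to
static methods of X86PagingMethod32Bit makes them callable from both
translation units. A minimal sketch of the resulting shape -- the method name
MemoryTypeToPageTableEntryFlags and the class layout are illustrative only,
since the new code is truncated below:

	// X86PagingMethod32Bit.h (sketch, not the actual header)
	typedef unsigned int uint32;
		// stand-in for the kernel's <SupportDefs.h> typedef

	class X86PagingMethod32Bit {
	public:
		// formerly: static inline uint32 memory_type_to_pte_flags(uint32)
		static uint32 MemoryTypeToPageTableEntryFlags(uint32 memoryType);
	};

	// A call site in the new X86VMTranslationMap32Bit.cpp would then read:
	// uint32 pteFlags
	//	= X86PagingMethod32Bit::MemoryTypeToPageTableEntryFlags(memoryType);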

Modified: haiku/trunk/src/system/kernel/arch/x86/Jamfile
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/Jamfile      2010-06-08 16:43:14 UTC (rev 37063)
+++ haiku/trunk/src/system/kernel/arch/x86/Jamfile      2010-06-08 20:35:53 UTC (rev 37064)
@@ -52,6 +52,7 @@
        # paging/32bit
        X86PagingMethod32Bit.cpp
        X86PagingStructures32Bit.cpp
+       X86VMTranslationMap32Bit.cpp
 
        x86_apic.cpp
        x86_hpet.cpp

Modified: haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp        2010-06-08 16:43:14 UTC (rev 37063)
+++ haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp        2010-06-08 20:35:53 UTC (rev 37064)
@@ -18,14 +18,8 @@
 #include <arch_system_info.h>
 #include <int.h>
 #include <thread.h>
-#include <slab/Slab.h>
-#include <smp.h>
-#include <util/AutoLock.h>
-#include <util/queue.h>
-#include <vm/vm_page.h>
-#include <vm/vm_priv.h>
+#include <vm/vm.h>
 #include <vm/VMAddressSpace.h>
-#include <vm/VMCache.h>
 
 #include "paging/32bit/X86PagingStructures32Bit.h"
 #include "paging/32bit/X86VMTranslationMap32Bit.h"
@@ -47,1034 +41,6 @@
 static const size_t kPageTableAlignment = 1024 * B_PAGE_SIZE;
 
 
-//     #pragma mark -
-
-
-//! TODO: currently assumes this translation map is active
-static status_t
-early_query(addr_t va, phys_addr_t *_physicalAddress)
-{
-       X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();
-       if ((method->PageHolePageDir()[VADDR_TO_PDENT(va)] & X86_PDE_PRESENT)
-                       == 0) {
-               // no pagetable here
-               return B_ERROR;
-       }
-
-       page_table_entry* pentry = method->PageHole() + va / B_PAGE_SIZE;
-       if ((*pentry & X86_PTE_PRESENT) == 0) {
-               // page mapping not valid
-               return B_ERROR;
-       }
-
-       *_physicalAddress = *pentry & X86_PTE_ADDRESS_MASK;
-       return B_OK;
-}
-
-
-static inline uint32
-memory_type_to_pte_flags(uint32 memoryType)
-{
-       // ATM we only handle the uncacheable and write-through type explicitly. For
-       // all other types we rely on the MTRRs to be set up correctly. Since we set
-       // the default memory type to write-back and since the uncacheable type in
-       // the PTE overrides any MTRR attribute (though, as per the specs, that is
-       // not recommended for performance reasons), this reduces the work we
-       // actually *have* to do with the MTRRs to setting the remaining types
-       // (usually only write-combining for the frame buffer).
-       switch (memoryType) {
-               case B_MTR_UC:
-                       return X86_PTE_CACHING_DISABLED | X86_PTE_WRITE_THROUGH;
-
-               case B_MTR_WC:
-                       // X86_PTE_WRITE_THROUGH would be closer, but the combination with
-                       // MTRR WC is "implementation defined" for Pentium Pro/II.
-                       return 0;
-
-               case B_MTR_WT:
-                       return X86_PTE_WRITE_THROUGH;
-
-               case B_MTR_WP:
-               case B_MTR_WB:
-               default:
-                       return 0;
-       }
-}
-
-
-static void
-put_page_table_entry_in_pgtable(page_table_entry* entry,
-       phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
-       bool globalPage)
-{
-       page_table_entry page = (physicalAddress & X86_PTE_ADDRESS_MASK)
-               | X86_PTE_PRESENT | (globalPage ? X86_PTE_GLOBAL : 0)
-               | memory_type_to_pte_flags(memoryType);
-
-       // if the page is user accessible, it's automatically
-       // accessible in kernel space, too (but with the same
-       // protection)
-       if ((attributes & B_USER_PROTECTION) != 0) {
-               page |= X86_PTE_USER;
-               if ((attributes & B_WRITE_AREA) != 0)
-                       page |= X86_PTE_WRITABLE;
-       } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
-               page |= X86_PTE_WRITABLE;
-
-       // put it in the page table
-       *(volatile page_table_entry*)entry = page;
-}
-
-
-//     #pragma mark -
-
-
-void
-x86_put_pgtable_in_pgdir(page_directory_entry *entry,
-       phys_addr_t pgtablePhysical, uint32 attributes)
-{
-       *entry = (pgtablePhysical & X86_PDE_ADDRESS_MASK)
-               | X86_PDE_PRESENT
-               | X86_PDE_WRITABLE
-               | X86_PDE_USER;
-               // TODO: we ignore the attributes of the page table - for compatibility
-               // with BeOS we allow having user accessible areas in the kernel address
-               // space. This is currently being used by some drivers, mainly for the
-               // frame buffer. Our current real time data implementation makes use of
-               // this fact, too.
-               // We might want to get rid of this possibility one day, especially if
-               // we intend to port it to a platform that does not support this.
-}
-
-
-void
-x86_early_prepare_page_tables(page_table_entry* pageTables, addr_t address,
-       size_t size)
-{
-       memset(pageTables, 0, B_PAGE_SIZE * (size / (B_PAGE_SIZE * 1024)));
-
-       // put the array of pgtables directly into the kernel pagedir
-       // these will be wired and kept mapped into virtual space to be easy to get
-       // to
-       {
-               addr_t virtualTable = (addr_t)pageTables;
-
-               page_directory_entry* pageHolePageDir
-                       = X86PagingMethod32Bit::Method()->PageHolePageDir();
-
-               for (size_t i = 0; i < (size / (B_PAGE_SIZE * 1024));
-                               i++, virtualTable += B_PAGE_SIZE) {
-                       phys_addr_t physicalTable = 0;
-                       early_query(virtualTable, &physicalTable);
-                       page_directory_entry* entry = &pageHolePageDir[
-                               (address / (B_PAGE_SIZE * 1024)) + i];
-                       x86_put_pgtable_in_pgdir(entry, physicalTable,
-                               B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
-               }
-       }
-}
-
-
-// #pragma mark - VM ops
-
-
-X86VMTranslationMap32Bit::X86VMTranslationMap32Bit()
-       :
-       fPagingStructures(NULL)
-{
-}
-
-
-X86VMTranslationMap32Bit::~X86VMTranslationMap32Bit()
-{
-       if (fPagingStructures == NULL)
-               return;
-
-       if (fPageMapper != NULL)
-               fPageMapper->Delete();
-
-       if (fPagingStructures->pgdir_virt != NULL) {
-               // cycle through and free all of the user space pgtables
-               for (uint32 i = VADDR_TO_PDENT(USER_BASE);
-                               i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
-                       if ((fPagingStructures->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
-                               addr_t address = fPagingStructures->pgdir_virt[i]
-                                       & X86_PDE_ADDRESS_MASK;
-                               vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
-                               if (!page)
-                                       panic("destroy_tmap: didn't find pgtable page\n");
-                               DEBUG_PAGE_ACCESS_START(page);
-                               vm_page_set_state(page, PAGE_STATE_FREE);
-                       }
-               }
-       }
-
-       fPagingStructures->RemoveReference();
-}
-
-
-status_t
-X86VMTranslationMap32Bit::Init(bool kernel)
-{
-       TRACE("X86VMTranslationMap32Bit::Init()\n");
-
-       X86VMTranslationMap::Init(kernel);
-
-       fPagingStructures = new(std::nothrow) X86PagingStructures32Bit;
-       if (fPagingStructures == NULL)
-               return B_NO_MEMORY;
-
-       X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();
-
-       if (!kernel) {
-               // user
-               // allocate a physical page mapper
-               status_t error = method->PhysicalPageMapper()
-                       ->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
-               if (error != B_OK)
-                       return error;
-
-               // allocate the page directory
-               page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
-                       B_PAGE_SIZE, B_PAGE_SIZE);
-               if (virtualPageDir == NULL)
-                       return B_NO_MEMORY;
-
-               // look up the page directory's physical address
-               phys_addr_t physicalPageDir;
-               vm_get_page_mapping(VMAddressSpace::KernelID(),
-                       (addr_t)virtualPageDir, &physicalPageDir);
-
-               fPagingStructures->Init(virtualPageDir, physicalPageDir,
-                       method->KernelVirtualPageDirectory());
-       } else {
-               // kernel
-               // get the physical page mapper
-               fPageMapper = method->KernelPhysicalPageMapper();
-
-               // we already know the kernel pgdir mapping
-               fPagingStructures->Init(method->KernelVirtualPageDirectory(),
-                       method->KernelPhysicalPageDirectory(), NULL);
-       }
-
-       return B_OK;
-}
-
-
-size_t
-X86VMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
-{
-       // If start == 0, the actual base address is not yet known to the caller and
-       // we shall assume the worst case.
-       if (start == 0) {
-               // offset the range so it has the worst possible alignment
-               start = 1023 * B_PAGE_SIZE;
-               end += 1023 * B_PAGE_SIZE;
-       }
-
-       return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
-}
-
-
-status_t
-X86VMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
-       uint32 memoryType, vm_page_reservation* reservation)
-{
-       TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
-
-/*
-       dprintf("pgdir at 0x%x\n", pgdir);
-       dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
-       dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
-       dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
-       dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
-       dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
-*/
-       page_directory_entry* pd = fPagingStructures->pgdir_virt;
-
-       // check to see if a page table exists for this range
-       uint32 index = VADDR_TO_PDENT(va);
-       if ((pd[index] & X86_PDE_PRESENT) == 0) {
-               phys_addr_t pgtable;
-               vm_page *page;
-
-               // we need to allocate a pgtable
-               page = vm_page_allocate_page(reservation,
-                       PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
-
-               DEBUG_PAGE_ACCESS_END(page);
-
-               pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
-
-               TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);
-
-               // put it in the pgdir
-               x86_put_pgtable_in_pgdir(&pd[index], pgtable, attributes
-                       | ((attributes & B_USER_PROTECTION) != 0
-                                       ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
-
-               // update any other page directories, if it maps kernel space
-               if (index >= FIRST_KERNEL_PGDIR_ENT
-                       && index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
-                       X86PagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
-               }
-
-               fMapCount++;
-       }
-
-       // now, fill in the pentry
-       struct thread* thread = thread_get_current_thread();
-       ThreadCPUPinner pinner(thread);
-
-       page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-               pd[index] & X86_PDE_ADDRESS_MASK);
-       index = VADDR_TO_PTENT(va);
-
-       ASSERT_PRINT((pt[index] & X86_PTE_PRESENT) == 0,
-               "virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
-               pt[index]);
-
-       put_page_table_entry_in_pgtable(&pt[index], pa, attributes, memoryType,
-               fIsKernelMap);
-
-       pinner.Unlock();
-
-       // Note: We don't need to invalidate the TLB for this address, as previously
-       // the entry was not present and the TLB doesn't cache those entries.
-
-       fMapCount++;
-
-       return 0;
-}
-
-
-status_t
-X86VMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
-{
-       page_directory_entry *pd = fPagingStructures->pgdir_virt;
-
-       start = ROUNDDOWN(start, B_PAGE_SIZE);
-       end = ROUNDUP(end, B_PAGE_SIZE);
-
-       TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
-
-restart:
-       if (start >= end)
-               return B_OK;
-
-       int index = VADDR_TO_PDENT(start);
-       if ((pd[index] & X86_PDE_PRESENT) == 0) {
-               // no pagetable here, move the start up to access the next page table
-               start = ROUNDUP(start + 1, B_PAGE_SIZE * 1024);
-               if (start == 0)
-                       return B_OK;
-               goto restart;
-       }
-
-       struct thread* thread = thread_get_current_thread();
-       ThreadCPUPinner pinner(thread);
-
-       page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-               pd[index] & X86_PDE_ADDRESS_MASK);
-
-       for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
-                       index++, start += B_PAGE_SIZE) {
-               if ((pt[index] & X86_PTE_PRESENT) == 0) {
-                       // page mapping not valid
-                       continue;
-               }
-
-               TRACE("unmap_tmap: removing page 0x%lx\n", start);
-
-               page_table_entry oldEntry = clear_page_table_entry_flags(&pt[index],
-                       X86_PTE_PRESENT);
-               fMapCount--;
-
-               if ((oldEntry & X86_PTE_ACCESSED) != 0) {
-                       // Note that we only need to invalidate the address if the
-                       // accessed flag was set, since only then the entry could have been
-                       // in any TLB.
-                       if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
-                               fInvalidPages[fInvalidPagesCount] = start;
-
-                       fInvalidPagesCount++;
-               }
-       }
-
-       pinner.Unlock();
-
-       goto restart;
-}
-
-
-/*!    Caller must have locked the cache of the page to be unmapped.
-       This object shouldn't be locked.
-*/
-status_t
-X86VMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
-       bool updatePageQueue)
-{
-       ASSERT(address % B_PAGE_SIZE == 0);
-
-       page_directory_entry* pd = fPagingStructures->pgdir_virt;
-
-       TRACE("X86VMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);
-
-       RecursiveLocker locker(fLock);
-
-       int index = VADDR_TO_PDENT(address);
-       if ((pd[index] & X86_PDE_PRESENT) == 0)
-               return B_ENTRY_NOT_FOUND;
-
-       ThreadCPUPinner pinner(thread_get_current_thread());
-
-       page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-               pd[index] & X86_PDE_ADDRESS_MASK);
-
-       index = VADDR_TO_PTENT(address);
-       page_table_entry oldEntry = clear_page_table_entry(&pt[index]);
-
-       pinner.Unlock();
-
-       if ((oldEntry & X86_PTE_PRESENT) == 0) {
-               // page mapping not valid
-               return B_ENTRY_NOT_FOUND;
-       }
-
-       fMapCount--;
-
-       if ((oldEntry & X86_PTE_ACCESSED) != 0) {
-               // Note that we only need to invalidate the address if the
-               // accessed flag was set, since only then the entry could have been
-               // in any TLB.
-               if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
-                       fInvalidPages[fInvalidPagesCount] = address;
-
-               fInvalidPagesCount++;
-
-               Flush();
-
-               // NOTE: Between clearing the page table entry and Flush() other
-               // processors (actually even this processor with another thread of the
-               // same team) could still access the page in question via their cached
-               // entry. We can obviously lose a modified flag in this case, with the
-               // effect that the page looks unmodified (and might thus be recycled),
-               // but is actually modified.
-               // In most cases this is harmless, but for vm_remove_all_page_mappings()
-               // this is actually a problem.
-               // Interestingly FreeBSD seems to ignore this problem as well
-               // (cf. pmap_remove_all()), unless I've missed something.
-       }
-
-       if (area->cache_type == CACHE_TYPE_DEVICE)
-               return B_OK;
-
-       // get the page
-       vm_page* page = vm_lookup_page(
-               (oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
-       ASSERT(page != NULL);
-
-       // transfer the accessed/dirty flags to the page
-       if ((oldEntry & X86_PTE_ACCESSED) != 0)
-               page->accessed = true;
-       if ((oldEntry & X86_PTE_DIRTY) != 0)
-               page->modified = true;
-
-       // remove the mapping object/decrement the wired_count of the page
-       vm_page_mapping* mapping = NULL;
-       if (area->wiring == B_NO_LOCK) {
-               vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
-               while ((mapping = iterator.Next()) != NULL) {
-                       if (mapping->area == area) {
-                               area->mappings.Remove(mapping);
-                               page->mappings.Remove(mapping);
-                               break;
-                       }
-               }
-
-               ASSERT(mapping != NULL);
-       } else
-               page->wired_count--;
-
-       locker.Unlock();
-
-       if (page->wired_count == 0 && page->mappings.IsEmpty()) {
-               atomic_add(&gMappedPagesCount, -1);
-
-               if (updatePageQueue) {
-                       if (page->Cache()->temporary)
-                               vm_page_set_state(page, PAGE_STATE_INACTIVE);
-                       else if (page->modified)
-                               vm_page_set_state(page, PAGE_STATE_MODIFIED);
-                       else
-                               vm_page_set_state(page, PAGE_STATE_CACHED);
-               }
-       }
-
-       if (mapping != NULL) {
-               bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
-               object_cache_free(gPageMappingsObjectCache, mapping,
-                       CACHE_DONT_WAIT_FOR_MEMORY
-                               | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
-       }
-
-       return B_OK;
-}
-
-
-void
-X86VMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
-       bool updatePageQueue)
-{
-       page_directory_entry* pd = fPagingStructures->pgdir_virt;
-
-       addr_t start = base;
-       addr_t end = base + size;
-
-       TRACE("X86VMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
-               B_PRIxADDR ")\n", area, start, end);
-
-       VMAreaMappings queue;
-
-       RecursiveLocker locker(fLock);
-
-       while (start < end) {
-               int index = VADDR_TO_PDENT(start);
-               if ((pd[index] & X86_PDE_PRESENT) == 0) {
-                       // no page table here, move the start up to access the next page
-                       // table
-                       start = ROUNDUP(start + 1, B_PAGE_SIZE * 1024);
-                       if (start == 0)
-                               break;
-                       continue;
-               }
-
-               struct thread* thread = thread_get_current_thread();
-               ThreadCPUPinner pinner(thread);
-
-               page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-                       pd[index] & X86_PDE_ADDRESS_MASK);
-
-               for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
-                               index++, start += B_PAGE_SIZE) {
-                       page_table_entry oldEntry = clear_page_table_entry(&pt[index]);
-                       if ((oldEntry & X86_PTE_PRESENT) == 0)
-                               continue;
-
-                       fMapCount--;
-
-                       if ((oldEntry & X86_PTE_ACCESSED) != 0) {
-                               // Note that we only need to invalidate the address if the
-                               // accessed flag was set, since only then the entry could have
-                               // been in any TLB.
-                               if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
-                                       fInvalidPages[fInvalidPagesCount] = start;
-
-                               fInvalidPagesCount++;
-                       }
-
-                       if (area->cache_type != CACHE_TYPE_DEVICE) {
-                               // get the page
-                               vm_page* page = vm_lookup_page(
-                                       (oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
-                               ASSERT(page != NULL);
-
-                               DEBUG_PAGE_ACCESS_START(page);
-
-                               // transfer the accessed/dirty flags to the page
-                               if ((oldEntry & X86_PTE_ACCESSED) != 0)
-                                       page->accessed = true;
-                               if ((oldEntry & X86_PTE_DIRTY) != 0)
-                                       page->modified = true;
-
-                               // remove the mapping object/decrement the wired_count of the
-                               // page
-                               if (area->wiring == B_NO_LOCK) {
-                                       vm_page_mapping* mapping = NULL;
-                                       vm_page_mappings::Iterator iterator
-                                               = page->mappings.GetIterator();
-                                       while ((mapping = iterator.Next()) != NULL) {
-                                               if (mapping->area == area)
-                                                       break;
-                                       }
-
-                                       ASSERT(mapping != NULL);
-
-                                       area->mappings.Remove(mapping);
-                                       page->mappings.Remove(mapping);
-                                       queue.Add(mapping);
-                               } else
-                                       page->wired_count--;
-
-                               if (page->wired_count == 0 && page->mappings.IsEmpty()) {
-                                       atomic_add(&gMappedPagesCount, -1);
-
-                                       if (updatePageQueue) {
-                                               if (page->Cache()->temporary)
-                                                       vm_page_set_state(page, PAGE_STATE_INACTIVE);
-                                               else if (page->modified)
-                                                       vm_page_set_state(page, PAGE_STATE_MODIFIED);
-                                               else
-                                                       vm_page_set_state(page, PAGE_STATE_CACHED);
-                                       }
-                               }
-
-                               DEBUG_PAGE_ACCESS_END(page);
-                       }
-               }
-
-               Flush();
-                       // flush explicitly, since we directly use the lock
-
-               pinner.Unlock();
-       }
-
-       // TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
-       // really critical here, as in all cases this method is used, the unmapped
-       // area range is unmapped for good (resized/cut) and the pages will likely
-       // be freed.
-
-       locker.Unlock();
-
-       // free removed mappings
-       bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
-       uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
-               | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
-       while (vm_page_mapping* mapping = queue.RemoveHead())
-               object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
-}
-
-
-void
-X86VMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
-       bool ignoreTopCachePageFlags)
-{
-       if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
-               X86VMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
-                       true);
-               return;
-       }
-
-       bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
-
-       page_directory_entry* pd = fPagingStructures->pgdir_virt;
-
-       RecursiveLocker locker(fLock);
-
-       VMAreaMappings mappings;
-       mappings.MoveFrom(&area->mappings);
-
-       for (VMAreaMappings::Iterator it = mappings.GetIterator();
-                       vm_page_mapping* mapping = it.Next();) {
-               vm_page* page = mapping->page;
-               page->mappings.Remove(mapping);
-
-               VMCache* cache = page->Cache();
-
-               bool pageFullyUnmapped = false;
-               if (page->wired_count == 0 && page->mappings.IsEmpty()) {
-                       atomic_add(&gMappedPagesCount, -1);
-                       pageFullyUnmapped = true;
-               }
-
-               if (unmapPages || cache != area->cache) {
-                       addr_t address = area->Base()
-                               + ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
-
-                       int index = VADDR_TO_PDENT(address);
-                       if ((pd[index] & X86_PDE_PRESENT) == 0) {
-                               panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
-                                       "has no page dir entry", page, area, address);
-                               continue;
-                       }
-
-                       ThreadCPUPinner pinner(thread_get_current_thread());
-
-                       page_table_entry* pt
-                               = (page_table_entry*)fPageMapper->GetPageTableAt(
-                                       pd[index] & X86_PDE_ADDRESS_MASK);
-                       page_table_entry oldEntry = clear_page_table_entry(
-                               &pt[VADDR_TO_PTENT(address)]);
-
-                       pinner.Unlock();
-
-                       if ((oldEntry & X86_PTE_PRESENT) == 0) {
-                               panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
-                                       "has no page table entry", page, area, address);
-                               continue;
-                       }
-
-                       // transfer the accessed/dirty flags to the page and invalidate
-                       // the mapping, if necessary
-                       if ((oldEntry & X86_PTE_ACCESSED) != 0) {
-                               page->accessed = true;
-
-                               if (!deletingAddressSpace) {
-                                       if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
-                                               fInvalidPages[fInvalidPagesCount] = address;
-
-                                       fInvalidPagesCount++;
-                               }
-                       }
-
-                       if ((oldEntry & X86_PTE_DIRTY) != 0)
-                               page->modified = true;
-
-                       if (pageFullyUnmapped) {
-                               DEBUG_PAGE_ACCESS_START(page);
-
-                               if (cache->temporary)
-                                       vm_page_set_state(page, PAGE_STATE_INACTIVE);
-                               else if (page->modified)
-                                       vm_page_set_state(page, PAGE_STATE_MODIFIED);
-                               else
-                                       vm_page_set_state(page, PAGE_STATE_CACHED);
-
-                               DEBUG_PAGE_ACCESS_END(page);
-                       }
-               }
-
-               fMapCount--;
-       }
-
-       Flush();
-               // flush explicitly, since we directly use the lock
-
-       locker.Unlock();
-
-       bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
-       uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
-               | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
-       while (vm_page_mapping* mapping = mappings.RemoveHead())
-               object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
-}
-
-
-status_t
-X86VMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
-       uint32 *_flags)
-{
-       // default the flags to not present
-       *_flags = 0;
-       *_physical = 0;
-
-       int index = VADDR_TO_PDENT(va);
-       page_directory_entry *pd = fPagingStructures->pgdir_virt;
-       if ((pd[index] & X86_PDE_PRESENT) == 0) {
-               // no pagetable here
-               return B_OK;
-       }
-
-       struct thread* thread = thread_get_current_thread();
-       ThreadCPUPinner pinner(thread);
-
-       page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-               pd[index] & X86_PDE_ADDRESS_MASK);
-       page_table_entry entry = pt[VADDR_TO_PTENT(va)];
-
-       *_physical = entry & X86_PDE_ADDRESS_MASK;
-
-       // read in the page state flags
-       if ((entry & X86_PTE_USER) != 0) {
-               *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
-                       | B_READ_AREA;
-       }
-
-       *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
-               | B_KERNEL_READ_AREA
-               | ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
-               | ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
-               | ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
-
-       pinner.Unlock();
-
-       TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);
-
-       return B_OK;
-}
-
-
-status_t
-X86VMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
-       uint32 *_flags)
-{
-       *_flags = 0;
-       *_physical = 0;
-
-       int index = VADDR_TO_PDENT(va);
-       page_directory_entry* pd = fPagingStructures->pgdir_virt;
-       if ((pd[index] & X86_PDE_PRESENT) == 0) {
-               // no pagetable here
-               return B_OK;
-       }
-
-       // map page table entry
-       page_table_entry* pt = (page_table_entry*)X86PagingMethod32Bit::Method()
-               ->PhysicalPageMapper()->InterruptGetPageTableAt(
-                       pd[index] & X86_PDE_ADDRESS_MASK);
-       page_table_entry entry = pt[VADDR_TO_PTENT(va)];
-
-       *_physical = entry & X86_PDE_ADDRESS_MASK;
-
-       // read in the page state flags
-       if ((entry & X86_PTE_USER) != 0) {
-               *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
-                       | B_READ_AREA;
-       }
-
-       *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
-               | B_KERNEL_READ_AREA
-               | ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
-               | ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
-               | ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
-
-       return B_OK;
-}
-
-
-status_t
-X86VMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
-       uint32 memoryType)
-{
-       page_directory_entry *pd = fPagingStructures->pgdir_virt;
-
-       start = ROUNDDOWN(start, B_PAGE_SIZE);
-
-       TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
-               attributes);
-
-       // compute protection flags
-       uint32 newProtectionFlags = 0;
-       if ((attributes & B_USER_PROTECTION) != 0) {
-               newProtectionFlags = X86_PTE_USER;
-               if ((attributes & B_WRITE_AREA) != 0)
-                       newProtectionFlags |= X86_PTE_WRITABLE;
-       } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
-               newProtectionFlags = X86_PTE_WRITABLE;
-
-restart:
-       if (start >= end)
-               return B_OK;
-
-       int index = VADDR_TO_PDENT(start);
-       if ((pd[index] & X86_PDE_PRESENT) == 0) {
-               // no pagetable here, move the start up to access the next page table
-               start = ROUNDUP(start + 1, B_PAGE_SIZE * 1024);
-               if (start == 0)
-                       return B_OK;
-               goto restart;
-       }
-
-       struct thread* thread = thread_get_current_thread();
-       ThreadCPUPinner pinner(thread);
-
-       page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-               pd[index] & X86_PDE_ADDRESS_MASK);
-
-       for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
-                       index++, start += B_PAGE_SIZE) {
-               page_table_entry entry = pt[index];
-               if ((entry & X86_PTE_PRESENT) == 0) {
-                       // page mapping not valid
-                       continue;
-               }
-
-               TRACE("protect_tmap: protect page 0x%lx\n", start);
-
-               // set the new protection flags -- we want to do that atomically,
-               // without changing the accessed or dirty flag
-               page_table_entry oldEntry;
-               while (true) {
-                       oldEntry = test_and_set_page_table_entry(&pt[index],
-                               (entry & ~(X86_PTE_PROTECTION_MASK | X86_PTE_MEMORY_TYPE_MASK))
-                                       | newProtectionFlags | memory_type_to_pte_flags(memoryType),
-                               entry);
-                       if (oldEntry == entry)
-                               break;
-                       entry = oldEntry;
-               }
-
-               if ((oldEntry & X86_PTE_ACCESSED) != 0) {
-                       // Note that we only need to invalidate the address if the
-                       // accessed flag was set, since only then the entry could have been
-                       // in any TLB.
-                       if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
-                               fInvalidPages[fInvalidPagesCount] = start;
-
-                       fInvalidPagesCount++;
-               }
-       }
-
-       pinner.Unlock();
-
-       goto restart;
-}
-
-
-status_t
-X86VMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
-{
-       int index = VADDR_TO_PDENT(va);
-       page_directory_entry* pd = fPagingStructures->pgdir_virt;
-       if ((pd[index] & X86_PDE_PRESENT) == 0) {
-               // no pagetable here
-               return B_OK;
-       }
-
-       uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
-               | ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
-
-       struct thread* thread = thread_get_current_thread();
-       ThreadCPUPinner pinner(thread);
-
-       page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-               pd[index] & X86_PDE_ADDRESS_MASK);
-       index = VADDR_TO_PTENT(va);
-
-       // clear out the flags we've been requested to clear
-       page_table_entry oldEntry
-               = clear_page_table_entry_flags(&pt[index], flagsToClear);
-
-       pinner.Unlock();
-
-       if ((oldEntry & flagsToClear) != 0) {
-               if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
-                       fInvalidPages[fInvalidPagesCount] = va;
-
-               fInvalidPagesCount++;
-       }
-
-       return B_OK;
-}
-
-
-bool
-X86VMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
-       bool unmapIfUnaccessed, bool& _modified)
-{
-       ASSERT(address % B_PAGE_SIZE == 0);
-
-       page_directory_entry* pd = fPagingStructures->pgdir_virt;
-
-       TRACE("X86VMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
-               ")\n", address);
-
-       RecursiveLocker locker(fLock);
-
-       int index = VADDR_TO_PDENT(address);
-       if ((pd[index] & X86_PDE_PRESENT) == 0)
-               return false;
-
-       ThreadCPUPinner pinner(thread_get_current_thread());
-
-       page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
-               pd[index] & X86_PDE_ADDRESS_MASK);
-
-       index = VADDR_TO_PTENT(address);
-
-       // perform the deed
-       page_table_entry oldEntry;
-
-       if (unmapIfUnaccessed) {
-               while (true) {
-                       oldEntry = pt[index];
-                       if ((oldEntry & X86_PTE_PRESENT) == 0) {
-                               // page mapping not valid
-                               return false;
-                       }
-
-                       if (oldEntry & X86_PTE_ACCESSED) {
-                               // page was accessed -- just clear the flags
-                               oldEntry = clear_page_table_entry_flags(&pt[index],
-                                       X86_PTE_ACCESSED | X86_PTE_DIRTY);
-                               break;
-                       }
-
-                       // page hasn't been accessed -- unmap it
-                       if (test_and_set_page_table_entry(&pt[index], 0, oldEntry)
-                                       == oldEntry) {
-                               break;
-                       }
-
-                       // something changed -- check again
-               }
-       } else {
-               oldEntry = clear_page_table_entry_flags(&pt[index],
-                       X86_PTE_ACCESSED | X86_PTE_DIRTY);
-       }
-
-       pinner.Unlock();
-
-       _modified = (oldEntry & X86_PTE_DIRTY) != 0;
-
-       if ((oldEntry & X86_PTE_ACCESSED) != 0) {
-               // Note that we only need to invalidate the address if the
-               // accessed flag was set, since only then the entry could have been

[... truncated: 1325 lines follow ...]
