Author: bonefish Date: 2010-06-10 19:38:47 +0200 (Thu, 10 Jun 2010) New Revision: 37088 Changeset: http://dev.haiku-os.org/changeset/37088/haiku Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h Log: Added methods {Allocate,Free}32BitPage(), which allocate+map/unmap+free a page with a 32 bit physical address (needed for the PDPTs). A small set of free pages is cached, so the rather expensive vm_page_allocate_page_run() can be avoided most of the time. Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp =================================================================== --- haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp 2010-06-10 17:31:57 UTC (rev 37087) +++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp 2010-06-10 17:38:47 UTC (rev 37088) @@ -13,7 +13,9 @@ #include <stdlib.h> #include <string.h> +#include <util/AutoLock.h> #include <vm/vm.h> +#include <vm/vm_page.h> #include <vm/VMAddressSpace.h> #include "paging/32bit/paging.h" @@ -38,6 +40,10 @@ using X86LargePhysicalPageMapper::PhysicalPageSlot; +// number of 32 bit pages that will be cached +static const page_num_t kMaxFree32BitPagesCount = 32; + + // #pragma mark - ToPAESwitcher @@ -472,8 +478,11 @@ X86PagingMethodPAE::X86PagingMethodPAE() : fPhysicalPageMapper(NULL), - fKernelPhysicalPageMapper(NULL) + fKernelPhysicalPageMapper(NULL), + fFreePages(NULL), + fFreePagesCount(0) { + mutex_init(&fFreePagesLock, "x86 PAE free pages"); } @@ -528,6 +537,14 @@ if (error != B_OK) return error; + // The early physical page mapping mechanism is no longer needed. Unmap the + // slot. 
+	*fFreeVirtualSlotPTE = 0;
+	invalidate_TLB(fFreeVirtualSlot);
+
+	fFreeVirtualSlotPTE = NULL;
+	fFreeVirtualSlot = 0;
+
 	return B_OK;
 }
 
@@ -645,6 +662,73 @@
 }
 
 
+void*
+X86PagingMethodPAE::Allocate32BitPage(phys_addr_t& _physicalAddress,
+	void*& _handle)
+{
+	// get a free page
+	MutexLocker locker(fFreePagesLock);
+	vm_page* page;
+	if (fFreePages != NULL) {
+		page = fFreePages;
+		fFreePages = page->cache_next;
+		fFreePagesCount--;
+		locker.Unlock();
+	} else {
+		// no pages -- allocate one
+		locker.Unlock();
+		page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 0, 0x100000000LL, 1,
+			VM_PRIORITY_SYSTEM);
+		// Check for allocation failure before touching the page --
+		// DEBUG_PAGE_ACCESS_END(NULL) would dereference a NULL pointer.
+		if (page == NULL)
+			return NULL;
+		DEBUG_PAGE_ACCESS_END(page);
+	}
+
+	// map the page
+	phys_addr_t physicalAddress
+		= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
+	addr_t virtualAddress;
+	if (fPhysicalPageMapper->GetPage(physicalAddress, &virtualAddress, &_handle)
+			!= B_OK) {
+		// mapping failed -- free page
+		locker.Lock();
+		page->cache_next = fFreePages;
+		fFreePages = page;
+		fFreePagesCount++;
+		return NULL;
+	}
+
+	_physicalAddress = physicalAddress;
+	return (void*)virtualAddress;
+}
+
+
+void
+X86PagingMethodPAE::Free32BitPage(void* address, phys_addr_t physicalAddress,
+	void* handle)
+{
+	// unmap the page
+	fPhysicalPageMapper->PutPage((addr_t)address, handle);
+
+	// free it
+	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
+	MutexLocker locker(fFreePagesLock);
+	if (fFreePagesCount < kMaxFree32BitPagesCount) {
+		// cache not full yet -- cache it
+		page->cache_next = fFreePages;
+		fFreePages = page;
+		fFreePagesCount++;
+	} else {
+		// cache full -- free it
+		locker.Unlock();
+		DEBUG_PAGE_ACCESS_START(page);
+		vm_page_free(NULL, page);
+	}
+}
+
+
 bool
 X86PagingMethodPAE::_EarlyQuery(addr_t virtualAddress,
 	phys_addr_t* _physicalAddress)

Modified: haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h
===================================================================
---
haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h 2010-06-10 17:31:57 UTC (rev 37087) +++ haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h 2010-06-10 17:38:47 UTC (rev 37088) @@ -8,6 +8,9 @@ #include <KernelExport.h> +#include <lock.h> +#include <vm/vm_types.h> + #include "paging/pae/paging.h" #include "paging/X86PagingMethod.h" #include "paging/X86PagingStructures.h" @@ -41,6 +44,12 @@ virtual bool IsKernelPageAccessible(addr_t virtualAddress, uint32 protection); + void* Allocate32BitPage( + phys_addr_t& _physicalAddress, + void*& _handle); + void Free32BitPage(void* address, + phys_addr_t physicalAddress, void* handle); + inline X86PhysicalPageMapper* PhysicalPageMapper() const { return fPhysicalPageMapper; } inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const @@ -108,6 +117,10 @@ phys_addr_t fKernelPhysicalPageDirs[4]; addr_t fFreeVirtualSlot; pae_page_table_entry* fFreeVirtualSlotPTE; + + mutex fFreePagesLock; + vm_page* fFreePages; + page_num_t fFreePagesCount; };