[haiku-commits] r37014 - haiku/trunk/src/system/kernel/arch/x86

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sat, 5 Jun 2010 00:53:17 +0200 (CEST)

Author: bonefish
Date: 2010-06-05 00:53:17 +0200 (Sat, 05 Jun 2010)
New Revision: 37014
Changeset: http://dev.haiku-os.org/changeset/37014/haiku

Modified:
   haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h
   haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
   haiku/trunk/src/system/kernel/arch/x86/x86_paging.h
Log:
Moved the page mapper and the page invalidation cache from
vm_translation_map_arch_info to X86VMTranslationMap where they actually
belong.


Modified: haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h        2010-06-04 22:52:06 UTC (rev 37013)
+++ haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h        2010-06-04 22:53:17 UTC (rev 37014)
@@ -9,6 +9,9 @@
 #include <vm/VMTranslationMap.h>
 
 
+#define PAGE_INVALIDATE_CACHE_SIZE 64
+
+
 struct X86VMTranslationMap : VMTranslationMap {
                                                                X86VMTranslationMap();
        virtual                                         ~X86VMTranslationMap();
@@ -65,6 +68,9 @@
 
 protected:
                        vm_translation_map_arch_info* fArchData;
+                       TranslationMapPhysicalPageMapper* fPageMapper;
+                       int                                     fInvalidPagesCount;
+                       addr_t                          fInvalidPages[PAGE_INVALIDATE_CACHE_SIZE];
 };
 
 

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-06-04 22:52:06 UTC (rev 37013)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-06-04 22:53:17 UTC (rev 37014)
@@ -278,15 +278,22 @@
 
 
 X86VMTranslationMap::X86VMTranslationMap()
+       :
+       fArchData(NULL),
+       fPageMapper(NULL),
+       fInvalidPagesCount(0)
 {
 }
 
 
 X86VMTranslationMap::~X86VMTranslationMap()
 {
-       if (fArchData->page_mapper != NULL)
-               fArchData->page_mapper->Delete();
+       if (fArchData == NULL)
+               return;
 
+       if (fPageMapper != NULL)
+               fPageMapper->Delete();
+
        if (fArchData->pgdir_virt != NULL) {
                // cycle through and free all of the user space pgtables
                for (uint32 i = VADDR_TO_PDENT(USER_BASE);
@@ -317,25 +324,21 @@
                return B_NO_MEMORY;
 
        fArchData->active_on_cpus = 0;
-       fArchData->num_invalidate_pages = 0;
-       fArchData->page_mapper = NULL;
 
        if (!kernel) {
                // user
                // allocate a physical page mapper
                status_t error = sPhysicalPageMapper
-                       ->CreateTranslationMapPhysicalPageMapper(
-                               &fArchData->page_mapper);
+                       ->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
                if (error != B_OK)
                        return error;
 
                // allocate a pgdir
                fArchData->pgdir_virt = (page_directory_entry *)memalign(
                        B_PAGE_SIZE, B_PAGE_SIZE);
-               if (fArchData->pgdir_virt == NULL) {
-                       fArchData->page_mapper->Delete();
+               if (fArchData->pgdir_virt == NULL)
                        return B_NO_MEMORY;
-               }
+
                phys_addr_t physicalPageDir;
                vm_get_page_mapping(VMAddressSpace::KernelID(),
                        (addr_t)fArchData->pgdir_virt,
@@ -344,7 +347,7 @@
        } else {
                // kernel
                // get the physical page mapper
-               fArchData->page_mapper = sKernelPhysicalPageMapper;
+               fPageMapper = sKernelPhysicalPageMapper;
 
                // we already know the kernel pgdir mapping
                fArchData->pgdir_virt = sKernelVirtualPageDirectory;
@@ -394,7 +397,7 @@
        if (recursive_lock_get_recursion(&fLock) == 1) {
                // we were the first one to grab the lock
                TRACE("clearing invalidated page count\n");
-               fArchData->num_invalidate_pages = 0;
+               fInvalidPagesCount = 0;
        }
 
        return true;
@@ -483,7 +486,7 @@
        struct thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);
 
-       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
+       page_table_entry* pt = fPageMapper->GetPageTableAt(
                pd[index] & X86_PDE_ADDRESS_MASK);
        index = VADDR_TO_PTENT(va);
 
@@ -531,8 +534,8 @@
        struct thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);
 
-       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
-               pd[index]  & X86_PDE_ADDRESS_MASK);
+       page_table_entry* pt = fPageMapper->GetPageTableAt(
+               pd[index] & X86_PDE_ADDRESS_MASK);
 
        for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
                        index++, start += B_PAGE_SIZE) {
@@ -551,13 +554,10 @@
                        // Note, that we only need to invalidate the address, if the
                        // accessed flags was set, since only then the entry could have been
                        // in any TLB.
-                       if (fArchData->num_invalidate_pages
-                                       < PAGE_INVALIDATE_CACHE_SIZE) {
-                               fArchData->pages_to_invalidate[
-                                       fArchData->num_invalidate_pages] = start;
-                       }
+                       if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
+                               fInvalidPages[fInvalidPagesCount] = start;
 
-                       fArchData->num_invalidate_pages++;
+                       fInvalidPagesCount++;
                }
        }
 
@@ -588,7 +588,7 @@
 
        ThreadCPUPinner pinner(thread_get_current_thread());
 
-       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
+       page_table_entry* pt = fPageMapper->GetPageTableAt(
                pd[index] & X86_PDE_ADDRESS_MASK);
 
        index = VADDR_TO_PTENT(address);
@@ -607,13 +607,10 @@
                // Note, that we only need to invalidate the address, if the
                // accessed flags was set, since only then the entry could have been
                // in any TLB.
-               if (fArchData->num_invalidate_pages
-                               < PAGE_INVALIDATE_CACHE_SIZE) {
-                       fArchData->pages_to_invalidate[fArchData->num_invalidate_pages]
-                               = address;
-               }
+               if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
+                       fInvalidPages[fInvalidPagesCount] = address;
 
-               fArchData->num_invalidate_pages++;
+               fInvalidPagesCount++;
 
                Flush();
 
@@ -715,8 +712,8 @@
                struct thread* thread = thread_get_current_thread();
                ThreadCPUPinner pinner(thread);
 
-               page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
-                       pd[index]  & X86_PDE_ADDRESS_MASK);
+               page_table_entry* pt = fPageMapper->GetPageTableAt(
+                       pd[index] & X86_PDE_ADDRESS_MASK);
 
                for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
                                index++, start += B_PAGE_SIZE) {
@@ -730,13 +727,10 @@
                                // Note, that we only need to invalidate the address, if the
                                // accessed flags was set, since only then the entry could have
                                // been in any TLB.
-                               if (fArchData->num_invalidate_pages
-                                               < PAGE_INVALIDATE_CACHE_SIZE) {
-                                       fArchData->pages_to_invalidate[
-                                               fArchData->num_invalidate_pages] = start;
-                               }
+                               if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
+                                       fInvalidPages[fInvalidPagesCount] = start;
 
-                               fArchData->num_invalidate_pages++;
+                               fInvalidPagesCount++;
                        }
 
                        if (area->cache_type != CACHE_TYPE_DEVICE) {
@@ -855,7 +849,7 @@
 
                        ThreadCPUPinner pinner(thread_get_current_thread());
 
-                       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
+                       page_table_entry* pt = fPageMapper->GetPageTableAt(
                                pd[index] & X86_PDE_ADDRESS_MASK);
                        page_table_entry oldEntry = clear_page_table_entry(
                                &pt[VADDR_TO_PTENT(address)]);
@@ -874,13 +868,10 @@
                                page->accessed = true;
 
                                if (!deletingAddressSpace) {
-                                       if (fArchData->num_invalidate_pages
-                                                       < PAGE_INVALIDATE_CACHE_SIZE) {
-                                               fArchData->pages_to_invalidate[
-                                                       fArchData->num_invalidate_pages] = address;
-                                       }
+                                       if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
+                                               fInvalidPages[fInvalidPagesCount] = address;
 
-                                       fArchData->num_invalidate_pages++;
+                                       fInvalidPagesCount++;
                                }
                        }
 
@@ -934,7 +925,7 @@
        struct thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);
 
-       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
+       page_table_entry* pt = fPageMapper->GetPageTableAt(
                pd[index] & X86_PDE_ADDRESS_MASK);
        page_table_entry entry = pt[VADDR_TO_PTENT(va)];
 
@@ -1040,7 +1031,7 @@
        struct thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);
 
-       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
+       page_table_entry* pt = fPageMapper->GetPageTableAt(
                pd[index] & X86_PDE_ADDRESS_MASK);
 
        for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
@@ -1070,13 +1061,10 @@
                        // Note, that we only need to invalidate the address, if the
                        // accessed flag was set, since only then the entry could have been
                        // in any TLB.
-                       if (fArchData->num_invalidate_pages
-                                       < PAGE_INVALIDATE_CACHE_SIZE) {
-                               fArchData->pages_to_invalidate[
-                                       fArchData->num_invalidate_pages] = start;
-                       }
+                       if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
+                               fInvalidPages[fInvalidPagesCount] = start;
 
-                       fArchData->num_invalidate_pages++;
+                       fInvalidPagesCount++;
                }
        }
 
@@ -1102,7 +1090,7 @@
        struct thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);
 
-       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
+       page_table_entry* pt = fPageMapper->GetPageTableAt(
                pd[index] & X86_PDE_ADDRESS_MASK);
        index = VADDR_TO_PTENT(va);
 
@@ -1113,12 +1101,10 @@
        pinner.Unlock();
 
        if ((oldEntry & flagsToClear) != 0) {
-               if (fArchData->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
-                       fArchData->pages_to_invalidate[
-                               fArchData->num_invalidate_pages] = va;
-               }
+               if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
+                       fInvalidPages[fInvalidPagesCount] = va;
 
-               fArchData->num_invalidate_pages++;
+               fInvalidPagesCount++;
        }
 
        return B_OK;
@@ -1144,7 +1130,7 @@
 
        ThreadCPUPinner pinner(thread_get_current_thread());
 
-       page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
+       page_table_entry* pt = fPageMapper->GetPageTableAt(
                pd[index] & X86_PDE_ADDRESS_MASK);
 
        index = VADDR_TO_PTENT(address);
@@ -1188,13 +1174,10 @@
                // Note, that we only need to invalidate the address, if the
                // accessed flags was set, since only then the entry could have been
                // in any TLB.
-               if (fArchData->num_invalidate_pages
-                               < PAGE_INVALIDATE_CACHE_SIZE) {
-                       fArchData->pages_to_invalidate[fArchData->num_invalidate_pages]
-                               = address;
-               }
+               if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
+                       fInvalidPages[fInvalidPagesCount] = address;
 
-               fArchData->num_invalidate_pages++;
+               fInvalidPagesCount++;
 
                Flush();
 
@@ -1251,16 +1234,16 @@
 void
 X86VMTranslationMap::Flush()
 {
-       if (fArchData->num_invalidate_pages <= 0)
+       if (fInvalidPagesCount <= 0)
                return;
 
        struct thread* thread = thread_get_current_thread();
        thread_pin_to_current_cpu(thread);
 
-       if (fArchData->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
+       if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
                // invalidate all pages
                TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
-                       fArchData->num_invalidate_pages);
+                       fInvalidPagesCount);
 
                if (IS_KERNEL_MAP(map)) {
                        arch_cpu_global_TLB_invalidate();
@@ -1281,15 +1264,13 @@
                }
        } else {
                TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
-                       fArchData->num_invalidate_pages);
+                       fInvalidPagesCount);
 
-               arch_cpu_invalidate_TLB_list(fArchData->pages_to_invalidate,
-                       fArchData->num_invalidate_pages);
+               arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);
 
                if (IS_KERNEL_MAP(map)) {
                        smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
-                               (uint32)fArchData->pages_to_invalidate,
-                               fArchData->num_invalidate_pages, 0, NULL,
+                               (uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
                                SMP_MSG_FLAG_SYNC);
                } else {
                        int cpu = smp_get_current_cpu();
@@ -1297,13 +1278,12 @@
                                & ~((uint32)1 << cpu);
                        if (cpuMask != 0) {
                                smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
-                                       (uint32)fArchData->pages_to_invalidate,
-                                       fArchData->num_invalidate_pages, 0, NULL,
+                                       (uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
                                        SMP_MSG_FLAG_SYNC);
                        }
                }
        }
-       fArchData->num_invalidate_pages = 0;
+       fInvalidPagesCount = 0;
 
        thread_unpin_from_current_cpu(thread);
 }

Modified: haiku/trunk/src/system/kernel/arch/x86/x86_paging.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/x86_paging.h 2010-06-04 22:52:06 UTC (rev 37013)
+++ haiku/trunk/src/system/kernel/arch/x86/x86_paging.h 2010-06-04 22:53:17 UTC (rev 37014)
@@ -16,8 +16,6 @@
 #include <int.h>
 
 
-#define PAGE_INVALIDATE_CACHE_SIZE 64
-
 #define VADDR_TO_PDENT(va) (((va) / B_PAGE_SIZE) / 1024)
 #define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 1024)
 
@@ -66,12 +64,9 @@
 struct vm_translation_map_arch_info : DeferredDeletable {
        page_directory_entry*           pgdir_virt;
        uint32                                          pgdir_phys;
-       TranslationMapPhysicalPageMapper* page_mapper;
        vint32                                          ref_count;
        vint32                                          active_on_cpus;
                // mask indicating on which CPUs the map is currently used
-       int                                                     num_invalidate_pages;
-       addr_t                                          pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
 
                                                                vm_translation_map_arch_info();
        virtual                                         ~vm_translation_map_arch_info();


Other related posts:

  • » [haiku-commits] r37014 - haiku/trunk/src/system/kernel/arch/x86 - ingo_weinhold