[haiku-commits] r37055 - in haiku/trunk: headers/private/kernel/arch/x86 src/system/kernel/arch/x86

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 8 Jun 2010 02:20:34 +0200 (CEST)

Author: bonefish
Date: 2010-06-08 02:20:34 +0200 (Tue, 08 Jun 2010)
New Revision: 37055
Changeset: http://dev.haiku-os.org/changeset/37055/haiku

Added:
   haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.cpp
   haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.h
   haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.cpp
   haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap32Bit.h
Modified:
   haiku/trunk/headers/private/kernel/arch/x86/arch_vm_translation_map.h
   haiku/trunk/src/system/kernel/arch/x86/Jamfile
   haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp
   haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.h
   haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h
   haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_thread.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
   haiku/trunk/src/system/kernel/arch/x86/x86_paging.h
   haiku/trunk/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp
Log:
* Removed i386_translation_map_get_pgdir() and adjusted the one place where it
  was used.
* Renamed X86VMTranslationMap to X86VMTranslationMap32Bit and pulled the
  paging-method-agnostic part into the new base class X86VMTranslationMap
  (see the sketch below).
* Moved X86PagingStructures into its own header/source pair.
* Moved pgdir_virt from X86PagingStructures to X86PagingStructures32Bit where
  it is actually used.
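
The rename and base-class split can be pictured with the following minimal,
self-contained C++ sketch. The types are simplified stand-ins, not the kernel
headers (locking, the physical page mapper and most members are omitted); only
the shape of the hierarchy and the PagingStructures() accessor that replaces
i386_translation_map_get_pgdir() mirror this change.

#include <cstdint>
#include <cstdio>

struct X86PagingStructures {                    // paging-method-agnostic data
    uint32_t pgdir_phys = 0;                    // physical page directory address
    virtual ~X86PagingStructures() {}
};

struct X86PagingStructures32Bit : X86PagingStructures {
    void* pgdir_virt = nullptr;                 // only the 32 bit method needs this
};

struct X86VMTranslationMap {                    // paging-method-agnostic base
    bool fIsKernelMap = false;
    virtual ~X86VMTranslationMap() {}
    virtual X86PagingStructures* PagingStructures() const = 0;
};

struct X86VMTranslationMap32Bit : X86VMTranslationMap {
    X86PagingStructures32Bit* fPagingStructures = nullptr;
    virtual X86PagingStructures* PagingStructures() const
        { return fPagingStructures; }
};

int main()
{
    X86PagingStructures32Bit structures;
    structures.pgdir_phys = 0x1000;

    X86VMTranslationMap32Bit map;
    map.fPagingStructures = &structures;

    // what callers (e.g. arch_thread.cpp) now do instead of
    // i386_translation_map_get_pgdir():
    X86VMTranslationMap* genericMap = &map;
    printf("pgdir: %#x\n", (unsigned)genericMap->PagingStructures()->pgdir_phys);
    return 0;
}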


Modified: haiku/trunk/headers/private/kernel/arch/x86/arch_vm_translation_map.h
===================================================================
--- haiku/trunk/headers/private/kernel/arch/x86/arch_vm_translation_map.h       2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/headers/private/kernel/arch/x86/arch_vm_translation_map.h       2010-06-08 00:20:34 UTC (rev 37055)
@@ -5,13 +5,5 @@
 #ifndef _KERNEL_ARCH_x86_VM_TRANSLATION_MAP_H
 #define _KERNEL_ARCH_x86_VM_TRANSLATION_MAP_H
 
-#include <arch/vm_translation_map.h>
 
-
-// quick function to return the physical pgdir of a mapping, needed for a context switch
-#ifdef __cplusplus
-extern "C"
-#endif
-uint32 i386_translation_map_get_pgdir(VMTranslationMap *map);
-
 #endif /* _KERNEL_ARCH_x86_VM_TRANSLATION_MAP_H */

Modified: haiku/trunk/src/system/kernel/arch/x86/Jamfile
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/Jamfile      2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/Jamfile      2010-06-08 00:20:34 UTC (rev 37055)
@@ -42,6 +42,8 @@
        x86_syscalls.cpp
        X86PagingMethod.cpp
        X86PagingMethod32Bit.cpp
+       X86PagingStructures.cpp
+       X86VMTranslationMap.cpp
 
        x86_apic.cpp
        x86_hpet.cpp

Modified: haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp     2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp     2010-06-08 00:20:34 UTC (rev 37055)
@@ -30,7 +30,7 @@
 
 #include "x86_physical_page_mapper.h"
 #include "x86_physical_page_mapper_large_memory.h"
-#include "X86VMTranslationMap.h"
+#include "X86VMTranslationMap32Bit.h"
 
 
 //#define TRACE_X86_PAGING_METHOD_32_BIT
@@ -91,17 +91,19 @@
                                                                        B_PAGE_SIZE * 1024)))
 #define FIRST_KERNEL_PGDIR_ENT  (VADDR_TO_PDENT(KERNEL_BASE))
 #define NUM_KERNEL_PGDIR_ENTS   (VADDR_TO_PDENT(KERNEL_SIZE))
-#define IS_KERNEL_MAP(map)             (fPagingStructures->pgdir_phys \
-                                                                       == sKernelPhysicalPageDirectory)
 
 
 X86PagingStructures32Bit::X86PagingStructures32Bit()
+       :
+       pgdir_virt(NULL)
 {
 }
 
 
 X86PagingStructures32Bit::~X86PagingStructures32Bit()
 {
+       // free the page dir
+       free(pgdir_virt);
 }
 
 
@@ -209,13 +211,6 @@
 //     #pragma mark -
 
 
-uint32
-i386_translation_map_get_pgdir(VMTranslationMap* map)
-{
-       return static_cast<X86VMTranslationMap*>(map)->PhysicalPageDir();
-}
-
-
 void
 x86_update_all_pgdirs(int index, page_directory_entry e)
 {
@@ -278,16 +273,14 @@
 // #pragma mark - VM ops
 
 
-X86VMTranslationMap::X86VMTranslationMap()
+X86VMTranslationMap32Bit::X86VMTranslationMap32Bit()
        :
-       fPagingStructures(NULL),
-       fPageMapper(NULL),
-       fInvalidPagesCount(0)
+       fPagingStructures(NULL)
 {
 }
 
 
-X86VMTranslationMap::~X86VMTranslationMap()
+X86VMTranslationMap32Bit::~X86VMTranslationMap32Bit()
 {
        if (fPagingStructures == NULL)
                return;
@@ -316,10 +309,12 @@
 
 
 status_t
-X86VMTranslationMap::Init(bool kernel)
+X86VMTranslationMap32Bit::Init(bool kernel)
 {
-       TRACE("X86VMTranslationMap::Init()\n");
+       TRACE("X86VMTranslationMap32Bit::Init()\n");
 
+       X86VMTranslationMap::Init(kernel);
+
        fPagingStructures = new(std::nothrow) X86PagingStructures32Bit;
        if (fPagingStructures == NULL)
                return B_NO_MEMORY;
@@ -380,45 +375,8 @@
 }
 
 
-/*!    Acquires the map's recursive lock, and resets the invalidate pages counter
-       in case it's the first locking recursion.
-*/
-bool
-X86VMTranslationMap::Lock()
-{
-       TRACE("%p->X86VMTranslationMap::Lock()\n", this);
-
-       recursive_lock_lock(&fLock);
-       if (recursive_lock_get_recursion(&fLock) == 1) {
-               // we were the first one to grab the lock
-               TRACE("clearing invalidated page count\n");
-               fInvalidPagesCount = 0;
-       }
-
-       return true;
-}
-
-
-/*!    Unlocks the map, and, if we are actually losing the recursive lock,
-       flush all pending changes of this map (ie. flush TLB caches as
-       needed).
-*/
-void
-X86VMTranslationMap::Unlock()
-{
-       TRACE("%p->X86VMTranslationMap::Unlock()\n", this);
-
-       if (recursive_lock_get_recursion(&fLock) == 1) {
-               // we're about to release it for the last time
-               X86VMTranslationMap::Flush();
-       }
-
-       recursive_lock_unlock(&fLock);
-}
-
-
 size_t
-X86VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
+X86VMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
 {
        // If start == 0, the actual base address is not yet known to the caller and
        // we shall assume the worst case.
@@ -433,7 +391,7 @@
 
 
 status_t
-X86VMTranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes,
+X86VMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
        uint32 memoryType, vm_page_reservation* reservation)
 {
        TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
@@ -490,7 +448,7 @@
                pt[index]);
 
        put_page_table_entry_in_pgtable(&pt[index], pa, attributes, memoryType,
-               IS_KERNEL_MAP(map));
+               fIsKernelMap);
 
        pinner.Unlock();
 
@@ -504,7 +462,7 @@
 
 
 status_t
-X86VMTranslationMap::Unmap(addr_t start, addr_t end)
+X86VMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
 {
        page_directory_entry *pd = fPagingStructures->pgdir_virt;
 
@@ -566,14 +524,14 @@
        This object shouldn't be locked.
 */
 status_t
-X86VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
+X86VMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
        bool updatePageQueue)
 {
        ASSERT(address % B_PAGE_SIZE == 0);
 
        page_directory_entry* pd = fPagingStructures->pgdir_virt;
 
-       TRACE("X86VMTranslationMap::UnmapPage(%#" B_PRIxADDR ")\n", address);
+       TRACE("X86VMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);
 
        RecursiveLocker locker(fLock);
 
@@ -678,7 +636,7 @@
 
 
 void
-X86VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
+X86VMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
        bool updatePageQueue)
 {
        page_directory_entry* pd = fPagingStructures->pgdir_virt;
@@ -686,7 +644,7 @@
        addr_t start = base;
        addr_t end = base + size;
 
-       TRACE("X86VMTranslationMap::UnmapPages(%p, %#" B_PRIxADDR ", %#"
+       TRACE("X86VMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
                B_PRIxADDR ")\n", area, start, end);
 
        VMAreaMappings queue;
@@ -801,11 +759,12 @@
 
 
 void
-X86VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
+X86VMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
        bool ignoreTopCachePageFlags)
 {
        if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
-               X86VMTranslationMap::UnmapPages(area, area->Base(), area->Size(), true);
+               X86VMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
+                       true);
                return;
        }
 
@@ -905,7 +864,8 @@
 
 
 status_t
-X86VMTranslationMap::Query(addr_t va, phys_addr_t *_physical, uint32 *_flags)
+X86VMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
+       uint32 *_flags)
 {
        // default the flags to not present
        *_flags = 0;
@@ -948,7 +908,7 @@
 
 
 status_t
-X86VMTranslationMap::QueryInterrupt(addr_t va, phys_addr_t *_physical,
+X86VMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
        uint32 *_flags)
 {
        *_flags = 0;
@@ -985,15 +945,8 @@
 }
 
 
-addr_t
-X86VMTranslationMap::MappedSize() const
-{
-       return fMapCount;
-}
-
-
 status_t
-X86VMTranslationMap::Protect(addr_t start, addr_t end, uint32 attributes,
+X86VMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
        uint32 memoryType)
 {
        page_directory_entry *pd = fPagingStructures->pgdir_virt;
@@ -1072,7 +1025,7 @@
 
 
 status_t
-X86VMTranslationMap::ClearFlags(addr_t va, uint32 flags)
+X86VMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
 {
        int index = VADDR_TO_PDENT(va);
        page_directory_entry* pd = fPagingStructures->pgdir_virt;
@@ -1109,15 +1062,15 @@
 
 
 bool
-X86VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
+X86VMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
        bool unmapIfUnaccessed, bool& _modified)
 {
        ASSERT(address % B_PAGE_SIZE == 0);
 
        page_directory_entry* pd = fPagingStructures->pgdir_virt;
 
-       TRACE("X86VMTranslationMap::ClearAccessedAndModified(%#" B_PRIxADDR ")\n",
-               address);
+       TRACE("X86VMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
+               ")\n", address);
 
        RecursiveLocker locker(fLock);
 
@@ -1228,61 +1181,10 @@
 }
 
 
-void
-X86VMTranslationMap::Flush()
+X86PagingStructures*
+X86VMTranslationMap32Bit::PagingStructures() const
 {
-       if (fInvalidPagesCount <= 0)
-               return;
-
-       struct thread* thread = thread_get_current_thread();
-       thread_pin_to_current_cpu(thread);
-
-       if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
-               // invalidate all pages
-               TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
-                       fInvalidPagesCount);
-
-               if (IS_KERNEL_MAP(map)) {
-                       arch_cpu_global_TLB_invalidate();
-                       smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
-                               NULL, SMP_MSG_FLAG_SYNC);
-               } else {
-                       cpu_status state = disable_interrupts();
-                       arch_cpu_user_TLB_invalidate();
-                       restore_interrupts(state);
-
-                       int cpu = smp_get_current_cpu();
-                       uint32 cpuMask = fPagingStructures->active_on_cpus
-                               & ~((uint32)1 << cpu);
-                       if (cpuMask != 0) {
-                               smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
-                                       0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
-                       }
-               }
-       } else {
-               TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
-                       fInvalidPagesCount);
-
-               arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);
-
-               if (IS_KERNEL_MAP(map)) {
-                       smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
-                               (uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
-                               SMP_MSG_FLAG_SYNC);
-               } else {
-                       int cpu = smp_get_current_cpu();
-                       uint32 cpuMask = fPagingStructures->active_on_cpus
-                               & ~((uint32)1 << cpu);
-                       if (cpuMask != 0) {
-                               smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
-                                       (uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
-                                       SMP_MSG_FLAG_SYNC);
-                       }
-               }
-       }
-       fInvalidPagesCount = 0;
-
-       thread_unpin_from_current_cpu(thread);
+       return fPagingStructures;
 }
 
 
@@ -1463,7 +1365,7 @@
 
        // get the page table's physical address
        phys_addr_t physicalTable;
-       X86VMTranslationMap* map = static_cast<X86VMTranslationMap*>(
+       X86VMTranslationMap32Bit* map = static_cast<X86VMTranslationMap32Bit*>(
                VMAddressSpace::Kernel()->TranslationMap());
        uint32 dummyFlags;
        cpu_status state = disable_interrupts();
@@ -1472,7 +1374,8 @@
 
        // put the page table into the page directory
        int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
-       page_directory_entry* entry = &map->PagingStructures()->pgdir_virt[index];
+       page_directory_entry* entry
+               = &map->PagingStructures32Bit()->pgdir_virt[index];
        x86_put_pgtable_in_pgdir(entry, physicalTable,
                B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
        x86_update_all_pgdirs(index, *entry);
@@ -1596,7 +1499,7 @@
 status_t
 X86PagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
 {
-       X86VMTranslationMap* map = new(std::nothrow) X86VMTranslationMap;
+       X86VMTranslationMap32Bit* map = new(std::nothrow) X86VMTranslationMap32Bit;
        if (map == NULL)
                return B_NO_MEMORY;
 

Modified: haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.h       2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/X86PagingMethod32Bit.h       2010-06-08 00:20:34 UTC (rev 37055)
@@ -8,9 +8,12 @@
 
 #include "x86_paging.h"
 #include "X86PagingMethod.h"
+#include "X86PagingStructures.h"
 
 
 struct X86PagingStructures32Bit : X86PagingStructures {
+       page_directory_entry*           pgdir_virt;
+
                                                                X86PagingStructures32Bit();
        virtual                                         ~X86PagingStructures32Bit();
 

Copied: haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.cpp (from rev 37050, haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp)
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.cpp                             (rev 0)
+++ haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.cpp      2010-06-08 00:20:34 UTC (rev 37055)
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include "X86PagingStructures.h"
+
+
+X86PagingStructures::X86PagingStructures()
+       :
+       ref_count(1)
+{
+}
+
+
+X86PagingStructures::~X86PagingStructures()
+{
+}

Copied: haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.h (from rev 37050, haiku/trunk/src/system/kernel/arch/x86/x86_paging.h)
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.h                               (rev 0)
+++ haiku/trunk/src/system/kernel/arch/x86/X86PagingStructures.h        2010-06-08 00:20:34 UTC (rev 37055)
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2005-2009, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx
+ * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
+ */
+#ifndef KERNEL_ARCH_X86_PAGING_STRUCTURES_H
+#define KERNEL_ARCH_X86_PAGING_STRUCTURES_H
+
+
+#include <SupportDefs.h>
+
+#include <heap.h>
+
+
+struct X86PagingStructures : DeferredDeletable {
+       uint32                                          pgdir_phys;
+       vint32                                          ref_count;
+       vint32                                          active_on_cpus;
+               // mask indicating on which CPUs the map is currently used
+
+                                                               X86PagingStructures();
+       virtual                                         ~X86PagingStructures();
+
+       inline  void                            AddReference();
+       inline  void                            RemoveReference();
+
+       virtual void                            Delete() = 0;
+};
+
+
+inline void
+X86PagingStructures::AddReference()
+{
+       atomic_add(&ref_count, 1);
+}
+
+
+inline void
+X86PagingStructures::RemoveReference()
+{
+       if (atomic_add(&ref_count, -1) == 1)
+               Delete();
+}
+
+
+#endif // KERNEL_ARCH_X86_PAGING_STRUCTURES_H

Added: haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.cpp                             (rev 0)
+++ haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.cpp      2010-06-08 00:20:34 UTC (rev 37055)
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2002-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx All rights reserved.
+ * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
+ */
+
+
+#include "X86VMTranslationMap.h"
+
+#include <thread.h>
+#include <smp.h>
+
+#include "X86PagingStructures.h"
+
+
+//#define TRACE_X86_VM_TRANSLATION_MAP
+#ifdef TRACE_X86_VM_TRANSLATION_MAP
+#      define TRACE(x...) dprintf(x)
+#else
+#      define TRACE(x...) ;
+#endif
+
+
+X86VMTranslationMap::X86VMTranslationMap()
+       :
+       fPageMapper(NULL),
+       fInvalidPagesCount(0)
+{
+}
+
+
+X86VMTranslationMap::~X86VMTranslationMap()
+{
+}
+
+
+status_t
+X86VMTranslationMap::Init(bool kernel)
+{
+       fIsKernelMap = kernel;
+       return B_OK;
+}
+
+
+/*!    Acquires the map's recursive lock, and resets the invalidate pages counter
+       in case it's the first locking recursion.
+*/
+bool
+X86VMTranslationMap::Lock()
+{
+       TRACE("%p->X86VMTranslationMap::Lock()\n", this);
+
+       recursive_lock_lock(&fLock);
+       if (recursive_lock_get_recursion(&fLock) == 1) {
+               // we were the first one to grab the lock
+               TRACE("clearing invalidated page count\n");
+               fInvalidPagesCount = 0;
+       }
+
+       return true;
+}
+
+
+/*!    Unlocks the map, and, if we are actually losing the recursive lock,
+       flush all pending changes of this map (ie. flush TLB caches as
+       needed).
+*/
+void
+X86VMTranslationMap::Unlock()
+{
+       TRACE("%p->X86VMTranslationMap::Unlock()\n", this);
+
+       if (recursive_lock_get_recursion(&fLock) == 1) {
+               // we're about to release it for the last time
+               Flush();
+       }
+
+       recursive_lock_unlock(&fLock);
+}
+
+
+addr_t
+X86VMTranslationMap::MappedSize() const
+{
+       return fMapCount;
+}
+
+
+void
+X86VMTranslationMap::Flush()
+{
+       if (fInvalidPagesCount <= 0)
+               return;
+
+       struct thread* thread = thread_get_current_thread();
+       thread_pin_to_current_cpu(thread);
+
+       if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
+               // invalidate all pages
+               TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
+                       fInvalidPagesCount);
+
+               if (fIsKernelMap) {
+                       arch_cpu_global_TLB_invalidate();
+                       smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
+                               NULL, SMP_MSG_FLAG_SYNC);
+               } else {
+                       cpu_status state = disable_interrupts();
+                       arch_cpu_user_TLB_invalidate();
+                       restore_interrupts(state);
+
+                       int cpu = smp_get_current_cpu();
+                       uint32 cpuMask = PagingStructures()->active_on_cpus
+                               & ~((uint32)1 << cpu);
+                       if (cpuMask != 0) {
+                               smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
+                                       0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
+                       }
+               }
+       } else {
+               TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
+                       fInvalidPagesCount);
+
+               arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);
+
+               if (fIsKernelMap) {
+                       smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
+                               (uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
+                               SMP_MSG_FLAG_SYNC);
+               } else {
+                       int cpu = smp_get_current_cpu();
+                       uint32 cpuMask = PagingStructures()->active_on_cpus
+                               & ~((uint32)1 << cpu);
+                       if (cpuMask != 0) {
+                               smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
+                                       (uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
+                                       SMP_MSG_FLAG_SYNC);
+                       }
+               }
+       }
+       fInvalidPagesCount = 0;
+
+       thread_unpin_from_current_cpu(thread);
+}

Modified: haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h        2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h        2010-06-08 00:20:34 UTC (rev 37055)
@@ -12,63 +12,30 @@
 #define PAGE_INVALIDATE_CACHE_SIZE 64
 
 
+struct X86PagingStructures;
+class TranslationMapPhysicalPageMapper;
+
+
 struct X86VMTranslationMap : VMTranslationMap {
                                                                X86VMTranslationMap();
        virtual                                         ~X86VMTranslationMap();
 
                        status_t                        Init(bool kernel);
 
-       inline  X86PagingStructures* PagingStructures() const
-                                                                       { return fPagingStructures; }
-       inline  uint32                          PhysicalPageDir() const
-                                                                       { return fPagingStructures->pgdir_phys; }
-
        virtual bool                            Lock();
        virtual void                            Unlock();
 
        virtual addr_t                          MappedSize() const;
-       virtual size_t                          MaxPagesNeededToMap(addr_t start,
-                                                                       addr_t end) const;
 
-       virtual status_t                        Map(addr_t virtualAddress,
-                                                                       phys_addr_t physicalAddress,
-                                                                       uint32 attributes, uint32 memoryType,
-                                                                       vm_page_reservation* reservation);
-       virtual status_t                        Unmap(addr_t start, addr_t end);
-
-       virtual status_t                        UnmapPage(VMArea* area, addr_t address,
-                                                                       bool updatePageQueue);
-       virtual void                            UnmapPages(VMArea* area, addr_t base,
-                                                                       size_t size, bool updatePageQueue);
-       virtual void                            UnmapArea(VMArea* area,
-                                                                       bool deletingAddressSpace,
-                                                                       bool ignoreTopCachePageFlags);
-
-       virtual status_t                        Query(addr_t virtualAddress,
-                                                                       phys_addr_t* _physicalAddress,
-                                                                       uint32* _flags);
-       virtual status_t                        QueryInterrupt(addr_t virtualAddress,
-                                                                       phys_addr_t* _physicalAddress,
-                                                                       uint32* _flags);
-
-       virtual status_t                        Protect(addr_t base, addr_t top,
-                                                                       uint32 attributes, uint32 memoryType);
-
-       virtual status_t                        ClearFlags(addr_t virtualAddress,
-                                                                       uint32 flags);
-
-       virtual bool                            ClearAccessedAndModified(
-                                                                       VMArea* area, addr_t address,
-                                                                       bool unmapIfUnaccessed,
-                                                                       bool& _modified);
-
        virtual void                            Flush();
 
+       virtual X86PagingStructures* PagingStructures() const = 0;
+
 protected:
-                       X86PagingStructures* fPagingStructures;
                        TranslationMapPhysicalPageMapper* fPageMapper;
                        int                                     fInvalidPagesCount;
                        addr_t                          fInvalidPages[PAGE_INVALIDATE_CACHE_SIZE];
+                       bool                            fIsKernelMap;
 };
 
 

Copied: haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap32Bit.h (from rev 37025, haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap.h)
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap32Bit.h                          (rev 0)
+++ haiku/trunk/src/system/kernel/arch/x86/X86VMTranslationMap32Bit.h   2010-06-08 00:20:34 UTC (rev 37055)
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef KERNEL_ARCH_X86_X86_VM_TRANSLATION_MAP_32_BIT_H
+#define KERNEL_ARCH_X86_X86_VM_TRANSLATION_MAP_32_BIT_H
+
+
+#include "X86VMTranslationMap.h"
+
+
+struct X86PagingStructures32Bit;
+
+
+struct X86VMTranslationMap32Bit : X86VMTranslationMap {
+                                                               X86VMTranslationMap32Bit();
+       virtual                                         ~X86VMTranslationMap32Bit();
+
+                       status_t                        Init(bool kernel);
+
+       virtual size_t                          MaxPagesNeededToMap(addr_t start,
+                                                                       addr_t end) const;
+
+       virtual status_t                        Map(addr_t virtualAddress,
+                                                                       phys_addr_t physicalAddress,
+                                                                       uint32 attributes, uint32 memoryType,
+                                                                       vm_page_reservation* reservation);
+       virtual status_t                        Unmap(addr_t start, addr_t end);
+
+       virtual status_t                        UnmapPage(VMArea* area, addr_t address,
+                                                                       bool updatePageQueue);
+       virtual void                            UnmapPages(VMArea* area, addr_t base,
+                                                                       size_t size, bool updatePageQueue);
+       virtual void                            UnmapArea(VMArea* area,
+                                                                       bool deletingAddressSpace,
+                                                                       bool ignoreTopCachePageFlags);
+
+       virtual status_t                        Query(addr_t virtualAddress,
+                                                                       phys_addr_t* _physicalAddress,
+                                                                       uint32* _flags);
+       virtual status_t                        QueryInterrupt(addr_t virtualAddress,
+                                                                       phys_addr_t* _physicalAddress,
+                                                                       uint32* _flags);
+
+       virtual status_t                        Protect(addr_t base, addr_t top,
+                                                                       uint32 attributes, uint32 memoryType);
+
+       virtual status_t                        ClearFlags(addr_t virtualAddress,
+                                                                       uint32 flags);
+
+       virtual bool                            ClearAccessedAndModified(
+                                                                       VMArea* area, addr_t address,
+                                                                       bool unmapIfUnaccessed,
+                                                                       bool& _modified);
+
+       virtual X86PagingStructures* PagingStructures() const;
+       inline  X86PagingStructures32Bit* PagingStructures32Bit() const
+                                                                       { return fPagingStructures; }
+
+private:
+                       X86PagingStructures32Bit* fPagingStructures;
+};
+
+
+#endif // KERNEL_ARCH_X86_X86_VM_TRANSLATION_MAP_32_BIT_H

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp 2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_cpu.cpp 2010-06-08 00:20:34 UTC (rev 37055)
@@ -30,7 +30,7 @@
 #include <boot/kernel_args.h>
 
 #include "interrupts.h"
-#include "x86_paging.h"
+#include "X86PagingStructures.h"
 #include "X86VMTranslationMap.h"
 
 

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_thread.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_thread.cpp      2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_thread.cpp      2010-06-08 00:20:34 UTC (rev 37055)
@@ -25,8 +25,8 @@
 #include <vm/vm_types.h>
 #include <vm/VMAddressSpace.h>
 
-#include "x86_paging.h"
 #include "x86_syscalls.h"
+#include "X86PagingStructures.h"
 #include "X86VMTranslationMap.h"
 
 
@@ -201,7 +201,8 @@
        if (toAddressSpace == NULL)
                toAddressSpace = VMAddressSpace::Kernel();
 
-       return i386_translation_map_get_pgdir(toAddressSpace->TranslationMap());
+       return static_cast<X86VMTranslationMap*>(toAddressSpace->TranslationMap())
+               ->PagingStructures()->pgdir_phys;
 }
 
 

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-06-08 00:20:34 UTC (rev 37055)
@@ -24,24 +24,6 @@
 static X86PagingMethod* sPagingMethod;
 
 
-// #pragma mark - X86PagingStructures
-
-
-X86PagingStructures::X86PagingStructures()
-       :
-       pgdir_virt(NULL),
-       ref_count(1)
-{
-}
-
-
-X86PagingStructures::~X86PagingStructures()
-{
-       // free the page dir
-       free(pgdir_virt);
-}
-
-
 // #pragma mark - VM API
 
 

Modified: haiku/trunk/src/system/kernel/arch/x86/x86_paging.h
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/x86_paging.h 2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/x86_paging.h 2010-06-08 00:20:34 UTC (rev 37055)
@@ -12,7 +12,6 @@
 
 #include <SupportDefs.h>
 
-#include <heap.h>
 #include <int.h>
 
 
@@ -61,23 +60,6 @@
 typedef uint32 page_directory_entry;
 
 
-struct X86PagingStructures : DeferredDeletable {
-       page_directory_entry*           pgdir_virt;
-       uint32                                          pgdir_phys;
-       vint32                                          ref_count;
-       vint32                                          active_on_cpus;
-               // mask indicating on which CPUs the map is currently used
-
-                                                               X86PagingStructures();
-       virtual                                         ~X86PagingStructures();
-
-       inline  void                            AddReference();
-       inline  void                            RemoveReference();
-
-       virtual void                            Delete() = 0;
-};
-
-
 void x86_early_prepare_page_tables(page_table_entry* pageTables, addr_t address,
                size_t size);
 void x86_put_pgtable_in_pgdir(page_directory_entry* entry,
@@ -121,19 +103,4 @@
 }
 
 
-inline void
-X86PagingStructures::AddReference()
-{
-       atomic_add(&ref_count, 1);
-}
-
-
-inline void
-X86PagingStructures::RemoveReference()
-{
-       if (atomic_add(&ref_count, -1) == 1)
-               Delete();
-}
-
-
 #endif // _KERNEL_ARCH_X86_PAGING_H

Modified: haiku/trunk/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp    2010-06-07 23:11:01 UTC (rev 37054)
+++ haiku/trunk/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp    2010-06-08 00:20:34 UTC (rev 37055)
@@ -37,8 +37,8 @@
 #include <vm/vm_types.h>
 #include <vm/VMAddressSpace.h>
 
-#include "x86_paging.h"
 #include "x86_physical_page_mapper.h"
+#include "X86PagingStructures.h"
 #include "X86VMTranslationMap.h"
 
 

