[haiku-commits] haiku: hrev43387 - in src/system/kernel: arch/x86/paging/pae arch/x86/paging/32bit vm

  • From: mmlr@xxxxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sat, 3 Dec 2011 19:47:03 +0100 (CET)

hrev43387 adds 1 changeset to branch 'master'
old head: f990399943b372ab46f4d9623ec0808d4e2ed8ea
new head: 643cf35ee8b8ba35ef88ca9daba4ca9ab65cf1ff

----------------------------------------------------------------------------

643cf35: Add debug helper functions to mark pages present.
  
  They can be used to mark pages as present/non-present without actually
  unmapping them. Marking pages as non-present causes every access to
  fault. We can use that for debugging as it allows us to "read protect"
  individual kernel pages.

                                            [ Michael Lotz <mmlr@xxxxxxxx> ]

----------------------------------------------------------------------------

Revision:    hrev43387
Commit:      643cf35ee8b8ba35ef88ca9daba4ca9ab65cf1ff
URL:         http://cgit.haiku-os.org/haiku/commit/?id=643cf35
Author:      Michael Lotz <mmlr@xxxxxxxx>
Date:        Sat Dec  3 18:36:01 2011 UTC

----------------------------------------------------------------------------

6 files changed, 130 insertions(+), 0 deletions(-)
headers/private/kernel/vm/VMTranslationMap.h       |    3 +
.../x86/paging/32bit/X86VMTranslationMap32Bit.cpp  |   55 +++++++++++++++
.../x86/paging/32bit/X86VMTranslationMap32Bit.h    |    3 +
.../arch/x86/paging/pae/X86VMTranslationMapPAE.cpp |   58 ++++++++++++++++
.../arch/x86/paging/pae/X86VMTranslationMapPAE.h   |    3 +
src/system/kernel/vm/VMTranslationMap.cpp          |    8 ++

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/vm/VMTranslationMap.h 
b/headers/private/kernel/vm/VMTranslationMap.h
index 3d50def..d0ad63d 100644
--- a/headers/private/kernel/vm/VMTranslationMap.h
+++ b/headers/private/kernel/vm/VMTranslationMap.h
@@ -36,6 +36,9 @@ struct VMTranslationMap {
                                                                        
vm_page_reservation* reservation) = 0;
        virtual status_t                        Unmap(addr_t start, addr_t end) 
= 0;
 
+       virtual status_t                        DebugMarkRangePresent(addr_t 
start, addr_t end,
+                                                                       bool 
markPresent);
+
        // map not locked
        virtual status_t                        UnmapPage(VMArea* area, addr_t 
address,
                                                                        bool 
updatePageQueue) = 0;
diff --git 
a/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp 
b/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
index 450257e..939b4d2 100644
--- a/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
+++ b/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
@@ -261,6 +261,61 @@ X86VMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
 }
 
 
+status_t
+X86VMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
+       bool markPresent)
+{
+       start = ROUNDDOWN(start, B_PAGE_SIZE);
+       if (start >= end)
+               return B_OK;
+
+       page_directory_entry *pd = fPagingStructures->pgdir_virt;
+
+       do {
+               int index = VADDR_TO_PDENT(start);
+               if ((pd[index] & X86_PDE_PRESENT) == 0) {
+                       // no page table here, move the start up to access the 
next page
+                       // table
+                       start = ROUNDUP(start + 1, kPageTableAlignment);
+                       continue;
+               }
+
+               Thread* thread = thread_get_current_thread();
+               ThreadCPUPinner pinner(thread);
+
+               page_table_entry* pt = 
(page_table_entry*)fPageMapper->GetPageTableAt(
+                       pd[index] & X86_PDE_ADDRESS_MASK);
+
+               for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < 
end);
+                               index++, start += B_PAGE_SIZE) {
+                       if ((pt[index] & X86_PTE_PRESENT) == 0) {
+                               if (!markPresent)
+                                       continue;
+
+                               
X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
+                                       X86_PTE_PRESENT);
+                       } else {
+                               if (markPresent)
+                                       continue;
+
+                               page_table_entry oldEntry
+                                       = 
X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
+                                               X86_PTE_PRESENT);
+
+                               if ((oldEntry & X86_PTE_ACCESSED) != 0) {
+                                       // Note that we only need to 
invalidate the address if the
+                                       // accessed flag was set, since only 
then could the entry
+                                       // have been in any TLB.
+                                       InvalidatePage(start);
+                               }
+                       }
+               }
+       } while (start != 0 && start < end);
+
+       return B_OK;
+}
+
+
 /*!    Caller must have locked the cache of the page to be unmapped.
        This object shouldn't be locked.
 */
diff --git a/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.h 
b/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.h
index d79a4f3..db99659 100644
--- a/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.h
+++ b/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.h
@@ -27,6 +27,9 @@ struct X86VMTranslationMap32Bit : X86VMTranslationMap {
                                                                        
vm_page_reservation* reservation);
        virtual status_t                        Unmap(addr_t start, addr_t end);
 
+       virtual status_t                        DebugMarkRangePresent(addr_t 
start, addr_t end,
+                                                                       bool 
markPresent);
+
        virtual status_t                        UnmapPage(VMArea* area, addr_t 
address,
                                                                        bool 
updatePageQueue);
        virtual void                            UnmapPages(VMArea* area, addr_t 
base,
diff --git a/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp 
b/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp
index 1d522b0..8f086b5 100644
--- a/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp
+++ b/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp
@@ -301,6 +301,64 @@ X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
 }
 
 
+status_t
+X86VMTranslationMapPAE::DebugMarkRangePresent(addr_t start, addr_t end,
+       bool markPresent)
+{
+       start = ROUNDDOWN(start, B_PAGE_SIZE);
+       if (start >= end)
+               return B_OK;
+
+       do {
+               pae_page_directory_entry* pageDirEntry
+                       = X86PagingMethodPAE::PageDirEntryForAddress(
+                               fPagingStructures->VirtualPageDirs(), start);
+               if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
+                       // no page table here, move the start up to access the 
next page
+                       // table
+                       start = ROUNDUP(start + 1, kPAEPageTableRange);
+                       continue;
+               }
+
+               Thread* thread = thread_get_current_thread();
+               ThreadCPUPinner pinner(thread);
+
+               pae_page_table_entry* pageTable
+                       = (pae_page_table_entry*)fPageMapper->GetPageTableAt(
+                               *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
+
+               uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
+               for (; index < kPAEPageTableEntryCount && start < end;
+                               index++, start += B_PAGE_SIZE) {
+
+                       if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
+                               if (!markPresent)
+                                       continue;
+
+                               X86PagingMethodPAE::SetPageTableEntryFlags(
+                                       &pageTable[index], X86_PAE_PTE_PRESENT);
+                       } else {
+                               if (markPresent)
+                                       continue;
+
+                               pae_page_table_entry oldEntry
+                                       = 
X86PagingMethodPAE::ClearPageTableEntryFlags(
+                                               &pageTable[index], 
X86_PAE_PTE_PRESENT);
+
+                               if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
+                                       // Note that we only need to 
invalidate the address if the
+                                       // accessed flag was set, since only 
then could the entry
+                                       // have been in any TLB.
+                                       InvalidatePage(start);
+                               }
+                       }
+               }
+       } while (start != 0 && start < end);
+
+       return B_OK;
+}
+
+
 /*!    Caller must have locked the cache of the page to be unmapped.
        This object shouldn't be locked.
 */
diff --git a/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.h 
b/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.h
index a33f4d4..59bd5ff 100644
--- a/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.h
+++ b/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.h
@@ -30,6 +30,9 @@ struct X86VMTranslationMapPAE : X86VMTranslationMap {
                                                                        
vm_page_reservation* reservation);
        virtual status_t                        Unmap(addr_t start, addr_t end);
 
+       virtual status_t                        DebugMarkRangePresent(addr_t 
start, addr_t end,
+                                                                       bool 
markPresent);
+
        virtual status_t                        UnmapPage(VMArea* area, addr_t 
address,
                                                                        bool 
updatePageQueue);
        virtual void                            UnmapPages(VMArea* area, addr_t 
base,
diff --git a/src/system/kernel/vm/VMTranslationMap.cpp 
b/src/system/kernel/vm/VMTranslationMap.cpp
index a2acdcc..dafbaed 100644
--- a/src/system/kernel/vm/VMTranslationMap.cpp
+++ b/src/system/kernel/vm/VMTranslationMap.cpp
@@ -31,6 +31,14 @@ VMTranslationMap::~VMTranslationMap()
 }
 
 
+status_t
+VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
+       bool markPresent)
+{
+       return B_NOT_SUPPORTED;
+}
+
+
 /*!    Unmaps a range of pages of an area.
 
        The default implementation just iterates over all virtual pages of the


Other related posts:

  • » [haiku-commits] haiku: hrev43387 - in src/system/kernel: arch/x86/paging/pae arch/x86/paging/32bit vm - mmlr