[haiku-commits] r37460 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/arch/x86/paging/32bit src/system/kernel/arch/x86/paging/pae src/system/kernel/slab src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sat, 10 Jul 2010 17:08:13 +0200 (CEST)

Author: bonefish
Date: 2010-07-10 17:08:13 +0200 (Sat, 10 Jul 2010)
New Revision: 37460
Changeset: http://dev.haiku-os.org/changeset/37460/haiku
Ticket: http://dev.haiku-os.org/ticket/6288

Modified:
   haiku/trunk/headers/private/kernel/vm/VMCache.h
   haiku/trunk/headers/private/kernel/vm/vm_types.h
   haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
   haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp
   haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
   haiku/trunk/src/system/kernel/vm/PageCacheLocker.cpp
   haiku/trunk/src/system/kernel/vm/VMCache.cpp
   haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
* Moved the vm_page initialization from vm_page.cpp:vm_page_init() to the new
  vm_page::Init().
* Made vm_page::wired_count private and added accessor methods.
* Added VMCache::fWiredPagesCount (the number of wired pages the cache
  contains) and accessor methods.
* Made more use of vm_page::IsMapped().
* vm_copy_on_write_area(): Added vm_page_reservation* parameter that can be
  used to request special handling for wired pages. If given, the wired pages
  are replaced by copies and the original pages are moved to the upper cache.
* vm_copy_area():
  - We don't need to do any wired ranges handling if the source area is a
    B_SHARED_AREA, since we don't touch the area's mappings in this case.
  - We no longer wait for wired ranges of the concerned areas to disappear.
    Instead we use the new vm_copy_on_write_area() feature and just let it
    copy the wired pages. This fixes #6288, an issue introduced with the use
    of user mutexes in libroot: When executing multiple concurrent fork()s all
    but the first one would wait on the fork mutex, which (being a user mutex)
    would wire a page that the vm_copy_area() of the first fork() would wait
    for.


Modified: haiku/trunk/headers/private/kernel/vm/VMCache.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-07-10 14:39:15 UTC 
(rev 37459)
+++ haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-07-10 15:08:13 UTC 
(rev 37460)
@@ -101,6 +101,10 @@
                        void                            MovePage(vm_page* page);
                        void                            MoveAllPages(VMCache* 
fromCache);
 
+       inline  page_num_t                      WiredPagesCount() const;
+       inline  void                            IncrementWiredPagesCount();
+       inline  void                            DecrementWiredPagesCount();
+
                        void                            AddConsumer(VMCache* 
consumer);
 
                        status_t                        
InsertAreaLocked(VMArea* area);
@@ -197,6 +201,7 @@
                        PageEventWaiter*        fPageEventWaiters;
                        void*                           fUserData;
                        VMCacheRef*                     fCacheRef;
+                       page_num_t                      fWiredPagesCount;
 };
 
 
@@ -308,6 +313,49 @@
 }
 
 
+page_num_t
+VMCache::WiredPagesCount() const
+{
+       return fWiredPagesCount;
+}
+
+
+void
+VMCache::IncrementWiredPagesCount()
+{
+       ASSERT(fWiredPagesCount < page_count);
+
+       fWiredPagesCount++;
+}
+
+
+void
+VMCache::DecrementWiredPagesCount()
+{
+       ASSERT(fWiredPagesCount > 0);
+
+       fWiredPagesCount--;
+}
+
+
+// vm_page methods implemented here to avoid VMCache.h inclusion in vm_types.h
+
+inline void
+vm_page::IncrementWiredCount()
+{
+       if (fWiredCount++ == 0)
+               cache_ref->cache->IncrementWiredPagesCount();
+}
+
+
+inline void
+vm_page::DecrementWiredCount()
+{
+       if (--fWiredCount == 0)
+               cache_ref->cache->DecrementWiredPagesCount();
+}
+
+
 #ifdef __cplusplus
 extern "C" {
 #endif

Modified: haiku/trunk/headers/private/kernel/vm/vm_types.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-07-10 14:39:15 UTC 
(rev 37459)
+++ haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-07-10 15:08:13 UTC 
(rev 37460)
@@ -10,6 +10,8 @@
 #define _KERNEL_VM_VM_TYPES_H
 
 
+#include <new>
+
 #include <arch/vm_types.h>
 #include <condition_variable.h>
 #include <kernel.h>
@@ -142,8 +144,9 @@
        uint8                                   unused : 1;
 
        uint8                                   usage_count;
-       uint16                                  wired_count;
 
+       inline void Init(page_num_t pageNumber);
+
        VMCacheRef* CacheRef() const                    { return cache_ref; }
        void SetCacheRef(VMCacheRef* cacheRef)  { this->cache_ref = cacheRef; }
 
@@ -151,11 +154,19 @@
                { return cache_ref != NULL ? cache_ref->cache : NULL; }
 
        bool IsMapped() const
-               { return wired_count > 0 || !mappings.IsEmpty(); }
+               { return fWiredCount > 0 || !mappings.IsEmpty(); }
 
        uint8 State() const                             { return state; }
        void InitState(uint8 newState);
        void SetState(uint8 newState);
+
+       inline uint16 WiredCount() const        { return fWiredCount; }
+       inline void IncrementWiredCount();
+       inline void DecrementWiredCount();
+               // both implemented in VMCache.h to avoid inclusion here
+
+private:
+       uint16                                  fWiredCount;
 };
 
 
@@ -180,6 +191,25 @@
 #define VM_PAGE_ALLOC_BUSY     0x00000020
 
 
+inline void
+vm_page::Init(page_num_t pageNumber)
+{
+       physical_page_number = pageNumber;
+       InitState(PAGE_STATE_FREE);
+       new(&mappings) vm_page_mappings();
+       fWiredCount = 0;
+       usage_count = 0;
+       busy_writing = false;
+       SetCacheRef(NULL);
+       #if DEBUG_PAGE_QUEUE
+               queue = NULL;
+       #endif
+       #if DEBUG_PAGE_ACCESS
+               accessing_thread = -1;
+       #endif
+}
+
+
 #if DEBUG_PAGE_ACCESS
 #      include <thread.h>
 

Modified: 
haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
===================================================================
--- 
haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
    2010-07-10 14:39:15 UTC (rev 37459)
+++ 
haiku/trunk/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp
    2010-07-10 15:08:13 UTC (rev 37460)
@@ -409,9 +409,9 @@
                                        page->mappings.Remove(mapping);
                                        queue.Add(mapping);
                                } else
-                                       page->wired_count--;
+                                       page->DecrementWiredCount();
 
-                               if (page->wired_count == 0 && 
page->mappings.IsEmpty()) {
+                               if (!page->IsMapped()) {
                                        atomic_add(&gMappedPagesCount, -1);
 
                                        if (updatePageQueue) {
@@ -475,7 +475,7 @@
                VMCache* cache = page->Cache();
 
                bool pageFullyUnmapped = false;
-               if (page->wired_count == 0 && page->mappings.IsEmpty()) {
+               if (!page->IsMapped()) {
                        atomic_add(&gMappedPagesCount, -1);
                        pageFullyUnmapped = true;
                }

Modified: 
haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp
===================================================================
--- 
haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp    
    2010-07-10 14:39:15 UTC (rev 37459)
+++ 
haiku/trunk/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp    
    2010-07-10 15:08:13 UTC (rev 37460)
@@ -451,9 +451,9 @@
                                        page->mappings.Remove(mapping);
                                        queue.Add(mapping);
                                } else
-                                       page->wired_count--;
+                                       page->DecrementWiredCount();
 
-                               if (page->wired_count == 0 && 
page->mappings.IsEmpty()) {
+                               if (!page->IsMapped()) {
                                        atomic_add(&gMappedPagesCount, -1);
 
                                        if (updatePageQueue) {
@@ -515,7 +515,7 @@
                VMCache* cache = page->Cache();
 
                bool pageFullyUnmapped = false;
-               if (page->wired_count == 0 && page->mappings.IsEmpty()) {
+               if (!page->IsMapped()) {
                        atomic_add(&gMappedPagesCount, -1);
                        pageFullyUnmapped = true;
                }

Modified: haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-07-10 
14:39:15 UTC (rev 37459)
+++ haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-07-10 
15:08:13 UTC (rev 37460)
@@ -1382,7 +1382,7 @@
                vm_page* page = vm_page_allocate_page(&reservation, 
PAGE_STATE_WIRED);
                cache->InsertPage(page, offset);
 
-               page->wired_count++;
+               page->IncrementWiredCount();
                atomic_add(&gMappedPagesCount, 1);
                DEBUG_PAGE_ACCESS_END(page);
 
@@ -1435,7 +1435,7 @@
 
                DEBUG_PAGE_ACCESS_START(page);
 
-               page->wired_count--;
+               page->DecrementWiredCount();
 
                cache->RemovePage(page);
                        // the iterator is remove-safe

Modified: haiku/trunk/src/system/kernel/vm/PageCacheLocker.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/PageCacheLocker.cpp        2010-07-10 
14:39:15 UTC (rev 37459)
+++ haiku/trunk/src/system/kernel/vm/PageCacheLocker.cpp        2010-07-10 
15:08:13 UTC (rev 37460)
@@ -14,7 +14,7 @@
 {
        if (page->busy || page->State() == PAGE_STATE_WIRED
                || page->State() == PAGE_STATE_FREE || page->State() == 
PAGE_STATE_CLEAR
-               || page->State() == PAGE_STATE_UNUSED || page->wired_count > 0)
+               || page->State() == PAGE_STATE_UNUSED || page->WiredCount() > 0)
                return true;
 
        return false;

Modified: haiku/trunk/src/system/kernel/vm/VMCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-07-10 14:39:15 UTC 
(rev 37459)
+++ haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-07-10 15:08:13 UTC 
(rev 37460)
@@ -594,6 +594,7 @@
        committed_size = 0;
        temporary = 0;
        page_count = 0;
+       fWiredPagesCount = 0;
        type = cacheType;
        fPageEventWaiters = NULL;
 
@@ -634,7 +635,7 @@
 
        // free all of the pages in the cache
        while (vm_page* page = pages.Root()) {
-               if (!page->mappings.IsEmpty() || page->wired_count != 0) {
+               if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
                        panic("remove page %p from cache %p: page still has 
mappings!\n",
                                page, this);
                }
@@ -758,6 +759,9 @@
 #endif // KDEBUG
 
        pages.Insert(page);
+
+       if (page->WiredCount() > 0)
+               IncrementWiredPagesCount();
 }
 
 
@@ -781,6 +785,9 @@
        pages.Remove(page);
        page_count--;
        page->SetCacheRef(NULL);
+
+       if (page->WiredCount() > 0)
+               DecrementWiredPagesCount();
 }
 
 
@@ -804,6 +811,12 @@
        pages.Insert(page);
        page_count++;
        page->SetCacheRef(fCacheRef);
+
+       if (page->WiredCount() > 0) {
+               IncrementWiredPagesCount();
+               oldCache->DecrementWiredPagesCount();
+       }
+
        T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
 }
 
@@ -821,6 +834,8 @@
        std::swap(fromCache->pages, pages);
        page_count = fromCache->page_count;
        fromCache->page_count = 0;
+       fWiredPagesCount = fromCache->fWiredPagesCount;
+       fromCache->fWiredPagesCount = 0;
 
        // swap the VMCacheRefs
        mutex_lock(&sCacheListLock);
@@ -1077,7 +1092,7 @@
                        // remove the page and put it into the free queue
                        DEBUG_PAGE_ACCESS_START(page);
                        vm_remove_all_page_mappings(page);
-                       ASSERT(page->wired_count == 0);
+                       ASSERT(page->WiredCount() == 0);
                                // TODO: Find a real solution! If the page is 
wired
                                // temporarily (e.g. by lock_memory()), we 
actually must not
                                // unmap it!
@@ -1123,7 +1138,7 @@
                                continue;
 
                        // We can't remove mapped pages.
-                       if (page->wired_count > 0 || !page->mappings.IsEmpty())
+                       if (page->IsMapped())
                                return B_BUSY;
 
                        DEBUG_PAGE_ACCESS_START(page);
@@ -1311,7 +1326,7 @@
                                        " state %u (%s) wired_count %u\n", page,
                                        page->physical_page_number, 
page->cache_offset,
                                        page->State(), 
page_state_to_string(page->State()),
-                                       page->wired_count);
+                                       page->WiredCount());
                        } else {
                                kprintf("\t%p DUMMY PAGE state %u (%s)\n",
                                        page, page->State(), 
page_state_to_string(page->State()));

Modified: haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp       2010-07-10 
14:39:15 UTC (rev 37459)
+++ haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp       2010-07-10 
15:08:13 UTC (rev 37460)
@@ -146,11 +146,11 @@
                        B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
                        pageNumber, accessed, modified);
        } else
-               page->wired_count--;
+               page->DecrementWiredCount();
 
        recursive_lock_unlock(&fLock);
 
-       if (page->wired_count == 0 && page->mappings.IsEmpty()) {
+       if (!page->IsMapped()) {
                atomic_add(&gMappedPagesCount, -1);
 
                if (updatePageQueue) {
@@ -203,11 +203,11 @@
                ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
                        B_PRIxPHYSADDR, page, pageNumber);
        } else
-               page->wired_count--;
+               page->DecrementWiredCount();
 
        recursive_lock_unlock(&fLock);
 
-       if (page->wired_count == 0 && page->mappings.IsEmpty())
+       if (!page->IsMapped())
                atomic_add(&gMappedPagesCount, -1);
 
        if (mapping != NULL) {

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp     2010-07-10 14:39:15 UTC (rev 
37459)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp     2010-07-10 15:08:13 UTC (rev 
37460)
@@ -406,8 +406,9 @@
 static inline void
 increment_page_wired_count(vm_page* page)
 {
-       if (page->wired_count++ == 0 && page->mappings.IsEmpty())
+       if (!page->IsMapped())
                atomic_add(&gMappedPagesCount, 1);
+       page->IncrementWiredCount();
 }
 
 
@@ -416,7 +417,8 @@
 static inline void
 decrement_page_wired_count(vm_page* page)
 {
-       if (--page->wired_count == 0 && page->mappings.IsEmpty())
+       page->DecrementWiredCount();
+       if (!page->IsMapped())
                atomic_add(&gMappedPagesCount, -1);
 }
 
@@ -486,7 +488,7 @@
 {
        VMTranslationMap* map = area->address_space->TranslationMap();
 
-       bool wasMapped = page->wired_count > 0 || !page->mappings.IsEmpty();
+       bool wasMapped = page->IsMapped();
 
        if (area->wiring == B_NO_LOCK) {
                DEBUG_PAGE_ACCESS_CHECK(page);
@@ -508,7 +510,7 @@
                        area->MemoryType(), reservation);
 
                // insert mapping into lists
-               if (page->mappings.IsEmpty() && page->wired_count == 0)
+               if (!page->IsMapped())
                        atomic_add(&gMappedPagesCount, 1);
 
                page->mappings.Add(mapping);
@@ -1341,8 +1343,8 @@
 
                                DEBUG_PAGE_ACCESS_START(page);
 
+                               cache->InsertPage(page, offset);
                                increment_page_wired_count(page);
-                               cache->InsertPage(page, offset);
                                vm_page_set_state(page, PAGE_STATE_WIRED);
                                page->busy = false;
 
@@ -1377,8 +1379,8 @@
                                if (status < B_OK)
                                        panic("couldn't map physical page in 
page run\n");
 
+                               cache->InsertPage(page, offset);
                                increment_page_wired_count(page);
-                               cache->InsertPage(page, offset);
 
                                DEBUG_PAGE_ACCESS_END(page);
                        }
@@ -2152,14 +2154,22 @@
 
 /*!    Creates a new cache on top of given cache, moves all areas from
        the old cache to the new one, and changes the protection of all affected
-       areas' pages to read-only.
+       areas' pages to read-only. If requested, wired pages are moved up to the
+       new cache and copies are added to the old cache in their place.
        Preconditions:
        - The given cache must be locked.
        - All of the cache's areas' address spaces must be read locked.
-       - None of the cache's areas must have any wired ranges.
+       - Either the cache must not have any wired ranges or a page reservation 
for
+         all wired pages must be provided, so they can be copied.
+
+       \param lowerCache The cache on top of which a new cache shall be 
created.
+       \param wiredPagesReservation If \c NULL there must not be any wired 
pages
+               in \a lowerCache. Otherwise as many pages must be reserved as 
the cache
+               has wired page. The wired pages are copied in this case.
 */
 static status_t
-vm_copy_on_write_area(VMCache* lowerCache)
+vm_copy_on_write_area(VMCache* lowerCache,
+       vm_page_reservation* wiredPagesReservation)
 {
        VMCache* upperCache;
 
@@ -2187,20 +2197,64 @@
 
        lowerCache->AddConsumer(upperCache);
 
-       // We now need to remap all pages from all of the cache's areas 
read-only, so
-       // that a copy will be created on next write access
+       // We now need to remap all pages from all of the cache's areas 
read-only,
+       // so that a copy will be created on next write access. If there are 
wired
+       // pages, we keep their protection, move them to the upper cache and 
create
+       // copies for the lower cache.
+       if (wiredPagesReservation != NULL) {
+               // We need to handle wired pages -- iterate through the cache's 
pages.
+               for (VMCachePagesTree::Iterator it = 
lowerCache->pages.GetIterator();
+                               vm_page* page = it.Next();) {
+                       if (page->WiredCount() > 0) {
+                               // allocate a new page and copy the wired one
+                               vm_page* copiedPage = vm_page_allocate_page(
+                                       wiredPagesReservation, 
PAGE_STATE_ACTIVE);
 
-       for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
-                       tempArea = tempArea->cache_next) {
-               // The area must be readable in the same way it was previously 
writable
-               uint32 protection = B_KERNEL_READ_AREA;
-               if ((tempArea->protection & B_READ_AREA) != 0)
-                       protection |= B_READ_AREA;
+                               vm_memcpy_physical_page(
+                                       copiedPage->physical_page_number * 
B_PAGE_SIZE,
+                                       page->physical_page_number * 
B_PAGE_SIZE);
 
-               VMTranslationMap* map = 
tempArea->address_space->TranslationMap();
-               map->Lock();
-               map->ProtectArea(tempArea, protection);
-               map->Unlock();
+                               // move the wired page to the upper cache 
(note: removing is OK
+                               // with the SplayTree iterator) and insert the 
copy
+                               upperCache->MovePage(page);
+                               lowerCache->InsertPage(copiedPage,
+                                       page->cache_offset * B_PAGE_SIZE);
+
+                               DEBUG_PAGE_ACCESS_END(copiedPage);
+                       } else {
+                               // Change the protection of this page in all 
areas.
+                               for (VMArea* tempArea = upperCache->areas; 
tempArea != NULL;
+                                               tempArea = 
tempArea->cache_next) {
+                                       // The area must be readable in the 
same way it was
+                                       // previously writable.
+                                       uint32 protection = B_KERNEL_READ_AREA;
+                                       if ((tempArea->protection & 
B_READ_AREA) != 0)
+                                               protection |= B_READ_AREA;
+
+                                       VMTranslationMap* map
+                                               = 
tempArea->address_space->TranslationMap();
+                                       map->Lock();
+                                       map->ProtectPage(tempArea,
+                                               virtual_page_address(tempArea, 
page), protection);
+                                       map->Unlock();
+                               }
+                       }
+               }
+       } else {
+               // just change the protection of all areas
+               for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
+                               tempArea = tempArea->cache_next) {
+                       // The area must be readable in the same way it was 
previously
+                       // writable.
+                       uint32 protection = B_KERNEL_READ_AREA;
+                       if ((tempArea->protection & B_READ_AREA) != 0)
+                               protection |= B_READ_AREA;
+
+                       VMTranslationMap* map = 
tempArea->address_space->TranslationMap();
+                       map->Lock();
+                       map->ProtectArea(tempArea, protection);
+                       map->Unlock();
+               }
        }
 
        vm_area_put_locked_cache(upperCache);
@@ -2232,6 +2286,9 @@
        status_t status;
        bool sharedArea;
 
+       page_num_t wiredPages = 0;
+       vm_page_reservation wiredPagesReservation;
+
        bool restart;
        do {
                restart = false;
@@ -2249,22 +2306,48 @@
 
                sharedArea = (source->protection & B_SHARED_AREA) != 0;
 
-               // Make sure the source area (respectively, if not shared, all 
areas of
-               // the cache) doesn't have any wired ranges.
-               if (sharedArea) {
-                       if (wait_if_area_is_wired(source, &locker, 
&cacheLocker))
+               page_num_t oldWiredPages = wiredPages;
+               wiredPages = 0;
+
+               // If the source area isn't shared, count the number of wired 
pages in
+               // the cache and reserve as many pages.
+               if (!sharedArea) {
+                       wiredPages = cache->WiredPagesCount();
+
+                       if (wiredPages > oldWiredPages) {
+                               cacheLocker.Unlock();
+                               locker.Unlock();
+
+                               if (oldWiredPages > 0)
+                                       
vm_page_unreserve_pages(&wiredPagesReservation);
+
+                               vm_page_reserve_pages(&wiredPagesReservation, 
wiredPages,
+                                       VM_PRIORITY_USER);
+
                                restart = true;
-               } else {
-                       for (VMArea* area = cache->areas; area != NULL;
-                                       area = area->cache_next) {
-                               if (wait_if_area_is_wired(area, &locker, 
&cacheLocker)) {
-                                       restart = true;
-                                       break;
-                               }
                        }
-               }
+               } else if (oldWiredPages > 0)
+                       vm_page_unreserve_pages(&wiredPagesReservation);
        } while (restart);
 
+       // unreserve pages later
+       struct PagesUnreserver {
+               PagesUnreserver(vm_page_reservation* reservation)
+                       :
+                       fReservation(reservation)
+               {
+               }
+
+               ~PagesUnreserver()
+               {
+                       if (fReservation != NULL)
+                               vm_page_unreserve_pages(fReservation);
+               }
+
+       private:
+               vm_page_reservation*    fReservation;
+       } pagesUnreserver(wiredPages > 0 ? &wiredPagesReservation : NULL);
+
        if (addressSpec == B_CLONE_ADDRESS) {
                addressSpec = B_EXACT_ADDRESS;
                *_address = (void*)source->Base();
@@ -2296,8 +2379,10 @@
        if (!sharedArea) {
                if ((source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) 
!= 0) {
                        // TODO: do something more useful if this fails!
-                       if (vm_copy_on_write_area(cache) < B_OK)
+                       if (vm_copy_on_write_area(cache,
+                                       wiredPages > 0 ? &wiredPagesReservation 
: NULL) < B_OK) {
                                panic("vm_copy_on_write_area() failed!\n");
+                       }
                }
        }
 
@@ -2409,7 +2494,7 @@
                        // There are consumers -- we have to insert a new 
cache. Fortunately
                        // vm_copy_on_write_area() does everything that's 
needed.
                        changePageProtection = false;
-                       status = vm_copy_on_write_area(cache);
+                       status = vm_copy_on_write_area(cache, NULL);
                } else {
                        // No consumers, so we don't need to insert a new one.
                        if (cache->source != NULL && cache->temporary) {
@@ -2593,7 +2678,7 @@
 int32
 vm_remove_all_page_mappings_if_unaccessed(struct vm_page *page)
 {
-       ASSERT(page->wired_count == 0);
+       ASSERT(page->WiredCount() == 0);
 
        if (page->accessed)
                return vm_clear_page_mapping_accessed_flags(page);

Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-07-10 14:39:15 UTC 
(rev 37459)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-07-10 15:08:13 UTC 
(rev 37460)
@@ -495,7 +495,7 @@
                        fOldState(page->State()),
                        fNewState(newState),
                        fBusy(page->busy),
-                       fWired(page->wired_count > 0),
+                       fWired(page->WiredCount() > 0),
                        fMapped(!page->mappings.IsEmpty()),
                        fAccessed(page->accessed),
                        fModified(page->modified)
@@ -698,7 +698,7 @@
        kprintf("cache_offset:    %" B_PRIuPHYSADDR "\n", page->cache_offset);
        kprintf("cache_next:      %p\n", page->cache_next);
        kprintf("state:           %s\n", page_state_to_string(page->State()));
-       kprintf("wired_count:     %d\n", page->wired_count);
+       kprintf("wired_count:     %d\n", page->WiredCount());
        kprintf("usage_count:     %d\n", page->usage_count);
        kprintf("busy:            %d\n", page->busy);
        kprintf("busy_writing:    %d\n", page->busy_writing);
@@ -818,7 +818,7 @@
                for (i = 0; page; i++, page = queue->Next(page)) {
                        kprintf("%p  %p  %-7s %8s  %5d  %5d\n", page, 
page->Cache(),
                                type, page_state_to_string(page->State()),
-                               page->wired_count, page->usage_count);
+                               page->WiredCount(), page->usage_count);
                }
        }
        return 0;
@@ -862,7 +862,7 @@
 
                if (pageState == PAGE_STATE_MODIFIED
                        && sPages[i].Cache() != NULL
-                       && sPages[i].Cache()->temporary && 
sPages[i].wired_count == 0) {
+                       && sPages[i].Cache()->temporary && 
sPages[i].WiredCount() == 0) {
                        swappableModified++;
                        if (sPages[i].usage_count == 0)
                                swappableModifiedInactive++;
@@ -948,7 +948,7 @@
 static void
 track_page_usage(vm_page* page)
 {
-       if (page->wired_count == 0) {
+       if (page->WiredCount() == 0) {
                sNextPageUsage[(int32)page->usage_count + 128]++;
                sNextPageUsagePageCount++;
        }
@@ -1981,7 +1981,7 @@
                        DEBUG_PAGE_ACCESS_START(page);
 
                        // Don't write back wired (locked) pages.
-                       if (page->wired_count > 0) {
+                       if (page->WiredCount() > 0) {
                                set_page_state(page, PAGE_STATE_ACTIVE);
                                DEBUG_PAGE_ACCESS_END(page);
                                continue;
@@ -2073,7 +2073,7 @@
        DEBUG_PAGE_ACCESS_START(page);
 
        VMCache* cache = page->Cache();
-       if (cache->temporary && page->wired_count == 0
+       if (cache->temporary && page->WiredCount() == 0
                        && cache->HasPage(page->cache_offset << PAGE_SHIFT)
                        && page->usage_count > 0) {
                // TODO: how to judge a page is highly active?
@@ -2242,9 +2242,10 @@
                // wouldn't notice when those would become unused and could 
thus be
                // moved to the cached list.
                int32 usageCount;
-               if (page->wired_count > 0 || page->usage_count > 0 || 
!cache->temporary)
+               if (page->WiredCount() > 0 || page->usage_count > 0
+                       || !cache->temporary) {
                        usageCount = vm_clear_page_mapping_accessed_flags(page);
-               else
+               } else
                        usageCount = 
vm_remove_all_page_mappings_if_unaccessed(page);
 
                if (usageCount > 0) {
@@ -2333,7 +2334,7 @@
                // Get the accessed count, clear the accessed/modified flags and
                // unmap the page, if it hasn't been accessed.
                int32 usageCount;
-               if (page->wired_count > 0)
+               if (page->WiredCount() > 0)
                        usageCount = vm_clear_page_mapping_accessed_flags(page);
                else
                        usageCount = 
vm_remove_all_page_mappings_if_unaccessed(page);
@@ -2869,19 +2870,7 @@
 
        // initialize the free page table
        for (uint32 i = 0; i < sNumPages; i++) {
-               sPages[i].physical_page_number = sPhysicalPageOffset + i;
-               sPages[i].InitState(PAGE_STATE_FREE);
-               new(&sPages[i].mappings) vm_page_mappings();
-               sPages[i].wired_count = 0;
-               sPages[i].usage_count = 0;
-               sPages[i].busy_writing = false;
-               sPages[i].SetCacheRef(NULL);
-               #if DEBUG_PAGE_QUEUE
-                       sPages[i].queue = NULL;
-               #endif
-               #if DEBUG_PAGE_ACCESS
-                       sPages[i].accessing_thread = -1;
-               #endif
+               sPages[i].Init(sPhysicalPageOffset + i);
                sFreePageQueue.Append(&sPages[i]);
        }
 


Other related posts: