[haiku-commits] r35155 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/arch/x86 src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 19 Jan 2010 09:34:14 +0100 (CET)

Author: bonefish
Date: 2010-01-19 09:34:14 +0100 (Tue, 19 Jan 2010)
New Revision: 35155
Changeset: http://dev.haiku-os.org/changeset/35155/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/VMCache.h
   haiku/trunk/headers/private/kernel/vm/vm_types.h
   haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
   haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp
   haiku/trunk/src/system/kernel/vm/VMCache.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_daemons.cpp
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
vm_page no longer points directly to its containing cache, but rather to a
VMCacheRef object which points to the cache. This makes it possible to
optimize VMCache::MoveAllPages(), since it no longer needs to iterate over
all pages to adjust their cache pointer. It can simply swap the cache refs
of the two caches instead.

Reduces the total -j8 Haiku image build time only marginally. The kernel time
drops almost 10%, though.
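
In outline, the new indirection looks like this -- a simplified, standalone
sketch, not the kernel code: the names, the std::vector page list, and the
absence of locking are all illustrative only.

#include <cstdint>
#include <utility>	// std::move, std::swap
#include <vector>

struct Cache;

// One VMCacheRef-like object per cache; pages point here instead of
// directly at the cache.
struct CacheRef {
	Cache*		cache;
	int32_t		ref_count;
};

struct Page {
	CacheRef*	cache_ref;

	Cache* GetCache() const
		{ return cache_ref != nullptr ? cache_ref->cache : nullptr; }
};

struct Cache {
	CacheRef*			ref;	// shared by all pages of this cache
	std::vector<Page*>	pages;

	// Old scheme: rewrite every page's cache pointer -- O(page count).
	// New scheme: take over the page list and swap the refs -- O(1).
	void MoveAllPages(Cache& from)
	{
		pages = std::move(from.pages);	// assumes *this held no pages
		from.pages.clear();
		std::swap(ref, from.ref);
		ref->cache = this;				// retarget both refs
		from.ref->cache = &from;
	}
};

Since every page reaches its cache only through the shared ref, retargeting
the two refs updates all pages of both caches at once; in the actual code
the swap happens under sCacheListLock, as the VMCache.cpp hunk below shows.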


Modified: haiku/trunk/headers/private/kernel/vm/VMCache.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-01-19 08:34:14 UTC (rev 35155)
@@ -61,6 +61,7 @@
 
 typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
 
+
 struct VMCache {
 public:
                                                                VMCache();
@@ -171,6 +172,7 @@
 
 private:
                        struct PageEventWaiter;
+                       friend struct VMCacheRef;
 
 private:
                        void                            _NotifyPageEvents(vm_page* page, uint32 events);
@@ -185,6 +187,7 @@
                        mutex                           fLock;
                        PageEventWaiter*        fPageEventWaiters;
                        void*                           fUserData;
+                       VMCacheRef*                     fCacheRef;
 };
 
 

Modified: haiku/trunk/headers/private/kernel/vm/vm_types.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-01-19 08:34:14 UTC (rev 35155)
@@ -26,6 +26,7 @@
 class AsyncIOCallback;
 struct vm_page_mapping;
 struct VMCache;
+struct VMCacheRef;
 typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;
 
 
@@ -71,12 +72,23 @@
 
 typedef uint32 page_num_t;
 
+
+struct VMCacheRef {
+                       VMCache*                        cache;
+                       int32                           ref_count;
+
+                                                               VMCacheRef(VMCache* cache);
+};
+
+
 struct vm_page {
        DoublyLinkedListLink<vm_page> queue_link;
 
        addr_t                                  physical_page_number;
 
-       VMCache*                                cache;
+private:
+       VMCacheRef*                             cache_ref;
+public:
        page_num_t                              cache_offset;
                                                                // in page size units
 
@@ -103,6 +115,13 @@
 
        int8                                    usage_count;
        uint16                                  wired_count;
+
+
+       VMCacheRef* CacheRef() const                    { return cache_ref; }
+       void SetCacheRef(VMCacheRef* cacheRef)  { this->cache_ref = cacheRef; }
+
+       VMCache* Cache() const
+               { return cache_ref != NULL ? cache_ref->cache : NULL; }
 };
 
 

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-01-19 08:34:14 UTC (rev 35155)
@@ -768,7 +768,7 @@
                if (page->wired_count == 0 && page->mappings.IsEmpty())
                        atomic_add(&gMappedPagesCount, -1);
 
-               if (unmapPages || page->cache != area->cache) {
+               if (unmapPages || page->Cache() != area->cache) {
                        addr_t address = area->Base()
                                + ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
 

Modified: haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp       2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp       2010-01-19 08:34:14 UTC (rev 35155)
@@ -1286,7 +1286,7 @@
 bool
 swap_free_page_swap_space(vm_page* page)
 {
-       VMAnonymousCache* cache = dynamic_cast<VMAnonymousCache*>(page->cache);
+       VMAnonymousCache* cache = dynamic_cast<VMAnonymousCache*>(page->Cache());
        if (cache == NULL)
                return false;
 

Modified: haiku/trunk/src/system/kernel/vm/VMCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-01-19 08:34:14 UTC (rev 35155)
@@ -491,15 +491,21 @@
        mutex_lock(&sCacheListLock);
 
        while (dontWait) {
-               VMCache* cache = page->cache;
-               if (cache == NULL || !cache->TryLock()) {
+               VMCacheRef* cacheRef = page->CacheRef();
+               if (cacheRef == NULL) {
                        mutex_unlock(&sCacheListLock);
                        return NULL;
                }
 
-               if (cache == page->cache) {
+               VMCache* cache = cacheRef->cache;
+               if (!cache->TryLock()) {
+                       mutex_unlock(&sCacheListLock);
+                       return NULL;
+               }
+
+               if (cacheRef == page->CacheRef()) {
+                       mutex_unlock(&sCacheListLock);
                        cache->AcquireRefLocked();
-                       mutex_unlock(&sCacheListLock);
                        return cache;
                }
 
@@ -508,26 +514,28 @@
        }
 
        while (true) {
-               VMCache* cache = page->cache;
-               if (cache == NULL) {
+               VMCacheRef* cacheRef = page->CacheRef();
+               if (cacheRef == NULL) {
                        mutex_unlock(&sCacheListLock);
                        return NULL;
                }
 
+               VMCache* cache = cacheRef->cache;
                if (!cache->SwitchLock(&sCacheListLock)) {
                        // cache has been deleted
                        mutex_lock(&sCacheListLock);
                        continue;
                }
 
-               if (cache == page->cache) {
+               mutex_lock(&sCacheListLock);
+               if (cache == page->Cache()) {
+                       mutex_unlock(&sCacheListLock);
                        cache->AcquireRefLocked();
                        return cache;
                }
 
                // the cache changed in the meantime
                cache->Unlock();
-               mutex_lock(&sCacheListLock);
        }
 }
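
The rewritten vm_cache_acquire_locked_page_cache() above follows an
optimistic re-check pattern: read the page's cache ref, lock the candidate
cache, then verify the page still resolves to the same cache before
returning. A schematic reading of that control flow, reusing the
Cache/CacheRef/Page sketch from the log message above and assuming a
per-cache std::mutex member named lock (the real function additionally
juggles TryLock(), SwitchLock() and sCacheListLock):

#include <mutex>

// Schematic only: error paths and the list-lock handling of the real
// vm_cache_acquire_locked_page_cache() are folded into plain lock calls.
Cache*
acquire_locked_page_cache(Page* page)
{
	while (true) {
		CacheRef* ref = page->cache_ref;
		if (ref == nullptr)
			return nullptr;			// page belongs to no cache

		Cache* cache = ref->cache;
		cache->lock.lock();			// may block; the page can move meanwhile

		if (cache == page->GetCache())
			return cache;			// still the right cache: done

		cache->lock.unlock();		// raced with a move; retry
	}
}

The re-check compares caches via page->GetCache() rather than the raw ref
pointer, because MoveAllPages() can retarget a ref while the cache lock is
being awaited; the dontWait path of the real function, which never drops
sCacheListLock, can compare the refs directly.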
 
@@ -535,6 +543,17 @@
 // #pragma mark - VMCache
 
 
+VMCacheRef::VMCacheRef(VMCache* cache)
+       :
+       cache(cache),
+       ref_count(1)
+{
+}
+
+
+// #pragma mark - VMCache
+
+
 bool
 VMCache::_IsMergeable() const
 {
@@ -545,12 +564,15 @@
 
 
 VMCache::VMCache()
+       :
+       fCacheRef(NULL)
 {
 }
 
 
 VMCache::~VMCache()
 {
+       delete fCacheRef;
 }
 
 
@@ -572,6 +594,10 @@
        type = cacheType;
        fPageEventWaiters = NULL;
 
+       fCacheRef = new(nogrow) VMCacheRef(this);
+       if (fCacheRef == NULL)
+               return B_NO_MEMORY;
+
 #if DEBUG_CACHE_LIST
        mutex_lock(&sCacheListLock);
 
@@ -607,8 +633,7 @@
 
                // remove it
                pages.Remove(page);
-               page->cache = NULL;
-               // TODO: we also need to remove all of the page's mappings!
+               page->SetCacheRef(NULL);
 
                TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
                        oldPage->physical_page_number));
@@ -689,7 +714,7 @@
        vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
 
 #if KDEBUG
-       if (page != NULL && page->cache != this)
+       if (page != NULL && page->Cache() != this)
                panic("page %p not in cache %p\n", page, this);
 #endif
 
@@ -704,9 +729,9 @@
                this, page, offset));
        AssertLocked();
 
-       if (page->cache != NULL) {
+       if (page->CacheRef() != NULL) {
                panic("insert page %p into cache %p: page cache is set to %p\n",
-                       page, this, page->cache);
+                       page, this, page->Cache());
        }
 
        T2(InsertPage(this, page, offset));
@@ -714,7 +739,7 @@
        page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
        page_count++;
        page->usage_count = 2;
-       page->cache = this;
+       page->SetCacheRef(fCacheRef);
 
 #if KDEBUG
        vm_page* otherPage = pages.Lookup(page->cache_offset);
@@ -739,16 +764,16 @@
        TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
        AssertLocked();
 
-       if (page->cache != this) {
+       if (page->Cache() != this) {
                panic("remove page %p from cache %p: page cache is set to 
%p\n", page,
-                       this, page->cache);
+                       this, page->Cache());
        }
 
        T2(RemovePage(this, page));
 
        pages.Remove(page);
-       page->cache = NULL;
        page_count--;
+       page->SetCacheRef(NULL);
 }
 
 
@@ -758,7 +783,7 @@
 void
 VMCache::MovePage(vm_page* page)
 {
-       VMCache* oldCache = page->cache;
+       VMCache* oldCache = page->Cache();
 
        AssertLocked();
        oldCache->AssertLocked();
@@ -771,7 +796,7 @@
        // insert here
        pages.Insert(page);
        page_count++;
-       page->cache = this;
+       page->SetCacheRef(fCacheRef);
        T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
 }
 
@@ -790,12 +815,20 @@
        page_count = fromCache->page_count;
        fromCache->page_count = 0;
 
+       // swap the VMCacheRefs
+       mutex_lock(&sCacheListLock);
+       std::swap(fCacheRef, fromCache->fCacheRef);
+       fCacheRef->cache = this;
+       fromCache->fCacheRef->cache = fromCache;
+       mutex_unlock(&sCacheListLock);
+
+#if VM_CACHE_TRACING >= 2
        for (VMCachePagesTree::Iterator it = pages.GetIterator();
                        vm_page* page = it.Next();) {
-               page->cache = this;
                T2(RemovePage(fromCache, page));
                T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
        }
+#endif
 }
 
 

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-19 08:34:14 UTC (rev 35155)
@@ -3686,7 +3686,7 @@
                // insert the new page into our cache
                cache->InsertPage(page, context.cacheOffset);
 
-       } else if (page->cache != context.topCache && context.isWrite) {
+       } else if (page->Cache() != context.topCache && context.isWrite) {
                // We have a page that has the data we want, but in the wrong cache
                // object so we need to copy it and stick it into the top cache.
                vm_page* sourcePage = page;
@@ -3709,7 +3709,7 @@
 
                context.cacheChainLocker.RelockCaches(true);
                sourcePage->state = sourcePageState;
-               sourcePage->cache->NotifyPageEvents(sourcePage, PAGE_EVENT_NOT_BUSY);
+               sourcePage->Cache()->NotifyPageEvents(sourcePage, PAGE_EVENT_NOT_BUSY);
 
                // insert the new page into our cache
                context.topCache->InsertPage(page, context.cacheOffset);
@@ -3824,7 +3824,7 @@
                // it's mapped in read-only, so that we cannot overwrite someone else's
                // data (copy-on-write)
                uint32 newProtection = protection;
-               if (context.page->cache != context.topCache && !isWrite)
+               if (context.page->Cache() != context.topCache && !isWrite)
                        newProtection &= ~(B_WRITE_AREA | B_KERNEL_WRITE_AREA);
 
                bool unmapPage = false;
@@ -5210,7 +5210,7 @@
                        // If the page is not in the topmost cache and write access is
                        // requested, we have to unmap it. Otherwise we can re-map it with
                        // the new protection.
-                       bool unmapPage = page->cache != topCache
+                       bool unmapPage = page->Cache() != topCache
                                && (protection & B_WRITE_AREA) != 0;
 
                        if (!unmapPage) {

Modified: haiku/trunk/src/system/kernel/vm/vm_daemons.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_daemons.cpp     2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/src/system/kernel/vm/vm_daemons.cpp     2010-01-19 08:34:14 UTC (rev 35155)
@@ -67,8 +67,7 @@
 {
        if (page->state == PAGE_STATE_WIRED || page->state == PAGE_STATE_BUSY
                || page->state == PAGE_STATE_FREE || page->state == PAGE_STATE_CLEAR
-               || page->state == PAGE_STATE_UNUSED || page->wired_count > 0
-               || page->cache == NULL)
+               || page->state == PAGE_STATE_UNUSED || page->wired_count > 0)
                return true;
 
        return false;
@@ -102,7 +101,7 @@
        if (fPage == NULL)
                return;
 
-       fPage->cache->ReleaseRefAndUnlock();
+       fPage->Cache()->ReleaseRefAndUnlock();
 
        fPage = NULL;
 }
@@ -342,8 +341,9 @@
 
        DEBUG_PAGE_ACCESS_START(page);
 
-       if (page->cache->temporary && page->wired_count == 0
-                       && page->cache->HasPage(page->cache_offset << PAGE_SHIFT)
+       VMCache* cache = page->Cache();
+       if (cache->temporary && page->wired_count == 0
+                       && cache->HasPage(page->cache_offset << PAGE_SHIFT)
                        && page->usage_count > 0) {
                // TODO: how to judge a page is highly active?
                if (swap_free_page_swap_space(page)) {

Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-19 04:31:35 UTC (rev 35154)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-19 08:34:14 UTC (rev 35155)
@@ -240,7 +240,7 @@
        public:
                WritePage(vm_page* page)
                        :
-                       fCache(page->cache),
+                       fCache(page->Cache()),
                        fPage(page)
                {
                        Initialized();
@@ -404,7 +404,7 @@
        kprintf("queue_next,prev: %p, %p\n", page->queue_link.next,
                page->queue_link.previous);
        kprintf("physical_number: %#lx\n", page->physical_page_number);
-       kprintf("cache:           %p\n", page->cache);
+       kprintf("cache:           %p\n", page->Cache());
        kprintf("cache_offset:    %ld\n", page->cache_offset);
        kprintf("cache_next:      %p\n", page->cache_next);
        kprintf("is dummy:        %d\n", page->is_dummy);
@@ -496,8 +496,8 @@
                const char *type = "none";
                int i;
 
-               if (page->cache != NULL) {
-                       switch (page->cache->type) {
+               if (page->Cache() != NULL) {
+                       switch (page->Cache()->type) {
                                case CACHE_TYPE_RAM:
                                        type = "RAM";
                                        break;
@@ -518,7 +518,7 @@
 
                kprintf("page        cache       type       state  wired  
usage\n");
                for (i = 0; page; i++, page = queue->Next(page)) {
-                       kprintf("%p  %p  %-7s %8s  %5d  %5d\n", page, page->cache,
+                       kprintf("%p  %p  %-7s %8s  %5d  %5d\n", page, page->Cache(),
                                type, page_state_to_string(page->state),
                                page->wired_count, page->usage_count);
                }
@@ -543,8 +543,8 @@
 
                counter[sPages[i].state]++;
 
-               if (sPages[i].state == PAGE_STATE_MODIFIED && sPages[i].cache != NULL
-                       && sPages[i].cache->temporary && sPages[i].wired_count == 0) {
+               if (sPages[i].state == PAGE_STATE_MODIFIED && sPages[i].Cache() != NULL
+                       && sPages[i].Cache()->temporary && sPages[i].wired_count == 0) {
                        swappableModified++;
                        if (sPages[i].usage_count < 0)
                                swappableModifiedInactive++;
@@ -610,7 +610,7 @@
                        return;
        }
 
-       if (page->cache != NULL)
+       if (page->CacheRef() != NULL)
                panic("to be freed page %p has cache", page);
        if (!page->mappings.IsEmpty() || page->wired_count > 0)
                panic("to be freed page %p has mappings", page);
@@ -715,7 +715,8 @@
                        sFreePageCondition.NotifyOne();
        }
 
-       if (page->cache != NULL && page->cache->temporary) {
+       VMCache* cache = page->Cache();
+       if (cache != NULL && cache->temporary) {
                if (pageState == PAGE_STATE_MODIFIED)
                        atomic_add(&sModifiedTemporaryPages, 1);
                else if (page->state == PAGE_STATE_MODIFIED)
@@ -730,8 +731,8 @@
                // page states and active pages have a cache that must be locked at
                // this point. So we rely on the fact that everyone must lock the cache
                // before trying to change/interpret the page state.
-               ASSERT(page->cache != NULL);
-               page->cache->AssertLocked();
+               ASSERT(cache != NULL);
+               cache->AssertLocked();
                page->state = pageState;
        } else {
                if (fromQueue != NULL)
@@ -767,7 +768,7 @@
                VMPageQueue& queue = state == PAGE_STATE_ACTIVE
                        ? sActivePageQueue : sInactivePageQueue;
                queue.AppendUnlocked(page);
-               if (page->cache->temporary)
+               if (page->Cache()->temporary)
                        atomic_add(&sModifiedTemporaryPages, -1);
        } else
                set_page_state(page, state);
@@ -1026,7 +1027,7 @@
                panic("re-setting page write wrapper that isn't completed");
 
        fPage = page;
-       fCache = page->cache;
+       fCache = page->Cache();
        fDequeuedPage = dequeuedPage;
        fIsActive = true;
 
@@ -1125,7 +1126,7 @@
 PageWriteTransfer::SetTo(PageWriterRun* run, vm_page* page, int32 maxPages)
 {
        fRun = run;
-       fCache = page->cache;
+       fCache = page->Cache();
        fOffset = page->cache_offset;
        fPageCount = 1;
        fMaxPages = maxPages;
@@ -1142,7 +1143,7 @@
 bool
 PageWriteTransfer::AddPage(vm_page* page)
 {
-       if (page->cache != fCache
+       if (page->Cache() != fCache
                || (fMaxPages >= 0 && fPageCount >= (uint32)fMaxPages))
                return false;
 
@@ -1271,7 +1272,7 @@
 
        if (fTransferCount == 0 || !fTransfers[fTransferCount - 1].AddPage(page)) {
                fTransfers[fTransferCount++].SetTo(this, page,
-                       page->cache->MaxPagesPerAsyncWrite());
+                       page->Cache()->MaxPagesPerAsyncWrite());
        }
 }
 
@@ -1360,7 +1361,7 @@
 
        vm_page marker;
        marker.is_dummy = true;
-       marker.cache = NULL;
+       marker.SetCacheRef(NULL);
        marker.state = PAGE_STATE_UNUSED;
 #if DEBUG_PAGE_QUEUE
        marker.queue = NULL;
@@ -1425,7 +1426,7 @@
 
                        DEBUG_PAGE_ACCESS_START(page);
 
-                       VMCache *cache = page->cache;
+                       VMCache *cache = page->Cache();
 
                        // Don't write back wired (locked) pages and don't write RAM pages
                        // until we're low on pages. Also avoid writing temporary pages that
@@ -1544,9 +1545,10 @@
        // try to lock the page's cache
        if (vm_cache_acquire_locked_page_cache(page, false) == NULL)
                return false;
+       VMCache* cache = page->Cache();
 
-       AutoLocker<VMCache> cacheLocker(page->cache, true);
-       MethodDeleter<VMCache> _2(page->cache, &VMCache::ReleaseRefLocked);
+       AutoLocker<VMCache> cacheLocker(cache, true);
+       MethodDeleter<VMCache> _2(cache, &VMCache::ReleaseRefLocked);
 
        // check again if that page is still a candidate
        if (page->state != PAGE_STATE_INACTIVE)
@@ -1574,7 +1576,7 @@
        //dprintf("  steal page %p from cache %p%s\n", page, page->cache,
        //      page->state == PAGE_STATE_INACTIVE ? "" : " (ACTIVE)");
 
-       page->cache->RemovePage(page);
+       cache->RemovePage(page);
 
        sInactivePageQueue.RemoveUnlocked(page);
        return true;
@@ -1587,7 +1589,7 @@
        while (true) {
                vm_page marker;
                marker.is_dummy = true;
-               marker.cache = NULL;
+               marker.SetCacheRef(NULL);
                marker.state = PAGE_STATE_UNUSED;
 #if DEBUG_PAGE_QUEUE
                marker.queue = NULL;
@@ -1890,7 +1892,7 @@
                sPages[i].wired_count = 0;
                sPages[i].usage_count = 0;
                sPages[i].busy_writing = false;
-               sPages[i].cache = NULL;
+               sPages[i].SetCacheRef(NULL);
                #if DEBUG_PAGE_QUEUE
                        sPages[i].queue = NULL;
                #endif
@@ -2160,7 +2162,7 @@
                }
        }
 
-       if (page->cache != NULL)
+       if (page->CacheRef() != NULL)
                panic("supposed to be free page %p has cache\n", page);
 
        DEBUG_PAGE_ACCESS_START(page);
@@ -2383,8 +2385,7 @@
 void
 vm_page_requeue(struct vm_page *page, bool tail)
 {
-       ASSERT(page->cache != NULL);
-       page->cache->AssertLocked();
+       ASSERT(page->Cache() != NULL);
        DEBUG_PAGE_ACCESS_CHECK(page);
 
        VMPageQueue *queue = NULL;

