[haiku-commits] r36373 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 20 Apr 2010 16:04:18 +0200 (CEST)

Author: bonefish
Date: 2010-04-20 16:04:18 +0200 (Tue, 20 Apr 2010)
New Revision: 36373
Changeset: http://dev.haiku-os.org/changeset/36373/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/VMCache.h
   haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp
   haiku/trunk/src/system/kernel/vm/VMAnonymousCache.h
   haiku/trunk/src/system/kernel/vm/VMCache.cpp
Log:
Made VMCache::Resize() virtual and let VMAnonymousCache override it to free
swap space when the cache shrinks. Currently the implementation still leaks
swap space of busy pages.


Modified: haiku/trunk/headers/private/kernel/vm/VMCache.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-04-20 13:56:45 UTC 
(rev 36372)
+++ haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-04-20 14:04:18 UTC 
(rev 36373)
@@ -110,7 +110,7 @@
                        status_t                        WriteModified();
                        status_t                        
SetMinimalCommitment(off_t commitment,
                                                                        int 
priority);
-                       status_t                        Resize(off_t newSize, 
int priority);
+       virtual status_t                        Resize(off_t newSize, int 
priority);
 
                        status_t                        
FlushAndRemoveAllPages();
 

Modified: haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp       2010-04-20 
13:56:45 UTC (rev 36372)
+++ haiku/trunk/src/system/kernel/vm/VMAnonymousCache.cpp       2010-04-20 
14:04:18 UTC (rev 36373)
@@ -464,6 +464,65 @@
 
 
 status_t
+VMAnonymousCache::Resize(off_t newSize, int priority)
+{
+       // If the cache size shrinks, drop all swap pages beyond the new size.
+       if (fAllocatedSwapSize > 0) {
+               page_num_t oldPageCount = (virtual_end + B_PAGE_SIZE - 1) >> 
PAGE_SHIFT;
+               swap_block* swapBlock = NULL;
+
+               for (page_num_t pageIndex = (newSize + B_PAGE_SIZE - 1) >> 
PAGE_SHIFT;
+                               pageIndex < oldPageCount && fAllocatedSwapSize 
> 0;
+                               pageIndex++) {
+                       WriteLocker locker(sSwapHashLock);
+
+                       // Get the swap slot index for the page.
+                       swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
+                       if (swapBlock == NULL || blockIndex == 0) {
+                               swap_hash_key key = { this, pageIndex };
+                               swapBlock = sSwapHashTable.Lookup(key);
+
+                               if (swapBlock == NULL) {
+                                       pageIndex = ROUNDUP(pageIndex + 1, 
SWAP_BLOCK_PAGES);
+                                       continue;
+                               }
+                       }
+
+                       swap_addr_t slotIndex = 
swapBlock->swap_slots[blockIndex];
+                       vm_page* page;
+                       if (slotIndex != SWAP_SLOT_NONE
+                               && ((page = LookupPage((off_t)pageIndex * 
B_PAGE_SIZE)) == NULL
+                                       || !page->busy)) {
+                                       // TODO: We skip (i.e. leak) swap space 
of busy pages, since
+                                       // there could be I/O going on (paging 
in/out). Waiting is
+                                       // not an option as 1. unlocking the 
cache means that new
+                                       // swap pages could be added in a range 
we've already
+                                       // cleared (since the cache still has 
the old size) and 2.
+                                       // we'd risk a deadlock in case we come 
from the file cache
+                                       // and the FS holds the node's 
write-lock. We should mark
+                                       // the page invalid and let the one 
responsible clean up.
+                                       // There's just no such mechanism yet.
+                               swap_slot_dealloc(slotIndex, 1);
+                               fAllocatedSwapSize -= B_PAGE_SIZE;
+
+                               swapBlock->swap_slots[blockIndex] = 
SWAP_SLOT_NONE;
+                               if (--swapBlock->used == 0) {
+                                       // All swap pages have been freed -- we 
can discard the swap
+                                       // block.
+                                       
sSwapHashTable.RemoveUnchecked(swapBlock);
+                                       object_cache_free(sSwapBlockCache, 
swapBlock,
+                                               CACHE_DONT_WAIT_FOR_MEMORY
+                                                       | 
CACHE_DONT_LOCK_KERNEL_SPACE);
+                               }
+                       }
+               }
+       }
+
+       return VMCache::Resize(newSize, priority);
+}
+
+
+status_t
 VMAnonymousCache::Commit(off_t size, int priority)
 {
        TRACE("%p->VMAnonymousCache::Commit(%lld)\n", this, size);

Modified: haiku/trunk/src/system/kernel/vm/VMAnonymousCache.h
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMAnonymousCache.h 2010-04-20 13:56:45 UTC 
(rev 36372)
+++ haiku/trunk/src/system/kernel/vm/VMAnonymousCache.h 2010-04-20 14:04:18 UTC 
(rev 36373)
@@ -38,6 +38,8 @@
                                                                        int32 
numGuardPages,
                                                                        uint32 
allocationFlags);
 
+       virtual status_t                        Resize(off_t newSize, int 
priority);
+
        virtual status_t                        Commit(off_t size, int 
priority);
        virtual bool                            HasPage(off_t offset);
        virtual bool                            DebugHasPage(off_t offset);

Modified: haiku/trunk/src/system/kernel/vm/VMCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-04-20 13:56:45 UTC 
(rev 36372)
+++ haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-04-20 14:04:18 UTC 
(rev 36373)
@@ -1038,8 +1038,6 @@
 status_t
 VMCache::Resize(off_t newSize, int priority)
 {
-// TODO: This method must be virtual as VMAnonymousCache needs to free 
allocated
-// swap pages!
        TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
                this, newSize, this->virtual_end));
        this->AssertLocked();


Other related posts:

  • » [haiku-commits] r36373 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/vm - ingo_weinhold