[haiku-commits] Change in haiku[master]: VMAnonymousCache: Factor out page freeing from Resize/Rebase.

  • From: Gerrit <review@xxxxxxxxxxxxxxxxxxx>
  • To: waddlesplash <waddlesplash@xxxxxxxxx>, haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 5 May 2020 19:31:23 +0000

From Michael Lotz <mmlr@xxxxxxxx>:

Michael Lotz has uploaded this change for review. ( https://review.haiku-os.org/c/haiku/+/2582 )


Change subject: VMAnonymousCache: Factor out page freeing from Resize/Rebase.
......................................................................

VMAnonymousCache: Factor out page freeing from Resize/Rebase.

Except for the offsets, the code was identical. Also simplify the
conditions with early returns.
---
M src/system/kernel/vm/VMAnonymousCache.cpp
M src/system/kernel/vm/VMAnonymousCache.h
2 files changed, 60 insertions(+), 104 deletions(-)



  git pull ssh://git.haiku-os.org:22/haiku refs/changes/82/2582/1
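
A note on the offset arithmetic in the change below: both call sites pass byte
offsets, and _FreeSwapPageRange() converts them to page indices with a right
shift, iterating the half-open range [fromOffset >> PAGE_SHIFT,
toOffset >> PAGE_SHIFT). The following standalone sketch is not part of the
patch; kPageShift = 12 (4 KiB pages) and the sample sizes are assumed values
for illustration only.

// Standalone sketch of the page-range arithmetic used by the two call
// sites in the diff below. kPageShift/kPageSize and the sample offsets
// are assumptions, not values taken from the patch.
#include <cstdint>
#include <cstdio>

static const int kPageShift = 12;                 // assumed 4 KiB pages
static const int64_t kPageSize = int64_t(1) << kPageShift;

int main()
{
	// Resize(): adding B_PAGE_SIZE - 1 before the shift rounds up, so a
	// partially used page holding data below newSize is not freed.
	int64_t newSize = 5000;                       // hypothetical new size
	int64_t virtualEnd = 5 * kPageSize;           // hypothetical old end
	int64_t from = (newSize + kPageSize - 1) >> kPageShift;   // == 2
	int64_t to = (virtualEnd + kPageSize - 1) >> kPageShift;  // == 5
	printf("Resize frees swap pages [%lld, %lld)\n",
		(long long)from, (long long)to);

	// Rebase(): passing newBase unadjusted rounds down, so the page that
	// still holds data at or above newBase is kept.
	int64_t virtualBase = 0;                      // hypothetical old base
	int64_t newBase = 2 * kPageSize + 100;
	from = virtualBase >> kPageShift;                          // == 0
	to = newBase >> kPageShift;                                // == 2
	printf("Rebase frees swap pages [%lld, %lld)\n",
		(long long)from, (long long)to);
	return 0;
}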

diff --git a/src/system/kernel/vm/VMAnonymousCache.cpp b/src/system/kernel/vm/VMAnonymousCache.cpp
index f07a90d..0d4f776 100644
--- a/src/system/kernel/vm/VMAnonymousCache.cpp
+++ b/src/system/kernel/vm/VMAnonymousCache.cpp
@@ -486,61 +486,65 @@
 }


+void
+VMAnonymousCache::_FreeSwapPageRange(off_t fromOffset, off_t toOffset)
+{
+       swap_block* swapBlock = NULL;
+       off_t toIndex = toOffset >> PAGE_SHIFT;
+       for (off_t pageIndex = fromOffset >> PAGE_SHIFT;
+               pageIndex < toIndex && fAllocatedSwapSize > 0; pageIndex++) {
+
+               WriteLocker locker(sSwapHashLock);
+
+               // Get the swap slot index for the page.
+               swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
+               if (swapBlock == NULL || blockIndex == 0) {
+                       swap_hash_key key = { this, pageIndex };
+                       swapBlock = sSwapHashTable.Lookup(key);
+
+                       if (swapBlock == NULL) {
+                               pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES);
+                               continue;
+                       }
+               }
+
+               swap_addr_t slotIndex = swapBlock->swap_slots[blockIndex];
+               if (slotIndex == SWAP_SLOT_NONE)
+                       continue;
+
+               vm_page* page = LookupPage(pageIndex * B_PAGE_SIZE);
+               if (page != NULL && page->busy) {
+                       // TODO: We skip (i.e. leak) swap space of busy pages, since
+                       // there could be I/O going on (paging in/out). Waiting is
+                       // not an option as 1. unlocking the cache means that new
+                       // swap pages could be added in a range we've already
+                       // cleared (since the cache still has the old size) and 2.
+                       // we'd risk a deadlock in case we come from the file cache
+                       // and the FS holds the node's write-lock. We should mark
+                       // the page invalid and let the one responsible clean up.
+                       // There's just no such mechanism yet.
+                       continue;
+               }
+
+               swap_slot_dealloc(slotIndex, 1);
+               fAllocatedSwapSize -= B_PAGE_SIZE;
+
+               swapBlock->swap_slots[blockIndex] = SWAP_SLOT_NONE;
+               if (--swapBlock->used == 0) {
+                       // All swap pages have been freed -- we can discard the swap block.
+                       sSwapHashTable.RemoveUnchecked(swapBlock);
+                       object_cache_free(sSwapBlockCache, swapBlock,
+                               CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
+               }
+       }
+}
+
+
 status_t
 VMAnonymousCache::Resize(off_t newSize, int priority)
 {
-       // If the cache size shrinks, drop all swap pages beyond the new size.
-       if (fAllocatedSwapSize > 0) {
-               off_t oldPageCount = (virtual_end + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
-               swap_block* swapBlock = NULL;
-
-               for (off_t pageIndex = (newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
-                       pageIndex < oldPageCount && fAllocatedSwapSize > 0; pageIndex++) {
-
-                       WriteLocker locker(sSwapHashLock);
-
-                       // Get the swap slot index for the page.
-                       swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
-                       if (swapBlock == NULL || blockIndex == 0) {
-                               swap_hash_key key = { this, pageIndex };
-                               swapBlock = sSwapHashTable.Lookup(key);
-
-                               if (swapBlock == NULL) {
-                                       pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES);
-                                       continue;
-                               }
-                       }
-
-                       swap_addr_t slotIndex = swapBlock->swap_slots[blockIndex];
-                       vm_page* page;
-                       if (slotIndex != SWAP_SLOT_NONE
-                               && ((page = LookupPage((off_t)pageIndex * B_PAGE_SIZE)) == NULL
-                                       || !page->busy)) {
-                                       // TODO: We skip (i.e. leak) swap space of busy pages, since
-                                       // there could be I/O going on (paging in/out). Waiting is
-                                       // not an option as 1. unlocking the cache means that new
-                                       // swap pages could be added in a range we've already
-                                       // cleared (since the cache still has the old size) and 2.
-                                       // we'd risk a deadlock in case we come from the file cache
-                                       // and the FS holds the node's write-lock. We should mark
-                                       // the page invalid and let the one responsible clean up.
-                                       // There's just no such mechanism yet.
-                               swap_slot_dealloc(slotIndex, 1);
-                               fAllocatedSwapSize -= B_PAGE_SIZE;
-
-                               swapBlock->swap_slots[blockIndex] = SWAP_SLOT_NONE;
-                               if (--swapBlock->used == 0) {
-                                       // All swap pages have been freed -- we can discard the swap
-                                       // block.
-                                       sSwapHashTable.RemoveUnchecked(swapBlock);
-                                       object_cache_free(sSwapBlockCache, swapBlock,
-                                               CACHE_DONT_WAIT_FOR_MEMORY
-                                                       | CACHE_DONT_LOCK_KERNEL_SPACE);
-                               }
-                       }
-               }
-       }
-
+       _FreeSwapPageRange(newSize + B_PAGE_SIZE - 1,
+               virtual_end + B_PAGE_SIZE - 1);
        return VMCache::Resize(newSize, priority);
 }

@@ -548,58 +552,7 @@
 status_t
 VMAnonymousCache::Rebase(off_t newBase, int priority)
 {
-       // If the cache size shrinks, drop all swap pages beyond the new size.
-       if (fAllocatedSwapSize > 0) {
-               off_t basePage = newBase >> PAGE_SHIFT;
-               swap_block* swapBlock = NULL;
-
-               for (off_t pageIndex = 0;
-                               pageIndex < basePage && fAllocatedSwapSize > 0;
-                               pageIndex++) {
-                       WriteLocker locker(sSwapHashLock);
-
-                       // Get the swap slot index for the page.
-                       swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
-                       if (swapBlock == NULL || blockIndex == 0) {
-                               swap_hash_key key = { this, pageIndex };
-                               swapBlock = sSwapHashTable.Lookup(key);
-
-                               if (swapBlock == NULL) {
-                                       pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES);
-                                       continue;
-                               }
-                       }
-
-                       swap_addr_t slotIndex = swapBlock->swap_slots[blockIndex];
-                       vm_page* page;
-                       if (slotIndex != SWAP_SLOT_NONE
-                               && ((page = LookupPage(pageIndex * B_PAGE_SIZE)) == NULL
-                                       || !page->busy)) {
-                                       // TODO: We skip (i.e. leak) swap space of busy pages, since
-                                       // there could be I/O going on (paging in/out). Waiting is
-                                       // not an option as 1. unlocking the cache means that new
-                                       // swap pages could be added in a range we've already
-                                       // cleared (since the cache still has the old size) and 2.
-                                       // we'd risk a deadlock in case we come from the file cache
-                                       // and the FS holds the node's write-lock. We should mark
-                                       // the page invalid and let the one responsible clean up.
-                                       // There's just no such mechanism yet.
-                               swap_slot_dealloc(slotIndex, 1);
-                               fAllocatedSwapSize -= B_PAGE_SIZE;
-
-                               swapBlock->swap_slots[blockIndex] = SWAP_SLOT_NONE;
-                               if (--swapBlock->used == 0) {
-                                       // All swap pages have been freed -- we can discard the swap
-                                       // block.
-                                       sSwapHashTable.RemoveUnchecked(swapBlock);
-                                       object_cache_free(sSwapBlockCache, swapBlock,
-                                               CACHE_DONT_WAIT_FOR_MEMORY
-                                                       | CACHE_DONT_LOCK_KERNEL_SPACE);
-                               }
-                       }
-               }
-       }
-
+       _FreeSwapPageRange(virtual_base, newBase);
        return VMCache::Rebase(newBase, priority);
 }

diff --git a/src/system/kernel/vm/VMAnonymousCache.h b/src/system/kernel/vm/VMAnonymousCache.h
index 4e17d53..f2df602 100644
--- a/src/system/kernel/vm/VMAnonymousCache.h
+++ b/src/system/kernel/vm/VMAnonymousCache.h
@@ -88,6 +88,9 @@
                                                                        VMAnonymousCache* source);
                        void                            _MergeSwapPages(VMAnonymousCache* source);

+                       void                            _FreeSwapPageRange(off_t fromOffset,
+                                                                       off_t toOffset);
+
 private:
        friend bool swap_free_page_swap_space(vm_page* page);


--
To view, visit https://review.haiku-os.org/c/haiku/+/2582
To unsubscribe, or for help writing mail filters, visit 
https://review.haiku-os.org/settings

Gerrit-Project: haiku
Gerrit-Branch: master
Gerrit-Change-Id: Ia7c44578ab06e571f6bf992db6c6d3493fd93230
Gerrit-Change-Number: 2582
Gerrit-PatchSet: 1
Gerrit-Owner: Michael Lotz <mmlr@xxxxxxxx>
Gerrit-MessageType: newchange
