From Michael Lotz <mmlr@xxxxxxxx>:
Michael Lotz has uploaded this change for review. (
https://review.haiku-os.org/c/haiku/+/2843 ;)
Change subject: kernel/vm: Add discard_address_range that discards pages.
......................................................................
kernel/vm: Add discard_address_range that discards pages.
Pages in the given range are unmapped and freed without getting written
back anywhere. It can be used whenever a caller does not care about the
data in the given range anymore and wants to reduce page pressure.
---
M headers/private/kernel/vm/VMCache.h
M src/system/kernel/vm/VMAnonymousCache.cpp
M src/system/kernel/vm/VMAnonymousCache.h
M src/system/kernel/vm/VMCache.cpp
M src/system/kernel/vm/vm.cpp
5 files changed, 71 insertions(+), 0 deletions(-)
git pull ssh://git.haiku-os.org:22/haiku refs/changes/43/2843/1
diff --git a/headers/private/kernel/vm/VMCache.h
b/headers/private/kernel/vm/VMCache.h
index 0aee4f9..2db12cf 100644
--- a/headers/private/kernel/vm/VMCache.h
+++ b/headers/private/kernel/vm/VMCache.h
@@ -135,6 +135,8 @@
virtual status_t Adopt(VMCache* source, off_t
offset, off_t size,
off_t
newOffset);
+ virtual status_t Discard(off_t offset, off_t
size);
+
status_t
FlushAndRemoveAllPages();
void* UserData() {
return fUserData; }
diff --git a/src/system/kernel/vm/VMAnonymousCache.cpp
b/src/system/kernel/vm/VMAnonymousCache.cpp
index f801dcc..1c45c4b 100644
--- a/src/system/kernel/vm/VMAnonymousCache.cpp
+++ b/src/system/kernel/vm/VMAnonymousCache.cpp
@@ -554,6 +554,14 @@
}
+status_t
+VMAnonymousCache::Discard(off_t offset, off_t size)
+{
+ _FreeSwapPageRange(offset, offset + size);
+ return VMCache::Discard(offset, size);
+}
+
+
/*! Moves the swap pages for the given range from the source cache into this
cache. Both caches must be locked.
*/
diff --git a/src/system/kernel/vm/VMAnonymousCache.h
b/src/system/kernel/vm/VMAnonymousCache.h
index 774c342..d463a97 100644
--- a/src/system/kernel/vm/VMAnonymousCache.h
+++ b/src/system/kernel/vm/VMAnonymousCache.h
@@ -44,6 +44,8 @@
virtual status_t Adopt(VMCache* source, off_t
offset,
off_t
size, off_t newOffset);
+ virtual status_t Discard(off_t offset, off_t
size);
+
virtual status_t Commit(off_t size, int
priority);
virtual bool HasPage(off_t offset);
virtual bool DebugHasPage(off_t offset);
diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp
index d820333..46aace8 100644
--- a/src/system/kernel/vm/VMCache.cpp
+++ b/src/system/kernel/vm/VMCache.cpp
@@ -1247,6 +1247,19 @@
}
/*!	Discards pages in the given range. The pages are unmapped and freed
	without being written back anywhere; callers must not care about the
	data in the range anymore.
	The cache must be locked when calling this function.
	\param offset Start of the range (in bytes) within the cache.
	\param size Length of the range in bytes.
	\return \c B_OK (the operation cannot fail).
*/
status_t
VMCache::Discard(off_t offset, off_t size)
{
	// Note: the end page is rounded up, so a partially covered last page is
	// discarded entirely. NOTE(review): confirm callers pass page-aligned
	// ranges, or that discarding the partial page is intended.
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	// _FreePageRange() presumably may have to bail out early (e.g. when
	// encountering a busy page it must wait on, temporarily unlocking the
	// cache), so we restart the iteration from startPage until it reports
	// the whole range as done. NOTE(review): _FreePageRange() is not visible
	// here -- confirm its return/endPage update semantics.
	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
		;

	return B_OK;
}
+
+
/*! You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
index c43a18f..9b25c93 100644
--- a/src/system/kernel/vm/vm.cpp
+++ b/src/system/kernel/vm/vm.cpp
@@ -865,6 +865,52 @@
}
+static status_t
+discard_area_range(VMArea* area, addr_t address, addr_t size)
+{
+ addr_t offset;
+ if (!intersect_area(area, address, size, offset))
+ return B_OK;
+
+ // If someone else uses the area's cache or it's not an anonymous
cache, we
+ // can't discard.
+ VMCache* cache = vm_area_get_locked_cache(area);
+ if (cache->areas != area || area->cache_next != NULL
+ || !cache->consumers.IsEmpty() || cache->type !=
CACHE_TYPE_RAM) {
+ return B_OK;
+ }
+
+ VMCacheChainLocker cacheChainLocker(cache);
+ cacheChainLocker.LockAllSourceCaches();
+
+ unmap_pages(area, address, size);
+
+ // Since VMCache::Discard() can temporarily drop the lock, we must
+ // unlock all lower caches to prevent locking order inversion.
+ cacheChainLocker.Unlock(cache);
+ cache->Discard(cache->virtual_base + offset, size);
+ cache->ReleaseRefAndUnlock();
+
+ return B_OK;
+}
+
+
+static status_t
+discard_address_range(VMAddressSpace* addressSpace, addr_t address, addr_t
size,
+ bool kernel)
+{
+ for (VMAddressSpace::AreaRangeIterator it
+ = addressSpace->GetAreaRangeIterator(address, size);
+ VMArea* area = it.Next();) {
+ status_t error = discard_area_range(area, address, size);
+ if (error != B_OK)
+ return error;
+ }
+
+ return B_OK;
+}
+
+
/*! You need to hold the lock of the cache and the write lock of the address
space when calling this function.
Note, that in case of error your cache will be temporarily unlocked.
--
To view, visit https://review.haiku-os.org/c/haiku/+/2843
To unsubscribe, or for help writing mail filters, visit
https://review.haiku-os.org/settings
Gerrit-Project: haiku
Gerrit-Branch: master
Gerrit-Change-Id: I8bcce68fab278efef710d3714677e1d463504a56
Gerrit-Change-Number: 2843
Gerrit-PatchSet: 1
Gerrit-Owner: Michael Lotz <mmlr@xxxxxxxx>
Gerrit-MessageType: newchange