[haiku-commits] Change in haiku[master]: kernel/vm: Make vm_copy_area take page protections into account.

  • From: Gerrit <review@xxxxxxxxxxxxxxxxxxx>
  • To: waddlesplash <waddlesplash@xxxxxxxxx>, haiku-commits@xxxxxxxxxxxxx
  • Date: Sun, 23 Aug 2020 00:53:27 +0000

From Michael Lotz <mmlr@xxxxxxxx>:

Michael Lotz has uploaded this change for review. ( 
https://review.haiku-os.org/c/haiku/+/3166 ;)


Change subject: kernel/vm: Make vm_copy_area take page protections into account.
......................................................................

kernel/vm: Make vm_copy_area take page protections into account.

When copying an area with vm_copy_area only the new protection would be
applied and any possibly existing page protections on the source area
were ignored.

For areas with stricter area protection than page protection, this led
to faults when accessing the copy. In the opposite case it led to too
relaxed protection. Currently, the only user of vm_copy_area is
fork_team which goes through all areas of the parent and copies them to
the new team. Hence page protections were ignored on all forked teams.

Remove the protection argument and instead always carry over the source
area protection and duplicate the page protections when present.

Also make sure to take the page protections into account for deciding
whether or not the copy is writable and therefore needs to have copy on
write semantics.
---
M headers/private/kernel/vm/vm.h
M src/system/kernel/team.cpp
M src/system/kernel/vm/vm.cpp
3 files changed, 35 insertions(+), 15 deletions(-)



  git pull ssh://git.haiku-os.org:22/haiku refs/changes/66/3166/1

diff --git a/headers/private/kernel/vm/vm.h b/headers/private/kernel/vm/vm.h
index 9cbf984..715a75d 100644
--- a/headers/private/kernel/vm/vm.h
+++ b/headers/private/kernel/vm/vm.h
@@ -113,7 +113,7 @@
 area_id vm_create_null_area(team_id team, const char *name, void **address,
                        uint32 addressSpec, addr_t size, uint32 flags);
 area_id vm_copy_area(team_id team, const char *name, void **_address,
-                       uint32 addressSpec, uint32 protection, area_id 
sourceID);
+                       uint32 addressSpec, area_id sourceID);
 area_id vm_clone_area(team_id team, const char *name, void **address,
                        uint32 addressSpec, uint32 protection, uint32 mapping,
                        area_id sourceArea, bool kernel);
diff --git a/src/system/kernel/team.cpp b/src/system/kernel/team.cpp
index ec5339a..8fc10a8 100644
--- a/src/system/kernel/team.cpp
+++ b/src/system/kernel/team.cpp
@@ -2138,7 +2138,7 @@
                } else {
                        void* address;
                        area_id area = vm_copy_area(team->address_space->ID(), 
info.name,
-                               &address, B_CLONE_ADDRESS, info.protection, 
info.area);
+                               &address, B_CLONE_ADDRESS, info.area);
                        if (area < B_OK) {
                                status = area;
                                break;
diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
index cc6aff5..36d8a31 100644
--- a/src/system/kernel/vm/vm.cpp
+++ b/src/system/kernel/vm/vm.cpp
@@ -2535,17 +2535,8 @@

 area_id
 vm_copy_area(team_id team, const char* name, void** _address,
-       uint32 addressSpec, uint32 protection, area_id sourceID)
+       uint32 addressSpec, area_id sourceID)
 {
-       bool writableCopy = (protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) 
!= 0;
-
-       if ((protection & B_KERNEL_PROTECTION) == 0) {
-               // set the same protection for the kernel as for userland
-               protection |= B_KERNEL_READ_AREA;
-               if (writableCopy)
-                       protection |= B_KERNEL_WRITE_AREA;
-       }
-
        // Do the locking: target address space, all address spaces associated 
with
        // the source cache, and the cache itself.
        MultiAddressSpaceLocker locker;
@@ -2618,6 +2609,30 @@
                vm_page_reservation*    fReservation;
        } pagesUnreserver(wiredPages > 0 ? &wiredPagesReservation : NULL);

+       bool writableCopy
+               = (source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) 
!= 0;
+       uint8* targetPageProtections = NULL;
+
+       if (source->page_protections != NULL) {
+               size_t bytes = (source->Size() / B_PAGE_SIZE + 1) / 2;
+               targetPageProtections = (uint8*)malloc_etc(bytes,
+                       HEAP_DONT_LOCK_KERNEL_SPACE);
+               if (targetPageProtections == NULL)
+                       return B_NO_MEMORY;
+
+               memcpy(targetPageProtections, source->page_protections, bytes);
+
+               if (!writableCopy) {
+                       for (size_t i = 0; i < bytes; i++) {
+                               if ((targetPageProtections[i]
+                                               & (B_WRITE_AREA | B_WRITE_AREA 
<< 4)) != 0) {
+                                       writableCopy = true;
+                                       break;
+                               }
+                       }
+               }
+       }
+
        if (addressSpec == B_CLONE_ADDRESS) {
                addressSpec = B_EXACT_ADDRESS;
                *_address = (void*)source->Base();
@@ -2631,12 +2646,17 @@
        addressRestrictions.address = *_address;
        addressRestrictions.address_specification = addressSpec;
        status = map_backing_store(targetAddressSpace, cache, 
source->cache_offset,
-               name, source->Size(), source->wiring, protection,
+               name, source->Size(), source->wiring, source->protection,
                sharedArea ? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP,
                writableCopy ? 0 : CREATE_AREA_DONT_COMMIT_MEMORY,
                &addressRestrictions, true, &target, _address);
-       if (status < B_OK)
+       if (status < B_OK) {
+               free_etc(targetPageProtections, HEAP_DONT_LOCK_KERNEL_SPACE);
                return status;
+       }
+
+       if (targetPageProtections != NULL)
+               target->page_protections = targetPageProtections;

        if (sharedArea) {
                // The new area uses the old area's cache, but 
map_backing_store()
@@ -2647,7 +2667,7 @@
        // If the source area is writable, we need to move it one layer up as 
well

        if (!sharedArea) {
-               if ((source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) 
!= 0) {
+               if (writableCopy) {
                        // TODO: do something more useful if this fails!
                        if (vm_copy_on_write_area(cache,
                                        wiredPages > 0 ? &wiredPagesReservation 
: NULL) < B_OK) {

--
To view, visit https://review.haiku-os.org/c/haiku/+/3166
To unsubscribe, or for help writing mail filters, visit 
https://review.haiku-os.org/settings

Gerrit-Project: haiku
Gerrit-Branch: master
Gerrit-Change-Id: I52f295f2aaa66e31b4900b754343b3be9a19ba30
Gerrit-Change-Number: 3166
Gerrit-PatchSet: 1
Gerrit-Owner: Michael Lotz <mmlr@xxxxxxxx>
Gerrit-MessageType: newchange

Other related posts:

  • » [haiku-commits] Change in haiku[master]: kernel/vm: Make vm_copy_area take page protections into account. - Gerrit