[haiku-commits] r34751 - in haiku/trunk: headers/private/kernel/vm src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 22 Dec 2009 23:00:36 +0100 (CET)

Author: bonefish
Date: 2009-12-22 23:00:35 +0100 (Tue, 22 Dec 2009)
New Revision: 34751
Changeset: http://dev.haiku-os.org/changeset/34751/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/VMCache.h
   haiku/trunk/src/system/kernel/vm/VMCache.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
Log:
* Added method VMCache::TransferAreas(), which moves the areas from one cache
  to another. The code originates from vm_copy_on_write_area(); unlike the old
  inline code, it now also generates the VM cache tracing entries. A usage
  sketch follows below.
* count_writable_areas() -> VMCache::CountWritableAreas()
* Added debugger command "cache_stack", which is available when VM cache
  tracing is enabled. It prints the source caches of a given cache or area at
  the time of a specified tracing entry. An example invocation follows below.

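And a hypothetical kernel debugger session with the new command (the addresses
and the tracing entry index are made-up values; the command is only registered
when VM_CACHE_TRACING is enabled):

	kdebug> cache_stack area 0x8073f2e0 24680
	cache stack for area 0x8073f2e0 at 24680:
	  0x80745a00
	  0x80744800
	  0x80743000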

Modified: haiku/trunk/headers/private/kernel/vm/VMCache.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/VMCache.h     2009-12-22 21:33:41 UTC (rev 34750)
+++ haiku/trunk/headers/private/kernel/vm/VMCache.h     2009-12-22 22:00:35 UTC (rev 34751)
@@ -101,6 +101,8 @@
 
                        status_t                        InsertAreaLocked(VMArea* area);
                        status_t                        RemoveArea(VMArea* area);
+                       void                            TransferAreas(VMCache* fromCache);
+                       uint32                          CountWritableAreas(VMArea* ignoreArea) const;
 
                        status_t                        WriteModified();
                        status_t                        SetMinimalCommitment(off_t commitment);
@@ -204,6 +206,7 @@
 #endif
 
 status_t vm_cache_init(struct kernel_args *args);
+void vm_cache_init_post_heap();
 struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
        bool dontWait);
 

Modified: haiku/trunk/src/system/kernel/vm/VMCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMCache.cpp        2009-12-22 21:33:41 UTC (rev 34750)
+++ haiku/trunk/src/system/kernel/vm/VMCache.cpp        2009-12-22 22:00:35 UTC (rev 34751)
@@ -79,6 +79,11 @@
                }
 #endif
 
+               VMCache* Cache() const
+               {
+                       return fCache;
+               }
+
        protected:
                VMCache*        fCache;
 #if VM_CACHE_TRACING_STACK_TRACE
@@ -182,6 +187,11 @@
                                fConsumer);
                }
 
+               VMCache* Consumer() const
+               {
+                       return fConsumer;
+               }
+
        private:
                VMCache*        fConsumer;
 };
@@ -245,6 +255,11 @@
                                fArea);
                }
 
+               VMArea* Area() const
+               {
+                       return fArea;
+               }
+
        private:
                VMArea* fArea;
 };
@@ -333,6 +348,115 @@
 #endif
 
 
+//     #pragma mark - debugger commands
+
+
+#if VM_CACHE_TRACING
+
+
+static void*
+cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
+{
+       using namespace VMCacheTracing;
+
+       // find the previous "insert area" entry for the given area
+       TraceEntryIterator iterator = baseIterator;
+       TraceEntry* entry = iterator.Current();
+       while (entry != NULL) {
+               if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
+                       if (insertAreaEntry->Area() == area)
+                               return insertAreaEntry->Cache();
+               }
+
+               entry = iterator.Previous();
+       }
+
+       return NULL;
+}
+
+
+static void*
+cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
+{
+       using namespace VMCacheTracing;
+
+       // find the previous "add consumer" or "create" entry for the given cache
+       TraceEntryIterator iterator = baseIterator;
+       TraceEntry* entry = iterator.Current();
+       while (entry != NULL) {
+               if (Create* createEntry = dynamic_cast<Create*>(entry)) {
+                       if (createEntry->Cache() == cache)
+                               return NULL;
+               } else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
+                       if (addEntry->Consumer() == cache)
+                               return addEntry->Cache();
+               }
+
+               entry = iterator.Previous();
+       }
+
+       return NULL;
+}
+
+
+static int
+command_cache_stack(int argc, char** argv)
+{
+       if (argc < 3 || argc > 4) {
+               print_debugger_command_usage(argv[0]);
+               return 0;
+       }
+
+       bool isArea = false;
+
+       int argi = 1;
+       if (argc == 4) {
+               if (strcmp(argv[argi], "area") != 0) {
+                       print_debugger_command_usage(argv[0]);
+                       return 0;
+               }
+
+               argi++;
+               isArea = true;
+       }
+
+       uint64 addressValue;
+       uint64 debugEntryIndex;
+       if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
+               || !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
+               return 0;
+       }
+
+       TraceEntryIterator baseIterator;
+       if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
+               kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
+               return 0;
+       }
+
+       void* address = (void*)(addr_t)addressValue;
+
+       kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
+               isArea ? "area" : "cache", address, debugEntryIndex);
+       if (isArea) {
+               address = cache_stack_find_area_cache(baseIterator, address);
+               if (address == NULL) {
+                       kprintf("  cache not found\n");
+                       return 0;
+               }
+       }
+
+       while (address != NULL) {
+               kprintf("  %p\n", address);
+               address = cache_stack_find_consumer(baseIterator, address);
+       }
+
+       return 0;
+}
+
+
+#endif // VM_CACHE_TRACING
+
+
 //     #pragma mark -
 
 
@@ -343,6 +467,23 @@
 }
 
 
+void
+vm_cache_init_post_heap()
+{
+#if VM_CACHE_TRACING
+       add_debugger_command_etc("cache_stack", &command_cache_stack,
+               "List the ancestors (sources) of a VMCache at the time given by "
+                       "tracing entry index",
+               "[ \"area\" ] <address> <tracing entry index>\n"
+               "All ancestors (sources) of a given VMCache at the time given by the\n"
+               "tracing entry index are listed. If \"area\" is given the supplied\n"
+               "address is an area instead of a cache address. The listing will\n"
+               "start with the area's cache at that point.\n",
+               "start with the area's cache at that point.\n",
+               0);
+#endif // VM_CACHE_TRACING
+}
+
+
 VMCache*
 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
 {
@@ -750,6 +891,46 @@
 }
 
 
+/*!    Transfers the areas from \a fromCache to this cache. This cache must not
+       have areas yet. Both caches must be locked.
+*/
+void
+VMCache::TransferAreas(VMCache* fromCache)
+{
+       AssertLocked();
+       fromCache->AssertLocked();
+       ASSERT(areas == NULL);
+
+       areas = fromCache->areas;
+       fromCache->areas = NULL;
+
+       for (VMArea* area = areas; area != NULL; area = area->cache_next) {
+               area->cache = this;
+               AcquireRefLocked();
+               fromCache->ReleaseRefLocked();
+
+               T(RemoveArea(fromCache, area));
+               T(InsertArea(this, area));
+       }
+}
+
+
+uint32
+VMCache::CountWritableAreas(VMArea* ignoreArea) const
+{
+       uint32 count = 0;
+
+       for (VMArea* area = areas; area != NULL; area = area->cache_next) {
+               if (area != ignoreArea
+                       && (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
+                       count++;
+               }
+       }
+
+       return count;
+}
+
+
 status_t
 VMCache::WriteModified()
 {

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp     2009-12-22 21:33:41 UTC (rev 34750)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp     2009-12-22 22:00:35 UTC (rev 34751)
@@ -1638,17 +1638,7 @@
 
        // transfer the lower cache areas to the upper cache
        mutex_lock(&sAreaCacheLock);
-
-       upperCache->areas = lowerCache->areas;
-       lowerCache->areas = NULL;
-
-       for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
-                       tempArea = tempArea->cache_next) {
-               tempArea->cache = upperCache;
-               upperCache->AcquireRefLocked();
-               lowerCache->ReleaseRefLocked();
-       }
-
+       upperCache->TransferAreas(lowerCache);
        mutex_unlock(&sAreaCacheLock);
 
        lowerCache->AddConsumer(upperCache);
@@ -1744,23 +1734,6 @@
 }
 
 
-//! You need to hold the cache lock when calling this function
-static int32
-count_writable_areas(VMCache* cache, VMArea* ignoreArea)
-{
-       struct VMArea* area = cache->areas;
-       uint32 count = 0;
-
-       for (; area != NULL; area = area->cache_next) {
-               if (area != ignoreArea
-                       && (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0
-                       count++;
-       }
-
-       return count;
-}
-
-
 static status_t
 vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
        bool kernel)
@@ -1799,7 +1772,7 @@
                // writable -> !writable
 
                if (cache->source != NULL && cache->temporary) {
-                       if (count_writable_areas(cache, area) == 0) {
+                       if (cache->CountWritableAreas(area) == 0) {
                                // Since this cache now lives from the pages in its source cache,
                                // we can change the cache's commitment to take only those pages
                                // into account that really are in this cache.
@@ -3325,6 +3298,8 @@
 
        TRACE(("vm_init: exit\n"));
 
+       vm_cache_init_post_heap();
+
        return err;
 }
 
@@ -4744,7 +4719,7 @@
 
        if (info.team != thread_get_current_thread()->team->id)
                return B_PERMISSION_DENIED;
-               
+
        area_id clonedArea = vm_clone_area(target, info.name, _address,
                addressSpec, info.protection, REGION_NO_PRIVATE_MAP, id, kernel);
        if (clonedArea < 0)

