[haiku-commits] r35331 - in haiku/trunk: headers/private/kernel/vm src/add-ons/kernel/bus_managers/agp_gart src/system/kernel/arch/m68k src/system/kernel/arch/x86 src/system/kernel/cache ...

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Fri, 29 Jan 2010 11:00:45 +0100 (CET)

Author: bonefish
Date: 2010-01-29 11:00:45 +0100 (Fri, 29 Jan 2010)
New Revision: 35331
Changeset: http://dev.haiku-os.org/changeset/35331/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/VMCache.h
   haiku/trunk/headers/private/kernel/vm/vm_page.h
   haiku/trunk/headers/private/kernel/vm/vm_types.h
   haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp
   haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
   haiku/trunk/src/system/kernel/cache/file_cache.cpp
   haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
   haiku/trunk/src/system/kernel/vm/VMCache.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_daemons.cpp
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
* Removed the page state PAGE_STATE_BUSY and instead introduced a vm_page::busy
  flag. The obvious advantage is that one can still see what state a page is in
  and even move it between states while being marked busy.
* Removed the vm_page::is_dummy flag. Instead we mark marker pages busy, which
  in all cases has the same effect. Introduced a vm_page_is_dummy() function
  that can still be used to check whether a given page is a dummy page.
* vm_page_unreserve_pages(): Before adding to the system reserve make sure
  sUnreservedFreePages is non-negative. Otherwise we'd make nonexistent pages
  available for allocation. steal_pages() still has the same problem and it
  can't be solved that easily.
* map_page(): No longer changes the page state/marks the page unbusy. That's
  the caller's responsibility.


Modified: haiku/trunk/headers/private/kernel/vm/VMCache.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-01-29 09:45:47 UTC 
(rev 35330)
+++ haiku/trunk/headers/private/kernel/vm/VMCache.h     2010-01-29 10:00:45 UTC 
(rev 35331)
@@ -90,6 +90,7 @@
                        void                            
NotifyPageEvents(vm_page* page, uint32 events)
                                                                        { if 
(fPageEventWaiters != NULL)
                                                                                
_NotifyPageEvents(page, events); }
+       inline  void                            MarkPageUnbusy(vm_page* page);
 
                        vm_page*                        LookupPage(off_t 
offset);
                        void                            InsertPage(vm_page* 
page, off_t offset);
@@ -291,6 +292,14 @@
 }
 
 
+void
+VMCache::MarkPageUnbusy(vm_page* page)
+{
+       page->busy = false;
+       NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+}
+
+
 #ifdef __cplusplus
 extern "C" {
 #endif

Modified: haiku/trunk/headers/private/kernel/vm/vm_page.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm_page.h     2010-01-29 09:45:47 UTC 
(rev 35330)
+++ haiku/trunk/headers/private/kernel/vm/vm_page.h     2010-01-29 10:00:45 UTC 
(rev 35331)
@@ -56,6 +56,7 @@
        int priority);
 struct vm_page *vm_page_at_index(int32 index);
 struct vm_page *vm_lookup_page(addr_t pageNumber);
+bool vm_page_is_dummy(struct vm_page *page);
 
 #ifdef __cplusplus
 }

Modified: haiku/trunk/headers/private/kernel/vm/vm_types.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-01-29 09:45:47 UTC 
(rev 35330)
+++ haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-01-29 10:00:45 UTC 
(rev 35331)
@@ -106,7 +106,7 @@
 #endif
 
        uint8                                   state : 3;
-       bool                                    is_dummy : 1;
+       bool                                    busy : 1;
        bool                                    busy_writing : 1;
                // used in VMAnonymousCache::Merge()
        bool                                    accessed : 1;
@@ -116,7 +116,6 @@
        int8                                    usage_count;
        uint16                                  wired_count;
 
-
        VMCacheRef* CacheRef() const                    { return cache_ref; }
        void SetCacheRef(VMCacheRef* cacheRef)  { this->cache_ref = cacheRef; }
 
@@ -128,7 +127,6 @@
 enum {
        PAGE_STATE_ACTIVE = 0,
        PAGE_STATE_INACTIVE,
-       PAGE_STATE_BUSY,
        PAGE_STATE_MODIFIED,
        PAGE_STATE_FREE,
        PAGE_STATE_CLEAR,

Modified: haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp
===================================================================
--- haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp   
2010-01-29 09:45:47 UTC (rev 35330)
+++ haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp   
2010-01-29 10:00:45 UTC (rev 35331)
@@ -541,6 +541,7 @@
        if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) {
                memory->page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, 0, 
count,
                        VM_PRIORITY_SYSTEM);
+               // TODO: Mark pages unbusy!
                if (memory->page == NULL)
                        return B_NO_MEMORY;
        } else {
@@ -550,8 +551,10 @@
                        return B_NO_MEMORY;
 
                vm_page_reserve_pages(count, VM_PRIORITY_SYSTEM);
-               for (uint32 i = 0; i < count; i++)
+               for (uint32 i = 0; i < count; i++) {
                        memory->pages[i] = 
vm_page_allocate_page(PAGE_STATE_CLEAR);
+                       memory->pages[i]->busy = false;
+               }
                vm_page_unreserve_pages(count);
        }
 

Modified: 
haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    
2010-01-29 09:45:47 UTC (rev 35330)
+++ haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    
2010-01-29 10:00:45 UTC (rev 35331)
@@ -547,6 +547,7 @@
 
                // mark the page WIRED
                vm_page_set_state(page, PAGE_STATE_WIRED);
+               page->busy = false;
 
                DEBUG_PAGE_ACCESS_END(page);
 
@@ -595,6 +596,7 @@
 
                // mark the page WIRED
                vm_page_set_state(page, PAGE_STATE_WIRED);
+               page->busy = false;
 
                DEBUG_PAGE_ACCESS_END(page);
 

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  
2010-01-29 09:45:47 UTC (rev 35330)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  
2010-01-29 10:00:45 UTC (rev 35331)
@@ -426,6 +426,7 @@
 
                // mark the page WIRED
                vm_page_set_state(page, PAGE_STATE_WIRED);
+               page->busy = false;
 
                DEBUG_PAGE_ACCESS_END(page);
 

Modified: haiku/trunk/src/system/kernel/cache/file_cache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/cache/file_cache.cpp  2010-01-29 09:45:47 UTC 
(rev 35330)
+++ haiku/trunk/src/system/kernel/cache/file_cache.cpp  2010-01-29 10:00:45 UTC 
(rev 35331)
@@ -209,8 +209,7 @@
 
                DEBUG_PAGE_ACCESS_TRANSFER(fPages[i], fAllocatingThread);
 
-               fPages[i]->state = PAGE_STATE_ACTIVE;
-               fCache->NotifyPageEvents(fPages[i], PAGE_EVENT_NOT_BUSY);
+               fCache->MarkPageUnbusy(fPages[i]);
 
                DEBUG_PAGE_ACCESS_END(fPages[i]);
        }
@@ -308,8 +307,7 @@
                                vm_page* page;
                                for (VMCachePagesTree::Iterator it = 
cache->pages.GetIterator();
                                                (page = it.Next()) != NULL && 
left > 0;) {
-                                       if (page->state != PAGE_STATE_MODIFIED
-                                               && page->state != 
PAGE_STATE_BUSY) {
+                                       if (page->state != PAGE_STATE_MODIFIED 
&& !page->busy) {
                                                DEBUG_PAGE_ACCESS_START(page);
                                                cache->RemovePage(page);
                                                vm_page_set_state(page, 
PAGE_STATE_FREE);
@@ -442,9 +440,7 @@
        for (int32 i = pageIndex; i-- > 0;) {
                DEBUG_PAGE_ACCESS_END(pages[i]);
 
-               pages[i]->state = PAGE_STATE_ACTIVE;
-
-               cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
+               cache->MarkPageUnbusy(pages[i]);
        }
 
        return B_OK;
@@ -610,11 +606,9 @@
 
        // make the pages accessible in the cache
        for (int32 i = pageIndex; i-- > 0;) {
-               ref->cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
+               ref->cache->MarkPageUnbusy(pages[i]);
 
-               if (writeThrough)
-                       pages[i]->state = PAGE_STATE_ACTIVE;
-               else
+               if (!writeThrough)
                        vm_page_set_state(pages[i], PAGE_STATE_MODIFIED);
 
                DEBUG_PAGE_ACCESS_END(pages[i]);
@@ -772,7 +766,7 @@
                        if (status != B_OK)
                                return status;
 
-                       if (page->state == PAGE_STATE_BUSY) {
+                       if (page->busy) {
                                cache->WaitForPageEvents(page, 
PAGE_EVENT_NOT_BUSY, true);
                                continue;
                        }
@@ -797,8 +791,7 @@
                                // need to unlock the cache temporarily to 
avoid a potential
                                // deadlock. To make sure that our page doesn't 
go away, we mark
                                // it busy for the time.
-                               uint8 oldPageState = page->state;
-                               page->state = PAGE_STATE_BUSY;
+                               page->busy = true;
                                locker.Unlock();
 
                                // copy the contents of the page already in 
memory
@@ -818,14 +811,13 @@
 
                                locker.Lock();
 
-                               page->state = oldPageState;
                                if (doWrite && page->state != 
PAGE_STATE_MODIFIED) {
                                        DEBUG_PAGE_ACCESS_START(page);
                                        vm_page_set_state(page, 
PAGE_STATE_MODIFIED);
                                        DEBUG_PAGE_ACCESS_END(page);
                                }
 
-                               cache->NotifyPageEvents(page, 
PAGE_EVENT_NOT_BUSY);
+                               cache->MarkPageUnbusy(page);
                        }
 
                        if (bytesLeft <= bytesInPage) {

Modified: haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-01-29 
09:45:47 UTC (rev 35330)
+++ haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-01-29 
10:00:45 UTC (rev 35331)
@@ -1352,6 +1352,7 @@
                vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
                cache->InsertPage(page, offset);
                vm_page_set_state(page, PAGE_STATE_WIRED);
+               page->busy = false;
 
                page->wired_count++;
                atomic_add(&gMappedPagesCount, 1);

Modified: haiku/trunk/src/system/kernel/vm/VMCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-01-29 09:45:47 UTC 
(rev 35330)
+++ haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-01-29 10:00:45 UTC 
(rev 35331)
@@ -1058,7 +1058,7 @@
                for (VMCachePagesTree::Iterator it
                                        = pages.GetIterator(newPageCount, true, 
true);
                                vm_page* page = it.Next();) {
-                       if (page->state == PAGE_STATE_BUSY) {
+                       if (page->busy) {
                                if (page->busy_writing) {
                                        // We cannot wait for the page to 
become available
                                        // as we might cause a deadlock this way
@@ -1107,7 +1107,7 @@
                // remove pages
                for (VMCachePagesTree::Iterator it = pages.GetIterator();
                                vm_page* page = it.Next();) {
-                       if (page->state == PAGE_STATE_BUSY) {
+                       if (page->busy) {
                                // wait for page to become unbusy
                                WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, 
true);
 

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-29 09:45:47 UTC (rev 
35330)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-29 10:00:45 UTC (rev 
35331)
@@ -515,9 +515,6 @@
        if (page->usage_count < 0)
                page->usage_count = 1;
 
-       if (page->state != PAGE_STATE_MODIFIED)
-               vm_page_set_state(page, PAGE_STATE_ACTIVE);
-
        return B_OK;
 }
 
@@ -1128,8 +1125,16 @@
                                vm_page* page = 
vm_page_allocate_page(newPageState);
                                cache->InsertPage(page, offset);
                                map_page(area, page, address, protection);
-                                       // TODO: This sets the page state to 
"active", but it would
-                                       // make more sense to set it to "wired".
+//                             vm_page_set_state(page, PAGE_STATE_WIRED);
+                                       // TODO: The pages should be 
PAGE_STATE_WIRED, since there's
+                                       // no need for the page daemon to play 
with them (the same
+                                       // should be considered in 
vm_soft_fault()). ATM doing that
+                                       // will result in bad thrashing in 
systems with little
+                                       // memory due to the current tuning of 
the page daemon. It
+                                       // will age pages way too fast (since 
it just skips
+                                       // PAGE_STATE_WIRED pages, while it 
processes
+                                       // PAGE_STATE_ACTIVE with wired_count > 
0).
+                               page->busy = false;
 
                                DEBUG_PAGE_ACCESS_END(page);
 
@@ -1179,6 +1184,7 @@
                                increment_page_wired_count(page);
                                cache->InsertPage(page, offset);
                                vm_page_set_state(page, PAGE_STATE_WIRED);
+                               page->busy = false;
 
                                DEBUG_PAGE_ACCESS_END(page);
                        }
@@ -1212,6 +1218,7 @@
                                increment_page_wired_count(page);
                                cache->InsertPage(page, offset);
                                vm_page_set_state(page, PAGE_STATE_WIRED);
+                               page->busy = false;
 
                                DEBUG_PAGE_ACCESS_END(page);
                        }
@@ -1511,7 +1518,7 @@
                        break;
 
                // skip inactive pages
-               if (page->state == PAGE_STATE_BUSY || page->usage_count <= 0)
+               if (page->busy || page->usage_count <= 0)
                        continue;
 
                DEBUG_PAGE_ACCESS_START(page);
@@ -1817,7 +1824,7 @@
                        // map in all pages from source
                        for (VMCachePagesTree::Iterator it = 
cache->pages.GetIterator();
                                        vm_page* page  = it.Next();) {
-                               if (page->state != PAGE_STATE_BUSY) {
+                               if (!page->busy) {
                                        DEBUG_PAGE_ACCESS_START(page);
                                        map_page(newArea, page,
                                                newArea->Base() + 
((page->cache_offset << PAGE_SHIFT)
@@ -2738,7 +2745,7 @@
        if (showPages) {
                for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
                                vm_page* page = it.Next();) {
-                       if (!page->is_dummy) {
+                       if (!vm_page_is_dummy(page)) {
                                kprintf("\t%p ppn 0x%lx offset 0x%lx state %u 
(%s) "
                                        "wired_count %u\n", page, 
page->physical_page_number,
                                        page->cache_offset, page->state,
@@ -3690,7 +3697,7 @@
 
                for (;;) {
                        page = cache->LookupPage(context.cacheOffset);
-                       if (page == NULL || page->state != PAGE_STATE_BUSY) {
+                       if (page == NULL || !page->busy) {
                                // Either there is no page or there is one and 
it is not busy.
                                break;
                        }
@@ -3745,8 +3752,7 @@
                        }
 
                        // mark the page unbusy again
-                       page->state = PAGE_STATE_ACTIVE;
-                       cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+                       cache->MarkPageUnbusy(page);
 
                        DEBUG_PAGE_ACCESS_END(page);
 
@@ -3769,12 +3775,12 @@
 
                // allocate a clean page
                page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+               page->busy = false;
                FTRACE(("vm_soft_fault: just allocated page 0x%lx\n",
                        page->physical_page_number));
 
                // insert the new page into our cache
                cache->InsertPage(page, context.cacheOffset);
-
        } else if (page->Cache() != context.topCache && context.isWrite) {
                // We have a page that has the data we want, but in the wrong 
cache
                // object so we need to copy it and stick it into the top cache.
@@ -3784,12 +3790,12 @@
                // from our source cache -- if possible, that is.
                FTRACE(("get new page, copy it, and put it into the topmost 
cache\n"));
                page = vm_page_allocate_page(PAGE_STATE_FREE);
+               page->busy = false;
 
                // To not needlessly kill concurrency we unlock all caches but 
the top
                // one while copying the page. Lacking another mechanism to 
ensure that
                // the source page doesn't disappear, we mark it busy.
-               int sourcePageState = sourcePage->state;
-               sourcePage->state = PAGE_STATE_BUSY;
+               sourcePage->busy = true;
                context.cacheChainLocker.UnlockKeepRefs(true);
 
                // copy the page
@@ -3797,8 +3803,7 @@
                        sourcePage->physical_page_number * B_PAGE_SIZE);
 
                context.cacheChainLocker.RelockCaches(true);
-               sourcePage->state = sourcePageState;
-               sourcePage->Cache()->NotifyPageEvents(sourcePage, 
PAGE_EVENT_NOT_BUSY);
+               sourcePage->Cache()->MarkPageUnbusy(sourcePage);
 
                // insert the new page into our cache
                context.topCache->InsertPage(page, context.cacheOffset);
@@ -3964,8 +3969,6 @@
                                // fine, though. We'll simply leave and 
probably fault again.
                                // To make sure we'll have more luck then, we 
ensure that the
                                // minimum object reserve is available.
-                               if (context.page->state == PAGE_STATE_BUSY)
-                                       vm_page_set_state(context.page, 
PAGE_STATE_ACTIVE);
                                DEBUG_PAGE_ACCESS_END(context.page);
 
                                context.UnlockAll();

Modified: haiku/trunk/src/system/kernel/vm/vm_daemons.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_daemons.cpp     2010-01-29 09:45:47 UTC 
(rev 35330)
+++ haiku/trunk/src/system/kernel/vm/vm_daemons.cpp     2010-01-29 10:00:45 UTC 
(rev 35331)
@@ -65,7 +65,7 @@
 bool
 PageCacheLocker::_IgnorePage(vm_page* page)
 {
-       if (page->state == PAGE_STATE_WIRED || page->state == PAGE_STATE_BUSY
+       if (page->busy || page->state == PAGE_STATE_WIRED
                || page->state == PAGE_STATE_FREE || page->state == 
PAGE_STATE_CLEAR
                || page->state == PAGE_STATE_UNUSED || page->wired_count > 0)
                return true;
@@ -257,7 +257,7 @@
        if (!locker.IsLocked())
                return;
 
-       if (page->state == PAGE_STATE_ACTIVE)
+       if (!page->busy && page->state == PAGE_STATE_ACTIVE)
                vm_clear_map_flags(page, PAGE_ACCESSED);
 }
 

Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-29 09:45:47 UTC 
(rev 35330)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-29 10:00:45 UTC 
(rev 35331)
@@ -334,8 +334,6 @@
                        return "active";
                case PAGE_STATE_INACTIVE:
                        return "inactive";
-               case PAGE_STATE_BUSY:
-                       return "busy";
                case PAGE_STATE_MODIFIED:
                        return "modified";
                case PAGE_STATE_FREE:
@@ -420,10 +418,10 @@
        kprintf("cache:           %p\n", page->Cache());
        kprintf("cache_offset:    %ld\n", page->cache_offset);
        kprintf("cache_next:      %p\n", page->cache_next);
-       kprintf("is dummy:        %d\n", page->is_dummy);
        kprintf("state:           %s\n", page_state_to_string(page->state));
        kprintf("wired_count:     %d\n", page->wired_count);
        kprintf("usage_count:     %d\n", page->usage_count);
+       kprintf("busy:            %d\n", page->busy);
        kprintf("busy_writing:    %d\n", page->busy_writing);
        kprintf("accessed:        %d\n", page->accessed);
        kprintf("modified:        %d\n", page->modified);
@@ -545,16 +543,20 @@
 {
        page_num_t swappableModified = 0;
        page_num_t swappableModifiedInactive = 0;
-       uint32 counter[8];
+       size_t counter[8];
+       size_t busyCounter[8];
        addr_t i;
 
        memset(counter, 0, sizeof(counter));
+       memset(busyCounter, 0, sizeof(busyCounter));
 
        for (i = 0; i < sNumPages; i++) {
                if (sPages[i].state > 7)
                        panic("page %li at %p has invalid state!\n", i, 
&sPages[i]);
 
                counter[sPages[i].state]++;
+               if (sPages[i].busy)
+                       busyCounter[sPages[i].state]++;
 
                if (sPages[i].state == PAGE_STATE_MODIFIED && sPages[i].Cache() 
!= NULL
                        && sPages[i].Cache()->temporary && 
sPages[i].wired_count == 0) {
@@ -566,12 +568,20 @@
 
        kprintf("page stats:\n");
        kprintf("total: %lu\n", sNumPages);
-       kprintf("active: %lu\ninactive: %lu\nbusy: %lu\nunused: %lu\n",
-               counter[PAGE_STATE_ACTIVE], counter[PAGE_STATE_INACTIVE],
-               counter[PAGE_STATE_BUSY], counter[PAGE_STATE_UNUSED]);
-       kprintf("wired: %lu\nmodified: %lu\nfree: %lu\nclear: %lu\n",
-               counter[PAGE_STATE_WIRED], counter[PAGE_STATE_MODIFIED],
-               counter[PAGE_STATE_FREE], counter[PAGE_STATE_CLEAR]);
+
+       kprintf("active: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+               counter[PAGE_STATE_ACTIVE], busyCounter[PAGE_STATE_ACTIVE]);
+       kprintf("inactive: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+               counter[PAGE_STATE_INACTIVE], busyCounter[PAGE_STATE_INACTIVE]);
+       kprintf("unused: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+               counter[PAGE_STATE_UNUSED], busyCounter[PAGE_STATE_UNUSED]);
+       kprintf("wired: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+               counter[PAGE_STATE_WIRED], busyCounter[PAGE_STATE_WIRED]);
+       kprintf("modified: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+               counter[PAGE_STATE_MODIFIED], busyCounter[PAGE_STATE_MODIFIED]);
+       kprintf("free: %" B_PRIuSIZE "\n", counter[PAGE_STATE_FREE]);
+       kprintf("clear: %" B_PRIuSIZE "\n", counter[PAGE_STATE_CLEAR]);
+
        kprintf("unreserved free pages: %" B_PRId32 "\n", sUnreservedFreePages);
        kprintf("system reserved pages: %" B_PRId32 "\n", sSystemReservedPages);
        kprintf("page deficit: %lu\n", sPageDeficit);
@@ -626,7 +636,6 @@
        VMPageQueue* fromQueue;
 
        switch (page->state) {
-               case PAGE_STATE_BUSY:
                case PAGE_STATE_ACTIVE:
                        fromQueue = &sActivePageQueue;
                        break;
@@ -699,7 +708,6 @@
        VMPageQueue* fromQueue;
 
        switch (page->state) {
-               case PAGE_STATE_BUSY:
                case PAGE_STATE_ACTIVE:
                        fromQueue = &sActivePageQueue;
                        break;
@@ -726,7 +734,6 @@
        VMPageQueue* toQueue;
 
        switch (pageState) {
-               case PAGE_STATE_BUSY:
                case PAGE_STATE_ACTIVE:
                        toQueue = &sActivePageQueue;
                        break;
@@ -862,7 +869,8 @@
 
                        DEBUG_PAGE_ACCESS_START(page[i]);
 
-                       page[i]->state = PAGE_STATE_BUSY;
+                       page[i]->state = PAGE_STATE_ACTIVE;
+                       page[i]->busy = true;
                        scrubCount++;
                }
 
@@ -884,6 +892,7 @@
                // and put them into the clear queue
                for (int32 i = 0; i < scrubCount; i++) {
                        page[i]->state = PAGE_STATE_CLEAR;
+                       page[i]->busy = false;
                        DEBUG_PAGE_ACCESS_END(page[i]);
                        sClearPageQueue.PrependUnlocked(page[i]);
                }
@@ -899,13 +908,6 @@
 }
 
 
-static inline bool
-is_marker_page(struct vm_page *page)
-{
-       return page->is_dummy;
-}
-
-
 static void
 remove_page_marker(struct vm_page &marker)
 {
@@ -953,7 +955,7 @@
                page = sModifiedPageQueue.Head();
 
        for (; page != NULL; page = sModifiedPageQueue.Next(page)) {
-               if (!is_marker_page(page) && page->state != PAGE_STATE_BUSY) {
+               if (!page->busy) {
                        // insert marker
                        marker.state = PAGE_STATE_MODIFIED;
                        sModifiedPageQueue.InsertAfter(page, &marker);
@@ -1035,7 +1037,6 @@
        struct VMCache*         fCache;
        bool                            fDequeuedPage;
        bool                            fIsActive;
-       int                                     fOldPageState;
 };
 
 
@@ -1060,7 +1061,7 @@
 {
        DEBUG_PAGE_ACCESS_CHECK(page);
 
-       if (page->state == PAGE_STATE_BUSY)
+       if (page->busy)
                panic("setting page write wrapper to busy page");
 
        if (fIsActive)
@@ -1071,8 +1072,7 @@
        fDequeuedPage = dequeuedPage;
        fIsActive = true;
 
-       fOldPageState = fPage->state;
-       fPage->state = PAGE_STATE_BUSY;
+       fPage->busy = true;
        fPage->busy_writing = true;
 }
 
@@ -1120,6 +1120,9 @@
 
        DEBUG_PAGE_ACCESS_CHECK(fPage);
 
+       fPage->busy = false;
+               // Set unbusy and notify later by hand, since we might free the 
page.
+
        if (result == B_OK) {
                // put it into the active/inactive queue
                move_page_to_active_or_inactive_queue(fPage, fDequeuedPage);
@@ -1132,10 +1135,8 @@
                if (fDequeuedPage) {
                        fPage->state = PAGE_STATE_MODIFIED;
                        sModifiedPageQueue.AppendUnlocked(fPage);
-               } else {
-                       fPage->state = fOldPageState;
+               } else
                        set_page_state(fPage, PAGE_STATE_MODIFIED);
-               }
 
                if (!fPage->busy_writing) {
                        // The busy_writing flag was cleared. That means the 
cache has been
@@ -1400,9 +1401,9 @@
        }
 
        vm_page marker;
-       marker.is_dummy = true;
        marker.SetCacheRef(NULL);
        marker.state = PAGE_STATE_UNUSED;
+       marker.busy = true;
 #if DEBUG_PAGE_QUEUE
        marker.queue = NULL;
 #endif
@@ -1493,7 +1494,7 @@
                        }
 
                        // state might have changed while we were locking the 
cache
-                       if (page->state != PAGE_STATE_MODIFIED) {
+                       if (page->busy || page->state != PAGE_STATE_MODIFIED) {
                                // release the cache reference
                                DEBUG_PAGE_ACCESS_END(page);
                                cache->ReleaseStoreRef();
@@ -1565,7 +1566,7 @@
        }
 
        while (page != NULL) {
-               if (!is_marker_page(page) && page->state == 
PAGE_STATE_INACTIVE) {
+               if (!page->busy) {
                        // we found a candidate, insert marker
                        marker.state = PAGE_STATE_INACTIVE;
                        sInactivePageQueue.InsertAfter(page, &marker);
@@ -1591,7 +1592,7 @@
        MethodDeleter<VMCache> _2(cache, &VMCache::ReleaseRefLocked);
 
        // check again if that page is still a candidate
-       if (page->state != PAGE_STATE_INACTIVE)
+       if (page->busy || page->state != PAGE_STATE_INACTIVE)
                return false;
 
        DEBUG_PAGE_ACCESS_START(page);
@@ -1628,9 +1629,9 @@
 {
        while (true) {
                vm_page marker;
-               marker.is_dummy = true;
                marker.SetCacheRef(NULL);
                marker.state = PAGE_STATE_UNUSED;
+               marker.busy = true;
 #if DEBUG_PAGE_QUEUE
                marker.queue = NULL;
 #endif
@@ -1765,12 +1766,13 @@
 
                bool dequeuedPage = false;
                if (page != NULL) {
-                       if (page->state == PAGE_STATE_MODIFIED) {
+                       if (page->busy) {
+                               page = NULL;
+                       } else if (page->state == PAGE_STATE_MODIFIED) {
                                DEBUG_PAGE_ACCESS_START(page);
                                sModifiedPageQueue.RemoveUnlocked(page);
                                dequeuedPage = true;
-                       } else if (page->state == PAGE_STATE_BUSY
-                                       || !vm_test_map_modification(page)) {
+                       } else if (!vm_test_map_modification(page)) {
                                page = NULL;
                        } else
                                DEBUG_PAGE_ACCESS_START(page);
@@ -1869,7 +1871,7 @@
                if (page->cache_offset >= endPage)
                        break;
 
-               if (page->state == PAGE_STATE_MODIFIED) {
+               if (!page->busy && page->state == PAGE_STATE_MODIFIED) {
                        DEBUG_PAGE_ACCESS_START(page);
                        vm_page_requeue(page, false);
                        modified++;
@@ -1930,7 +1932,6 @@
        // initialize the free page table
        for (uint32 i = 0; i < sNumPages; i++) {
                sPages[i].physical_page_number = sPhysicalPageOffset + i;
-               sPages[i].is_dummy = false;
                sPages[i].state = PAGE_STATE_FREE;
                new(&sPages[i].mappings) vm_page_mappings();
                sPages[i].wired_count = 0;
@@ -2066,6 +2067,7 @@
                                        ? sFreePageQueue : sClearPageQueue;
                                queue.Remove(page);
                                page->state = PAGE_STATE_UNUSED;
+                               page->busy = false;
                                atomic_add(&sUnreservedFreePages, -1);
                                DEBUG_PAGE_ACCESS_END(page);
                                break;
@@ -2074,7 +2076,6 @@
                                break;
                        case PAGE_STATE_ACTIVE:
                        case PAGE_STATE_INACTIVE:
-                       case PAGE_STATE_BUSY:
                        case PAGE_STATE_MODIFIED:
                        case PAGE_STATE_UNUSED:
                        default:
@@ -2103,6 +2104,25 @@
        T(UnreservePages(count));
 
        while (true) {
+               int32 freePages = sUnreservedFreePages;
+               if (freePages >= 0)
+                       break;
+
+               int32 toUnreserve = std::min((int32)count, -freePages);
+               if (atomic_test_and_set(&sUnreservedFreePages,
+                               freePages + toUnreserve, freePages) == freePages) {
+                       count -= toUnreserve;
+                       if (count == 0) {
+                               // TODO: Notify waiting system priority 
reservers.
+                               return;
+                       }
+                       break;
+               }
+
+               // the count changed in the meantime -- retry
+       }
+
+       while (true) {
                int32 systemReserve = sSystemReservedPages;
                if (systemReserve >= (int32)kMinimumSystemReserve)
                        break;
@@ -2113,8 +2133,10 @@
+                                       systemReserve + toUnreserve, systemReserve)
+                               == systemReserve) {
                        count -= toUnreserve;
-                       if (count == 0)
+                       if (count == 0) {
+                               // TODO: Notify waiting system priority reservers.
                                return;
+                       }
                        break;
                }
 
@@ -2314,7 +2336,8 @@
        DEBUG_PAGE_ACCESS_START(page);
 
        int oldPageState = page->state;
-       page->state = PAGE_STATE_BUSY;
+       page->state = PAGE_STATE_ACTIVE;
+       page->busy = true;
        page->usage_count = 2;
        page->accessed = false;
        page->modified = false;
@@ -2352,7 +2375,8 @@
                        freePages.Add(&page);
                }
 
-               page.state = PAGE_STATE_BUSY;
+               page.state = PAGE_STATE_ACTIVE;
+               page.busy = true;
                page.usage_count = 1;
                page.accessed = false;
                page.modified = false;
@@ -2497,6 +2521,13 @@
 }
 
 
+bool
+vm_page_is_dummy(struct vm_page *page)
+{
+       return page < sPages || page >= sPages + sNumPages;
+}
+
+
 /*!    Free the page that belonged to a certain cache.
        You can use vm_page_set_state() manually if you prefer, but only
        if the page does not equal PAGE_STATE_MODIFIED.
@@ -2538,7 +2569,6 @@
        VMPageQueue *queue = NULL;
 
        switch (page->state) {
-               case PAGE_STATE_BUSY:
                case PAGE_STATE_ACTIVE:
                        queue = &sActivePageQueue;
                        break;


Other related posts:

  • » [haiku-commits] r35331 - in haiku/trunk: headers/private/kernel/vm src/add-ons/kernel/bus_managers/agp_gart src/system/kernel/arch/m68k src/system/kernel/arch/x86 src/system/kernel/cache ... - ingo_weinhold