[haiku-commits] r35485 - haiku/trunk/src/system/kernel/vm

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Mon, 15 Feb 2010 23:40:37 +0100 (CET)

Author: bonefish
Date: 2010-02-15 23:40:37 +0100 (Mon, 15 Feb 2010)
New Revision: 35485
Changeset: http://dev.haiku-os.org/changeset/35485/haiku
Ticket: http://dev.haiku-os.org/ticket/5382
Ticket: http://dev.haiku-os.org/ticket/5404

Modified:
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
page_writer():
* Use the same criterion as the page daemon for deciding when to write back
  temporary pages.
* Move wired and temporary pages that shall not be written back to the active or
  inactive queue so they don't get stuck in the modified queue and potentially
  cause the page writer to run permanently without actually making progress
  (#5382).

page writing implementation:
* If writing back a temporary page failed, move it back to the inactive or
  active queue, so it doesn't get stuck in the modified queue. If active paging
  continues, it might find its way back to the modified queue in the next
  iteration, but that improves the situation a bit at least. Particularly with
  the port heap pages not really being swappable ATM.
* Never dequeue pages from the modified queue. We mark them busy, so the page
  writer will skip them anyway. This allows others to play with the page to some
  extent (at least allowing it to be moved between the queues). This fixes #5404.
* Removed PageWriteWrapper::ClearModifiedFlag(). We clear the modified flag in
  PageWriteWrapper::SetTo(), now, so that the page writer doesn't need to do
  that explicitly either.


Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-02-15 22:19:49 UTC 
(rev 35484)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-02-15 22:40:37 UTC 
(rev 35485)
@@ -130,6 +130,13 @@
 #endif
 
 
+struct page_stats {
+       int32   totalFreePages;
+       int32   unsatisfiedReservations;
+       int32   cachedPages;
+};
+
+
 struct PageReservationWaiter
                : public DoublyLinkedListLinkImpl<PageReservationWaiter> {
        struct thread*  thread;
@@ -875,22 +882,25 @@
 // #pragma mark -
 
 
-struct page_stats {
-       int32   totalFreePages;
-       int32   unsatisfiedReservations;
-       int32   cachedPages;
-};
-
 static void
-get_page_stats(page_stats& pageStats)
+get_page_stats(page_stats& _pageStats)
 {
-       pageStats.totalFreePages = sUnreservedFreePages;
-       pageStats.cachedPages = sCachedPageQueue.Count();
-       pageStats.unsatisfiedReservations = sUnsatisfiedPageReservations;
+       _pageStats.totalFreePages = sUnreservedFreePages;
+       _pageStats.cachedPages = sCachedPageQueue.Count();
+       _pageStats.unsatisfiedReservations = sUnsatisfiedPageReservations;
        // TODO: We don't get an actual snapshot here!
 }
 
 
+static bool
+do_active_paging(const page_stats& pageStats)
+{
+       return pageStats.totalFreePages + pageStats.cachedPages
+               < pageStats.unsatisfiedReservations
+                       + (int32)sFreeOrCachedPagesTarget;
+}
+
+
 /*!    Reserves as many pages as possible from \c sUnreservedFreePages up to
        \a count. Doesn't touch the last \a dontTouch pages of
        \c sUnreservedFreePages, though.
@@ -1123,7 +1133,7 @@
        The page queues must not be locked.
 */
 static void
-move_page_to_active_or_inactive_queue(vm_page *page, bool dequeued)
+move_page_to_active_or_inactive_queue(vm_page *page)
 {
        DEBUG_PAGE_ACCESS_CHECK(page);
 
@@ -1138,13 +1148,7 @@
 
 // TODO: If free + cached pages are low, we might directly want to free the
 // page.
-       if (dequeued) {
-               page->state = state;
-               sPageQueues[state].AppendUnlocked(page);
-               if (page->Cache()->temporary)
-                       atomic_add(&sModifiedTemporaryPages, -1);
-       } else
-               set_page_state(page, state);
+       set_page_state(page, state);
 }
 
 
@@ -1353,14 +1357,12 @@
 public:
        PageWriteWrapper();
        ~PageWriteWrapper();
-       void SetTo(vm_page* page, bool dequeuedPage);
-       void ClearModifiedFlag();
+       void SetTo(vm_page* page);
        void Done(status_t result);
 
 private:
        vm_page*                        fPage;
        struct VMCache*         fCache;
-       bool                            fDequeuedPage;
        bool                            fIsActive;
 };
 
@@ -1382,7 +1384,7 @@
 /*!    The page's cache must be locked.
 */
 void
-PageWriteWrapper::SetTo(vm_page* page, bool dequeuedPage)
+PageWriteWrapper::SetTo(vm_page* page)
 {
        DEBUG_PAGE_ACCESS_CHECK(page);
 
@@ -1394,28 +1396,20 @@
 
        fPage = page;
        fCache = page->Cache();
-       fDequeuedPage = dequeuedPage;
        fIsActive = true;
 
        fPage->busy = true;
        fPage->busy_writing = true;
-}
 
-
-/*!    The page's cache must be locked.
-*/
-void
-PageWriteWrapper::ClearModifiedFlag()
-{
-       // We have a modified page - however, while we're writing it back,
-       // the page is still mapped. In order not to lose any changes to the
+       // We have a modified page -- however, while we're writing it back,
+       // the page might still be mapped. In order not to lose any changes to 
the
        // page, we mark it clean before actually writing it back; if
-       // writing the page fails for some reason, we just keep it in the
+       // writing the page fails for some reason, we'll just keep it in the
        // modified page list, but that should happen only rarely.
 
-       // If the page is changed after we cleared the dirty flag, but
-       // before we had the chance to write it back, then we'll write it
-       // again later - that will probably not happen that often, though.
+       // If the page is changed after we cleared the dirty flag, but before we
+       // had the chance to write it back, then we'll write it again later -- 
that
+       // will probably not happen that often, though.
 
        vm_clear_map_flags(fPage, PAGE_MODIFIED);
 }
@@ -1430,25 +1424,20 @@
        if (!fIsActive)
                panic("completing page write wrapper that is not active");
 
-       DEBUG_PAGE_ACCESS_CHECK(fPage);
+       DEBUG_PAGE_ACCESS_START(fPage);
 
        fPage->busy = false;
                // Set unbusy and notify later by hand, since we might free the 
page.
 
        if (result == B_OK) {
                // put it into the active/inactive queue
-               move_page_to_active_or_inactive_queue(fPage, fDequeuedPage);
+               move_page_to_active_or_inactive_queue(fPage);
                fPage->busy_writing = false;
                DEBUG_PAGE_ACCESS_END(fPage);
        } else {
-               // Writing the page failed -- move to the modified queue. If we 
dequeued
-               // it from there, just enqueue it again, otherwise set the page 
state
-               // explicitly, which will take care of moving between the 
queues.
-               if (fDequeuedPage) {
-                       fPage->state = PAGE_STATE_MODIFIED;
-                       sModifiedPageQueue.AppendUnlocked(fPage);
-               } else
-                       set_page_state(fPage, PAGE_STATE_MODIFIED);
+               // Writing the page failed. One reason would be that the cache 
has been
+               // shrunk and the page does no longer belong to the file. 
Otherwise the
+               // actual I/O failed, in which case we'll simply keep the page 
modified.
 
                if (!fPage->busy_writing) {
                        // The busy_writing flag was cleared. That means the 
cache has been
@@ -1458,11 +1447,23 @@
 // TODO: Unmapping should already happen when resizing the cache!
                        fCache->RemovePage(fPage);
                        free_page(fPage, false);
+               } else {
+                       // Writing the page failed -- mark the page modified 
and move it to
+                       // an appropriate queue other than the modified queue, 
so we don't
+                       // keep trying to write it over and over again. We keep
+                       // non-temporary pages in the modified queue, though, 
so they don't
+                       // get lost in the inactive queue.
+                       dprintf("PageWriteWrapper: Failed to write page %p: 
%s\n", fPage,
+                               strerror(result));
 
-                       // Adjust temporary modified pages count, if necessary.
-                       if (fDequeuedPage && fCache->temporary)
-                               atomic_add(&sModifiedTemporaryPages, -1);
-               } else {
+                       fPage->modified = true;
+                       if (!fCache->temporary)
+                               set_page_state(fPage, PAGE_STATE_MODIFIED);
+                       else if (fPage->wired_count > 0 || 
!fPage->mappings.IsEmpty())
+                               set_page_state(fPage, PAGE_STATE_ACTIVE);
+                       else
+                               set_page_state(fPage, PAGE_STATE_INACTIVE);
+
                        fPage->busy_writing = false;
                        DEBUG_PAGE_ACCESS_END(fPage);
                }
@@ -1621,7 +1622,7 @@
 void
 PageWriterRun::AddPage(vm_page* page)
 {
-       fWrappers[fWrapperCount++].SetTo(page, true);
+       fWrappers[fWrapperCount++].SetTo(page);
 
        if (fTransferCount == 0 || !fTransfers[fTransferCount - 
1].AddPage(page)) {
                fTransfers[fTransferCount++].SetTo(this, page,
@@ -1720,6 +1721,12 @@
                if (modifiedPages == 0)
                        continue;
 
+#if ENABLE_SWAP_SUPPORT
+               page_stats pageStats;
+               get_page_stats(pageStats);
+               bool activePaging = do_active_paging(pageStats);
+#endif
+
                // depending on how urgent it becomes to get pages to disk, we 
adjust
                // our I/O priority
                uint32 lowPagesState = 
low_resource_state(B_KERNEL_RESOURCE_PAGES);
@@ -1742,10 +1749,6 @@
                // enough to do).
 
                // collect pages to be written
-#if ENABLE_SWAP_SUPPORT
-               bool lowOnPages = lowPagesState != B_NO_LOW_RESOURCE;
-#endif
-
                pageCollectionTime -= system_time();
 
                while (numPages < kNumPages) {
@@ -1757,27 +1760,42 @@
                        if (!cacheLocker.IsLocked())
                                continue;
 
+                       VMCache *cache = page->Cache();
+
+                       // If the page is busy or its state has changed while 
we were
+                       // locking the cache, just ignore it.
+                       if (page->busy || page->state != PAGE_STATE_MODIFIED)
+                               continue;
+
                        DEBUG_PAGE_ACCESS_START(page);
 
-                       VMCache *cache = page->Cache();
+                       // Don't write back wired (locked) pages.
+                       if (page->wired_count > 0) {
+                               set_page_state(page, PAGE_STATE_ACTIVE);
+                               DEBUG_PAGE_ACCESS_END(page);
+                               continue;
+                       }
 
-                       // Don't write back wired (locked) pages and don't 
write RAM pages
-                       // until we're low on pages. Also avoid writing 
temporary pages that
-                       // are active.
-                       if (page->wired_count > 0
-                               || (cache->temporary
+                       // Write back temporary pages only when we're actively 
paging.
+                       if (cache->temporary
 #if ENABLE_SWAP_SUPPORT
-                                       && (!lowOnPages /*|| page->usage_count 
> 0*/
-                                               || !cache->CanWritePage(
-                                                               
(off_t)page->cache_offset << PAGE_SHIFT))
+                               && (!activePaging
+                                       || !cache->CanWritePage(
+                                                       
(off_t)page->cache_offset << PAGE_SHIFT))
 #endif
-                               )) {
+                               ) {
+                               // We can't/don't want to do anything with this 
page, so move it
+                               // to one of the other queues.
+                               if (page->mappings.IsEmpty())
+                                       set_page_state(page, 
PAGE_STATE_INACTIVE);
+                               else
+                                       set_page_state(page, PAGE_STATE_ACTIVE);
+
                                DEBUG_PAGE_ACCESS_END(page);
-                               continue;
                        }
 
-                       // we need our own reference to the store, as it might
-                       // currently be destructed
+                       // We need our own reference to the store, as it might 
currently be
+                       // destroyed.
                        if (cache->AcquireUnreferencedStoreRef() != B_OK) {
                                DEBUG_PAGE_ACCESS_END(page);
                                cacheLocker.Unlock();
@@ -1785,21 +1803,13 @@
                                continue;
                        }
 
-                       // state might have changed while we were locking the 
cache
-                       if (page->busy || page->state != PAGE_STATE_MODIFIED) {
-                               // release the cache reference
-                               DEBUG_PAGE_ACCESS_END(page);
-                               cache->ReleaseStoreRef();
-                               continue;
-                       }
-
-                       sModifiedPageQueue.RemoveUnlocked(page);
                        run.AddPage(page);
 
+                       DEBUG_PAGE_ACCESS_END(page);
+
                        //dprintf("write page %p, cache %p (%ld)\n", page, 
page->cache, page->cache->ref_count);
                        TPW(WritePage(page));
 
-                       vm_clear_map_flags(page, PAGE_MODIFIED);
                        cache->AcquireRefLocked();
                        numPages++;
                }
@@ -2329,9 +2339,7 @@
                page_stats pageStats;
                get_page_stats(pageStats);
 
-               if (pageStats.totalFreePages + pageStats.cachedPages
-                               >= pageStats.unsatisfiedReservations
-                                       + (int32)sFreeOrCachedPagesTarget) {
+               if (!do_active_paging(pageStats)) {
                        // Things look good -- just maintain statistics and 
keep the pool
                        // of actually free pages full enough.
                        despairLevel = 0;
@@ -2478,18 +2486,12 @@
                        page = NULL;
                }
 
-               bool dequeuedPage = false;
                if (page != NULL) {
-                       if (page->busy) {
+                       if (page->busy
+                               || (page->state != PAGE_STATE_MODIFIED
+                                       && !vm_test_map_modification(page))) {
                                page = NULL;
-                       } else if (page->state == PAGE_STATE_MODIFIED) {
-                               DEBUG_PAGE_ACCESS_START(page);
-                               sModifiedPageQueue.RemoveUnlocked(page);
-                               dequeuedPage = true;
-                       } else if (!vm_test_map_modification(page)) {
-                               page = NULL;
-                       } else
-                               DEBUG_PAGE_ACCESS_START(page);
+                       }
                }
 
                PageWriteWrapper* wrapper = NULL;
@@ -2498,9 +2500,12 @@
                        if (nextWrapper > maxPages)
                                nextWrapper = 0;
 
-                       wrapper->SetTo(page, dequeuedPage);
-                       wrapper->ClearModifiedFlag();
+                       DEBUG_PAGE_ACCESS_START(page);
 
+                       wrapper->SetTo(page);
+
+                       DEBUG_PAGE_ACCESS_END(page);
+
                        if (transferEmpty || transfer.AddPage(page)) {
                                if (transferEmpty) {
                                        transfer.SetTo(NULL, page, maxPages);


Other related posts:

  • » [haiku-commits] r35485 - haiku/trunk/src/system/kernel/vm - ingo_weinhold