[haiku-commits] r35333 - in haiku/trunk: headers/private/kernel/vm src/add-ons/kernel/bus_managers/agp_gart src/system/kernel/arch/m68k src/system/kernel/arch/x86 src/system/kernel/cache ...

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Fri, 29 Jan 2010 16:54:40 +0100 (CET)

Author: bonefish
Date: 2010-01-29 16:54:40 +0100 (Fri, 29 Jan 2010)
New Revision: 35333
Changeset: http://dev.haiku-os.org/changeset/35333/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/vm_page.h
   haiku/trunk/headers/private/kernel/vm/vm_types.h
   haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp
   haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
   haiku/trunk/src/system/kernel/cache/file_cache.cpp
   haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
* Replaced the vm_page_allocate_page*() "pageState" parameter with a more
  general "flags" parameter. It encodes the target state of the page -- so
  that the page isn't unnecessarily put in the wrong page queue first -- a
  flag indicating whether the page should be cleared, and one indicating
  whether the page should be marked busy. (A short usage sketch follows
  below.)
* Added page state PAGE_STATE_CACHED. Not used yet.
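
For readers skimming the diff, here is a brief caller-side sketch of the new
convention. It only restates call sites that appear in this changeset; the
PAGE_STATE_* and VM_PAGE_ALLOC_* names and the reserve/unreserve pattern are
taken from the modified files, nothing here is a new API:

    // The low bits (masked by VM_PAGE_ALLOC_STATE) carry the target page
    // state, i.e. the queue the page ends up in; VM_PAGE_ALLOC_CLEAR and
    // VM_PAGE_ALLOC_BUSY are independent modifier bits.

    // A wired, zero-filled page, e.g. for a page table:
    vm_page_reserve_pages(1, VM_PRIORITY_SYSTEM);
    vm_page* page = vm_page_allocate_page(
        PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
    vm_page_unreserve_pages(1);

    // An active page that starts out busy while its contents are read in,
    // as in the file cache and vm_soft_fault() changes:
    vm_page* busyPage = vm_page_allocate_page(
        PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);

    // Inside the allocator the parts are recovered as:
    //   uint32 pageState = flags & VM_PAGE_ALLOC_STATE;
    //   bool clear = (flags & VM_PAGE_ALLOC_CLEAR) != 0;
    //   bool busy = (flags & VM_PAGE_ALLOC_BUSY) != 0;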


Modified: haiku/trunk/headers/private/kernel/vm/vm_page.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm_page.h     2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/headers/private/kernel/vm/vm_page.h     2010-01-29 15:54:40 UTC (rev 35333)
@@ -49,10 +49,10 @@
 void vm_page_reserve_pages(uint32 count, int priority);
 bool vm_page_try_reserve_pages(uint32 count, int priority);
 
-struct vm_page *vm_page_allocate_page(int pageState);
-struct vm_page *vm_page_allocate_page_run(int state, addr_t base,
+struct vm_page *vm_page_allocate_page(uint32 flags);
+struct vm_page *vm_page_allocate_page_run(uint32 flags, addr_t base,
        addr_t length, int priority);
-struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count,
+struct vm_page *vm_page_allocate_page_run_no_base(uint32 flags, addr_t count,
        int priority);
 struct vm_page *vm_page_at_index(int32 index);
 struct vm_page *vm_lookup_page(addr_t pageNumber);

Modified: haiku/trunk/headers/private/kernel/vm/vm_types.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/headers/private/kernel/vm/vm_types.h    2010-01-29 15:54:40 UTC (rev 35333)
@@ -128,13 +128,23 @@
        PAGE_STATE_ACTIVE = 0,
        PAGE_STATE_INACTIVE,
        PAGE_STATE_MODIFIED,
+       PAGE_STATE_CACHED,
        PAGE_STATE_FREE,
        PAGE_STATE_CLEAR,
        PAGE_STATE_WIRED,
-       PAGE_STATE_UNUSED
+       PAGE_STATE_UNUSED,
+
+       PAGE_STATE_COUNT,
+
+       PAGE_STATE_FIRST_UNQUEUED = PAGE_STATE_WIRED
 };
 
 
+#define VM_PAGE_ALLOC_STATE    0x00000007
+#define VM_PAGE_ALLOC_CLEAR    0x00000010
+#define VM_PAGE_ALLOC_BUSY     0x00000020
+
+
 #if DEBUG_PAGE_ACCESS
 #      include <thread.h>
 

Modified: haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp
===================================================================
--- haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp   2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp   2010-01-29 15:54:40 UTC (rev 35333)
@@ -539,9 +539,9 @@
        uint32 count = size / B_PAGE_SIZE;
 
        if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) {
-               memory->page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, 0, count,
+               memory->page = vm_page_allocate_page_run(
+                       PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR, 0, count,
                        VM_PRIORITY_SYSTEM);
-               // TODO: Mark pages unbusy!
                if (memory->page == NULL)
                        return B_NO_MEMORY;
        } else {
@@ -552,8 +552,8 @@
 
                vm_page_reserve_pages(count, VM_PRIORITY_SYSTEM);
                for (uint32 i = 0; i < count; i++) {
-                       memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR);
-                       memory->pages[i]->busy = false;
+                       memory->pages[i] = vm_page_allocate_page(
+                               PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
                }
                vm_page_unreserve_pages(count);
        }

Modified: haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    2010-01-29 15:54:40 UTC (rev 35333)
@@ -543,12 +543,8 @@
                unsigned int i;
 
                // we need to allocate a pgtable
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+               page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 
-               // mark the page WIRED
-               vm_page_set_state(page, PAGE_STATE_WIRED);
-               page->busy = false;
-
                DEBUG_PAGE_ACCESS_END(page);
 
                pgdir = page->physical_page_number * B_PAGE_SIZE;
@@ -592,12 +588,8 @@
                unsigned int i;
 
                // we need to allocate a pgtable
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+               page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 
-               // mark the page WIRED
-               vm_page_set_state(page, PAGE_STATE_WIRED);
-               page->busy = false;
-
                DEBUG_PAGE_ACCESS_END(page);
 
                pgtable = page->physical_page_number * B_PAGE_SIZE;

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  2010-01-29 15:54:40 UTC (rev 35333)
@@ -422,12 +422,8 @@
                vm_page *page;
 
                // we need to allocate a pgtable
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+               page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 
-               // mark the page WIRED
-               vm_page_set_state(page, PAGE_STATE_WIRED);
-               page->busy = false;
-
                DEBUG_PAGE_ACCESS_END(page);
 
                pgtable = page->physical_page_number * B_PAGE_SIZE;

Modified: haiku/trunk/src/system/kernel/cache/file_cache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/cache/file_cache.cpp  2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/src/system/kernel/cache/file_cache.cpp  2010-01-29 15:54:40 UTC (rev 35333)
@@ -157,7 +157,8 @@
        // allocate pages for the cache and mark them busy
        uint32 i = 0;
        for (size_t pos = 0; pos < fSize; pos += B_PAGE_SIZE) {
-               vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
+               vm_page* page = vm_page_allocate_page(
+                       PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
 
                fCache->InsertPage(page, fOffset + pos);
 
@@ -385,7 +386,7 @@
        // allocate pages for the cache and mark them busy
        for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
                vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-                       PAGE_STATE_FREE);
+                       PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
 
                cache->InsertPage(page, offset + pos);
 
@@ -508,7 +509,7 @@
                // TODO: the pages we allocate here should have been reserved upfront
                //      in cache_io()
                vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-                       PAGE_STATE_FREE);
+                       PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
 
                ref->cache->InsertPage(page, offset + pos);
 
@@ -1074,7 +1075,8 @@
 {
        // allocate a clean page we can use for writing zeroes
        vm_page_reserve_pages(1, VM_PRIORITY_SYSTEM);
-       vm_page* page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+       vm_page* page = vm_page_allocate_page(
+               PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
        vm_page_unreserve_pages(1);
 
        sZeroPage = (addr_t)page->physical_page_number * B_PAGE_SIZE;

Modified: haiku/trunk/src/system/kernel/slab/MemoryManager.cpp
===================================================================
--- haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/src/system/kernel/slab/MemoryManager.cpp        2010-01-29 15:54:40 UTC (rev 35333)
@@ -1349,10 +1349,8 @@
        addr_t endAreaOffset = areaOffset + size;
        for (size_t offset = areaOffset; offset < endAreaOffset;
                        offset += B_PAGE_SIZE) {
-               vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
+               vm_page* page = vm_page_allocate_page(PAGE_STATE_WIRED);
                cache->InsertPage(page, offset);
-               vm_page_set_state(page, PAGE_STATE_WIRED);
-               page->busy = false;
 
                page->wired_count++;
                atomic_add(&gMappedPagesCount, 1);

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-29 15:54:40 UTC (rev 35333)
@@ -920,8 +920,8 @@
        bool isStack = (protection & B_STACK_AREA) != 0;
        page_num_t guardPages;
        bool canOvercommit = false;
-       uint32 newPageState = (flags & CREATE_AREA_DONT_CLEAR) != 0
-               ? PAGE_STATE_FREE : PAGE_STATE_CLEAR;
+       uint32 pageAllocFlags = (flags & CREATE_AREA_DONT_CLEAR) == 0
+               ? VM_PAGE_ALLOC_CLEAR : 0;
 
        TRACE(("create_anonymous_area [%ld] %s: size 0x%lx\n", team, name, 
size));
 
@@ -1049,8 +1049,8 @@
        if (wiring == B_CONTIGUOUS) {
                // we try to allocate the page run here upfront as this may easily
                // fail for obvious reasons
-               page = vm_page_allocate_page_run(newPageState, physicalAddress,
-                       size / B_PAGE_SIZE, priority);
+               page = vm_page_allocate_page_run(PAGE_STATE_WIRED | pageAllocFlags,
+                       physicalAddress, size / B_PAGE_SIZE, priority);
                if (page == NULL) {
                        status = B_NO_MEMORY;
                        goto err0;
@@ -1122,10 +1122,9 @@
 #      endif
                                        continue;
 #endif
-                               vm_page* page = vm_page_allocate_page(newPageState);
-                               cache->InsertPage(page, offset);
-                               map_page(area, page, address, protection);
-//                             vm_page_set_state(page, PAGE_STATE_WIRED);
+                               vm_page* page = vm_page_allocate_page(
+                                       PAGE_STATE_ACTIVE | pageAllocFlags);
+//                                     PAGE_STATE_WIRED | pageAllocFlags);
                                        // TODO: The pages should be PAGE_STATE_WIRED, since there's
                                        // no need for the page daemon to play with them (the same
                                        // should be considered in vm_soft_fault()). ATM doing that
@@ -1134,7 +1133,8 @@
                                        // will age pages way too fast (since it just skips
                                        // PAGE_STATE_WIRED pages, while it processes
                                        // PAGE_STATE_ACTIVE with wired_count > 0).
-                               page->busy = false;
+                               cache->InsertPage(page, offset);
+                               map_page(area, page, address, protection);
 
                                DEBUG_PAGE_ACCESS_END(page);
 
@@ -1217,8 +1217,6 @@
 
                                increment_page_wired_count(page);
                                cache->InsertPage(page, offset);
-                               vm_page_set_state(page, PAGE_STATE_WIRED);
-                               page->busy = false;
 
                                DEBUG_PAGE_ACCESS_END(page);
                        }
@@ -3720,7 +3718,8 @@
                // see if the backing store has it
                if (cache->HasPage(context.cacheOffset)) {
                        // insert a fresh page and mark it busy -- we're going to read it in
-                       page = vm_page_allocate_page(PAGE_STATE_FREE);
+                       page = vm_page_allocate_page(
+                               PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
                        cache->InsertPage(page, context.cacheOffset);
 
                        // We need to unlock all caches and the address space while reading
@@ -3774,8 +3773,7 @@
                cache = context.isWrite ? context.topCache : lastCache;
 
                // allocate a clean page
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
-               page->busy = false;
+               page = vm_page_allocate_page(PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_CLEAR);
                FTRACE(("vm_soft_fault: just allocated page 0x%lx\n",
                        page->physical_page_number));
 
@@ -3789,8 +3787,7 @@
                // TODO: If memory is low, it might be a good idea to steal the page
                // from our source cache -- if possible, that is.
                FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
-               page = vm_page_allocate_page(PAGE_STATE_FREE);
-               page->busy = false;
+               page = vm_page_allocate_page(PAGE_STATE_ACTIVE);
 
                // To not needlessly kill concurrency we unlock all caches but the top
                // one while copying the page. Lacking another mechanism to ensure that

Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-29 15:20:45 UTC (rev 35332)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-29 15:54:40 UTC (rev 35333)
@@ -73,12 +73,15 @@
 
 int32 gMappedPagesCount;
 
-static VMPageQueue sFreePageQueue;
-static VMPageQueue sClearPageQueue;
-static VMPageQueue sModifiedPageQueue;
-static VMPageQueue sInactivePageQueue;
-static VMPageQueue sActivePageQueue;
+static VMPageQueue sPageQueues[PAGE_STATE_COUNT];
 
+static VMPageQueue& sFreePageQueue = sPageQueues[PAGE_STATE_FREE];
+static VMPageQueue& sClearPageQueue = sPageQueues[PAGE_STATE_CLEAR];
+static VMPageQueue& sModifiedPageQueue = sPageQueues[PAGE_STATE_MODIFIED];
+static VMPageQueue& sInactivePageQueue = sPageQueues[PAGE_STATE_INACTIVE];
+static VMPageQueue& sActivePageQueue = sPageQueues[PAGE_STATE_ACTIVE];
+static VMPageQueue& sCachedPageQueue = sPageQueues[PAGE_STATE_CACHED];
+
 static vm_page *sPages;
 static addr_t sPhysicalPageOffset;
 static size_t sNumPages;
@@ -295,6 +298,7 @@
                { "modified",   &sModifiedPageQueue },
                { "active",             &sActivePageQueue },
                { "inactive",   &sInactivePageQueue },
+               { "cached",             &sCachedPageQueue },
                { NULL, NULL }
        };
 
@@ -336,6 +340,8 @@
                        return "inactive";
                case PAGE_STATE_MODIFIED:
                        return "modified";
+               case PAGE_STATE_CACHED:
+                       return "cached";
                case PAGE_STATE_FREE:
                        return "free";
                case PAGE_STATE_CLEAR:
@@ -494,6 +500,8 @@
                queue = &sActivePageQueue;
        else if (!strcmp(argv[1], "inactive"))
                queue = &sInactivePageQueue;
+       else if (!strcmp(argv[1], "cached"))
+               queue = &sCachedPageQueue;
        else {
                kprintf("page_queue: unknown queue \"%s\".\n", argv[1]);
                return 0;
@@ -573,6 +581,8 @@
                counter[PAGE_STATE_ACTIVE], busyCounter[PAGE_STATE_ACTIVE]);
        kprintf("inactive: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
                counter[PAGE_STATE_INACTIVE], busyCounter[PAGE_STATE_INACTIVE]);
+       kprintf("cached: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+               counter[PAGE_STATE_CACHED], busyCounter[PAGE_STATE_CACHED]);
        kprintf("unused: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
                counter[PAGE_STATE_UNUSED], busyCounter[PAGE_STATE_UNUSED]);
        kprintf("wired: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
@@ -598,6 +608,8 @@
                sActivePageQueue.Count());
        kprintf("inactive queue: %p, count = %ld\n", &sInactivePageQueue,
                sInactivePageQueue.Count());
+       kprintf("cached queue: %p, count = %ld\n", &sCachedPageQueue,
+               sCachedPageQueue.Count());
        return 0;
 }
 
@@ -645,6 +657,9 @@
                case PAGE_STATE_MODIFIED:
                        fromQueue = &sModifiedPageQueue;
                        break;
+               case PAGE_STATE_CACHED:
+                       fromQueue = &sCachedPageQueue;
+                       break;
                case PAGE_STATE_FREE:
                case PAGE_STATE_CLEAR:
                        panic("free_page(): page %p already free", page);
@@ -717,6 +732,9 @@
                case PAGE_STATE_MODIFIED:
                        fromQueue = &sModifiedPageQueue;
                        break;
+               case PAGE_STATE_CACHED:
+                       fromQueue = &sCachedPageQueue;
+                       break;
                case PAGE_STATE_FREE:
                case PAGE_STATE_CLEAR:
                        panic("set_page_state(): page %p is free/clear", page);
@@ -743,6 +761,9 @@
                case PAGE_STATE_MODIFIED:
                        toQueue = &sModifiedPageQueue;
                        break;
+               case PAGE_STATE_CACHED:
+                       toQueue = &sCachedPageQueue;
+                       break;
                case PAGE_STATE_FREE:
                case PAGE_STATE_CLEAR:
                        panic("set_page_state(): target state is free/clear");
@@ -809,6 +830,7 @@
                state = PAGE_STATE_ACTIVE;
        else
                state = PAGE_STATE_INACTIVE;
+// TODO: Cached queue!
 
        if (dequeued) {
                page->state = state;
@@ -928,6 +950,9 @@
                case PAGE_STATE_MODIFIED:
                        queue = &sModifiedPageQueue;
                        break;
+               case PAGE_STATE_CACHED:
+                       queue = &sCachedPageQueue;
+                       break;
 
                default:
                        return;
@@ -1919,6 +1944,7 @@
        sModifiedPageQueue.Init("modified pages queue");
        sInactivePageQueue.Init("inactive pages queue");
        sActivePageQueue.Init("active pages queue");
+       sCachedPageQueue.Init("cached pages queue");
        sFreePageQueue.Init("free pages queue");
        sClearPageQueue.Init("clear pages queue");
 
@@ -2077,6 +2103,7 @@
                        case PAGE_STATE_ACTIVE:
                        case PAGE_STATE_INACTIVE:
                        case PAGE_STATE_MODIFIED:
+                       case PAGE_STATE_CACHED:
                        case PAGE_STATE_UNUSED:
                        default:
                                // uh
@@ -2279,22 +2306,21 @@
 
 
 vm_page *
-vm_page_allocate_page(int pageState)
+vm_page_allocate_page(uint32 flags)
 {
+       uint32 pageState = flags & VM_PAGE_ALLOC_STATE;
+       ASSERT(pageState != PAGE_STATE_FREE);
+       ASSERT(pageState != PAGE_STATE_CLEAR);
+
        VMPageQueue* queue;
        VMPageQueue* otherQueue;
 
-       switch (pageState) {
-               case PAGE_STATE_FREE:
-                       queue = &sFreePageQueue;
-                       otherQueue = &sClearPageQueue;
-                       break;
-               case PAGE_STATE_CLEAR:
-                       queue = &sClearPageQueue;
-                       otherQueue = &sFreePageQueue;
-                       break;
-               default:
-                       return NULL; // invalid
+       if ((flags & VM_PAGE_ALLOC_CLEAR) != 0) {
+               queue = &sClearPageQueue;
+               otherQueue = &sFreePageQueue;
+       } else {
+               queue = &sFreePageQueue;
+               otherQueue = &sClearPageQueue;
        }
 
        atomic_add(&sUnreservedFreePages, -1);
@@ -2336,19 +2362,20 @@
        DEBUG_PAGE_ACCESS_START(page);
 
        int oldPageState = page->state;
-       page->state = PAGE_STATE_ACTIVE;
-       page->busy = true;
+       page->state = pageState;
+       page->busy = (flags & VM_PAGE_ALLOC_BUSY) != 0;
        page->usage_count = 2;
        page->accessed = false;
        page->modified = false;
 
        locker.Unlock();
 
-       sActivePageQueue.AppendUnlocked(page);
+       if (pageState < PAGE_STATE_FIRST_UNQUEUED)
+               sPageQueues[pageState].AppendUnlocked(page);
 
        // clear the page, if we had to take it from the free queue and a clear
        // page was requested
-       if (pageState == PAGE_STATE_CLEAR && oldPageState != PAGE_STATE_CLEAR)
+       if ((flags & VM_PAGE_ALLOC_CLEAR) != 0 && oldPageState != PAGE_STATE_CLEAR)
                clear_page(page);
 
        return page;
@@ -2356,9 +2383,13 @@
 
 
 static vm_page*
-allocate_page_run(page_num_t start, page_num_t length, int pageState,
+allocate_page_run(page_num_t start, page_num_t length, uint32 flags,
        WriteLocker& freeClearQueueLocker)
 {
+       uint32 pageState = flags & VM_PAGE_ALLOC_STATE;
+       ASSERT(pageState != PAGE_STATE_FREE);
+       ASSERT(pageState != PAGE_STATE_CLEAR);
+
        T(AllocatePageRun(length));
 
        // pull the pages out of the appropriate queues
@@ -2375,8 +2406,8 @@
                        freePages.Add(&page);
                }
 
-               page.state = PAGE_STATE_ACTIVE;
-               page.busy = true;
+               page.state = flags & VM_PAGE_ALLOC_STATE;
+               page.busy = flags & VM_PAGE_ALLOC_BUSY;
                page.usage_count = 1;
                page.accessed = false;
                page.modified = false;
@@ -2385,16 +2416,18 @@
        freeClearQueueLocker.Unlock();
 
        // clear pages, if requested
-       if (pageState == PAGE_STATE_CLEAR) {
+       if ((flags & VM_PAGE_ALLOC_CLEAR) != 0) {
                for (VMPageQueue::PageList::Iterator it = freePages.GetIterator();
                                vm_page* page = it.Next();) {
                        clear_page(page);
                }
        }
 
-       // add pages to active queue
-       freePages.MoveFrom(&clearPages);
-       sActivePageQueue.AppendUnlocked(freePages, length);
+       // add pages to target queue
+       if (pageState < PAGE_STATE_FIRST_UNQUEUED) {
+               freePages.MoveFrom(&clearPages);
+               sPageQueues[pageState].AppendUnlocked(freePages, length);
+       }
 
        // Note: We don't unreserve the pages since we pulled them out of the
        // free/clear queues without adjusting sUnreservedFreePages.
@@ -2404,7 +2437,7 @@
 
 
 vm_page *
-vm_page_allocate_page_run(int pageState, addr_t base, addr_t length,
+vm_page_allocate_page_run(uint32 flags, addr_t base, addr_t length,
        int priority)
 {
        uint32 start = base >> PAGE_SHIFT;
@@ -2435,7 +2468,7 @@
                }
 
                if (foundRun)
-                       return allocate_page_run(start, length, pageState,
+                       return allocate_page_run(start, length, flags,
                                freeClearQueueLocker);
 
                start += i;
@@ -2444,21 +2477,16 @@
 
 
 vm_page *
-vm_page_allocate_page_run_no_base(int pageState, addr_t count, int priority)
+vm_page_allocate_page_run_no_base(uint32 flags, addr_t count, int priority)
 {
        VMPageQueue* queue;
        VMPageQueue* otherQueue;
-       switch (pageState) {
-               case PAGE_STATE_FREE:
-                       queue = &sFreePageQueue;
-                       otherQueue = &sClearPageQueue;
-                       break;
-               case PAGE_STATE_CLEAR:
-                       queue = &sClearPageQueue;
-                       otherQueue = &sFreePageQueue;
-                       break;
-               default:
-                       return NULL; // invalid
+       if ((flags & VM_PAGE_ALLOC_CLEAR) != 0) {
+               queue = &sClearPageQueue;
+               otherQueue = &sFreePageQueue;
+       } else {
+               queue = &sFreePageQueue;
+               otherQueue = &sClearPageQueue;
        }
 
        if (!vm_page_try_reserve_pages(count, priority))
@@ -2485,7 +2513,7 @@
                        }
 
                        if (foundRun) {
-                               return allocate_page_run(page - sPages, count, pageState,
+                               return allocate_page_run(page - sPages, count, flags,
                                        freeClearQueueLocker);
                        }
                }
@@ -2578,6 +2606,9 @@
                case PAGE_STATE_MODIFIED:
                        queue = &sModifiedPageQueue;
                        break;
+               case PAGE_STATE_CACHED:
+                       queue = &sCachedPageQueue;
+                       break;
                case PAGE_STATE_FREE:
                case PAGE_STATE_CLEAR:
                        panic("vm_page_requeue() called for free/clear page 
%p", page);

