[haiku-commits] r34836 - in haiku/trunk: headers/private/kernel/vm src/add-ons/kernel/bus_managers/agp_gart src/system/kernel/arch/m68k src/system/kernel/arch/x86 src/system/kernel/cache ...

  • From: ingo_weinhold@xxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Fri, 1 Jan 2010 17:37:14 +0100 (CET)

Author: bonefish
Date: 2010-01-01 17:37:14 +0100 (Fri, 01 Jan 2010)
New Revision: 34836
Changeset: http://dev.haiku-os.org/changeset/34836/haiku

Modified:
   haiku/trunk/headers/private/kernel/vm/vm_page.h
   haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp
   haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
   haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
   haiku/trunk/src/system/kernel/cache/file_cache.cpp
   haiku/trunk/src/system/kernel/vm/VMCache.cpp
   haiku/trunk/src/system/kernel/vm/vm.cpp
   haiku/trunk/src/system/kernel/vm/vm_page.cpp
Log:
* agp_gart(): Use vm_page_[un]reserve_pages().
* Removed unused vm_page_allocate_pages().
* Removed now unused (always true) "reserved" parameter from
  vm_page_allocate_page().
* Removed unused (always false) "stealActive" parameter from steal_page()
  and find_page_candidate(), and the now always-true "reserve" parameter
  from steal_pages().


Modified: haiku/trunk/headers/private/kernel/vm/vm_page.h
===================================================================
--- haiku/trunk/headers/private/kernel/vm/vm_page.h     2010-01-01 14:54:41 UTC 
(rev 34835)
+++ haiku/trunk/headers/private/kernel/vm/vm_page.h     2010-01-01 16:37:14 UTC 
(rev 34836)
@@ -49,9 +49,7 @@
 void vm_page_reserve_pages(uint32 count);
 bool vm_page_try_reserve_pages(uint32 count);
 
-struct vm_page *vm_page_allocate_page(int pageState, bool reserved);
-status_t vm_page_allocate_pages(int pageState, struct vm_page **pages,
-       uint32 numPages);
+struct vm_page *vm_page_allocate_page(int pageState);
 struct vm_page *vm_page_allocate_page_run(int state, addr_t base,
        addr_t length);
 struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count);

Modified: haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp
===================================================================
--- haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp   
2010-01-01 14:54:41 UTC (rev 34835)
+++ haiku/trunk/src/add-ons/kernel/bus_managers/agp_gart/agp_gart.cpp   
2010-01-01 16:37:14 UTC (rev 34836)
@@ -545,18 +545,10 @@
                if (memory->pages == NULL)
                        return B_NO_MEMORY;
 
-               for (uint32 i = 0; i < count; i++) {
-                       memory->pages[i] = 
vm_page_allocate_page(PAGE_STATE_CLEAR, false);
-                       if (memory->pages[i] == NULL) {
-                               // Free pages we already allocated
-                               while (i-- > 0) {
-                                       vm_page_set_state(memory->pages[i], 
PAGE_STATE_CLEAR);
-                               }
-                               free(memory->pages);
-                               memory->pages = NULL;
-                               return B_NO_MEMORY;
-                       }
-               }
+               vm_page_reserve_pages(count);
+               for (uint32 i = 0; i < count; i++)
+                       memory->pages[i] = 
vm_page_allocate_page(PAGE_STATE_CLEAR);
+               vm_page_unreserve_pages(count);
        }
 #else
        void *address;

Modified: 
haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    
2010-01-01 14:54:41 UTC (rev 34835)
+++ haiku/trunk/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp    
2010-01-01 16:37:14 UTC (rev 34836)
@@ -540,7 +540,7 @@
                unsigned int i;
 
                // we need to allocate a pgtable
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
 
                // mark the page WIRED
                vm_page_set_state(page, PAGE_STATE_WIRED);
@@ -586,7 +586,7 @@
                unsigned int i;
 
                // we need to allocate a pgtable
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
 
                // mark the page WIRED
                vm_page_set_state(page, PAGE_STATE_WIRED);

Modified: haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp
===================================================================
--- haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  
2010-01-01 14:54:41 UTC (rev 34835)
+++ haiku/trunk/src/system/kernel/arch/x86/arch_vm_translation_map.cpp  
2010-01-01 16:37:14 UTC (rev 34836)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@xxxxxxx
+ * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@xxxxxxx
  * Copyright 2002-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx All rights 
reserved.
  * Distributed under the terms of the MIT License.
  *
@@ -364,7 +364,7 @@
                vm_page *page;
 
                // we need to allocate a pgtable
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
 
                // mark the page WIRED
                vm_page_set_state(page, PAGE_STATE_WIRED);

Modified: haiku/trunk/src/system/kernel/cache/file_cache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/cache/file_cache.cpp  2010-01-01 14:54:41 UTC 
(rev 34835)
+++ haiku/trunk/src/system/kernel/cache/file_cache.cpp  2010-01-01 16:37:14 UTC 
(rev 34836)
@@ -149,7 +149,7 @@
        // allocate pages for the cache and mark them busy
        uint32 i = 0;
        for (size_t pos = 0; pos < fSize; pos += B_PAGE_SIZE) {
-               vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE, true);
+               vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
                if (page == NULL)
                        break;
 
@@ -381,7 +381,7 @@
        // allocate pages for the cache and mark them busy
        for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
                vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-                       PAGE_STATE_FREE, true);
+                       PAGE_STATE_FREE);
                if (page == NULL)
                        panic("no more pages!");
 
@@ -506,7 +506,7 @@
                // TODO: the pages we allocate here should have been reserved 
upfront
                //      in cache_io()
                vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-                       PAGE_STATE_FREE, true);
+                       PAGE_STATE_FREE);
 
                ref->cache->InsertPage(page, offset + pos);
 

Modified: haiku/trunk/src/system/kernel/vm/VMCache.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-01-01 14:54:41 UTC 
(rev 34835)
+++ haiku/trunk/src/system/kernel/vm/VMCache.cpp        2010-01-01 16:37:14 UTC 
(rev 34836)
@@ -515,9 +515,6 @@
                        return NULL;
                }
 
-               // TODO: this is problematic, as it requires the caller not to 
have
-               // a lock on this cache (it might be called via
-               // vm_page_allocate_page(..., false)).
                if (!cache->SwitchLock(&sCacheListLock)) {
                        // cache has been deleted
                        mutex_lock(&sCacheListLock);

Modified: haiku/trunk/src/system/kernel/vm/vm.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-01 14:54:41 UTC (rev 
34835)
+++ haiku/trunk/src/system/kernel/vm/vm.cpp     2010-01-01 16:37:14 UTC (rev 
34836)
@@ -871,7 +871,7 @@
 #      endif
                                        continue;
 #endif
-                               vm_page* page = 
vm_page_allocate_page(newPageState, true);
+                               vm_page* page = 
vm_page_allocate_page(newPageState);
                                cache->InsertPage(page, offset);
                                vm_map_page(area, page, address, protection);
 
@@ -3671,7 +3671,7 @@
                // see if the backing store has it
                if (cache->HasPage(context.cacheOffset)) {
                        // insert a fresh page and mark it busy -- we're going 
to read it in
-                       page = vm_page_allocate_page(PAGE_STATE_FREE, true);
+                       page = vm_page_allocate_page(PAGE_STATE_FREE);
                        cache->InsertPage(page, context.cacheOffset);
 
                        // We need to unlock all caches and the address space 
while reading
@@ -3724,7 +3724,7 @@
                cache = context.isWrite ? context.topCache : lastCache;
 
                // allocate a clean page
-               page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+               page = vm_page_allocate_page(PAGE_STATE_CLEAR);
                FTRACE(("vm_soft_fault: just allocated page 0x%lx\n",
                        page->physical_page_number));
 
@@ -3739,7 +3739,7 @@
                // TODO: If memory is low, it might be a good idea to steal the 
page
                // from our source cache -- if possible, that is.
                FTRACE(("get new page, copy it, and put it into the topmost 
cache\n"));
-               page = vm_page_allocate_page(PAGE_STATE_FREE, true);
+               page = vm_page_allocate_page(PAGE_STATE_FREE);
 
                // copy the page
                vm_memcpy_physical_page(page->physical_page_number * 
B_PAGE_SIZE,

Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp
===================================================================
--- haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-01 14:54:41 UTC 
(rev 34835)
+++ haiku/trunk/src/system/kernel/vm/vm_page.cpp        2010-01-01 16:37:14 UTC 
(rev 34836)
@@ -127,9 +127,7 @@
 
 class AllocatePage : public AbstractTraceEntry {
        public:
-               AllocatePage(bool reserved)
-                       :
-                       fReserved(reserved)
+               AllocatePage()
                {
                        Initialized();
                }
@@ -137,12 +135,7 @@
                virtual void AddDump(TraceOutput& out)
                {
                        out.Print("page alloc");
-                       if (fReserved)
-                               out.Print(" reserved");
                }
-
-       private:
-               bool            fReserved;
 };
 
 
@@ -1511,7 +1504,7 @@
 
 
 static vm_page *
-find_page_candidate(struct vm_page &marker, bool stealActive)
+find_page_candidate(struct vm_page &marker)
 {
        MutexLocker locker(sPageLock);
        page_queue *queue;
@@ -1521,10 +1514,6 @@
                // Get the first free pages of the (in)active queue
                queue = &sInactivePageQueue;
                page = sInactivePageQueue.head;
-               if (page == NULL && stealActive) {
-                       queue = &sActivePageQueue;
-                       page = sActivePageQueue.head;
-               }
        } else {
                // Get the next page of the current queue
                if (marker.state == PAGE_STATE_INACTIVE)
@@ -1542,10 +1531,7 @@
        }
 
        while (page != NULL) {
-               if (!is_marker_page(page)
-                       && (page->state == PAGE_STATE_INACTIVE
-                               || (stealActive && page->state == 
PAGE_STATE_ACTIVE
-                                       && page->wired_count == 0))) {
+               if (!is_marker_page(page) && page->state == 
PAGE_STATE_INACTIVE) {
                        // we found a candidate, insert marker
                        marker.state = queue == &sActivePageQueue
                                ? PAGE_STATE_ACTIVE : PAGE_STATE_INACTIVE;
@@ -1554,10 +1540,6 @@
                }
 
                page = page->queue_next;
-               if (page == NULL && stealActive && queue != &sActivePageQueue) {
-                       queue = &sActivePageQueue;
-                       page = sActivePageQueue.head;
-               }
        }
 
        return NULL;
@@ -1565,7 +1547,7 @@
 
 
 static bool
-steal_page(vm_page *page, bool stealActive)
+steal_page(vm_page *page)
 {
        // try to lock the page's cache
        if (vm_cache_acquire_locked_page_cache(page, false) == NULL)
@@ -1575,9 +1557,7 @@
        MethodDeleter<VMCache> _2(page->cache, &VMCache::ReleaseRefLocked);
 
        // check again if that page is still a candidate
-       if (page->state != PAGE_STATE_INACTIVE
-               && (!stealActive || page->state != PAGE_STATE_ACTIVE
-                       || page->wired_count != 0))
+       if (page->state != PAGE_STATE_INACTIVE)
                return false;
 
        // recheck eventual last minute changes
@@ -1608,10 +1588,8 @@
 
 
 static size_t
-steal_pages(vm_page **pages, size_t count, bool reserve)
+steal_pages(vm_page **pages, size_t count)
 {
-       size_t maxCount = count;
-
        while (true) {
                vm_page marker;
                marker.type = PAGE_TYPE_DUMMY;
@@ -1622,19 +1600,17 @@
                size_t stolen = 0;
 
                while (count > 0) {
-                       vm_page *page = find_page_candidate(marker, false);
+                       vm_page *page = find_page_candidate(marker);
                        if (page == NULL)
                                break;
 
-                       if (steal_page(page, false)) {
-                               if (reserve || stolen >= maxCount) {
-                                       MutexLocker _(sPageLock);
-                                       enqueue_page(&sFreePageQueue, page);
-                                       page->state = PAGE_STATE_FREE;
+                       if (steal_page(page)) {
+                               MutexLocker locker(sPageLock);
+                               enqueue_page(&sFreePageQueue, page);
+                               page->state = PAGE_STATE_FREE;
+                               locker.Unlock();
 
-                                       T(StolenPage());
-                               } else if (stolen < maxCount)
-                                       pages[stolen] = page;
+                               T(StolenPage());
 
                                stolen++;
                                count--;
@@ -1646,11 +1622,11 @@
 
                MutexLocker locker(sPageLock);
 
-               if ((reserve && sReservedPages <= free_page_queue_count())
+               if (sReservedPages <= free_page_queue_count()
                        || count == 0
-                       || ((!reserve && (sInactivePageQueue.count > 0))
-                               || free_page_queue_count() > sReservedPages))
+                       || free_page_queue_count() > sReservedPages) {
                        return stolen;
+               }
 
                if (stolen && !tried && sInactivePageQueue.count > 0) {
                        count++;
@@ -1681,7 +1657,7 @@
                locker.Lock();
                sPageDeficit--;
 
-               if (reserve && sReservedPages <= free_page_queue_count())
+               if (sReservedPages <= free_page_queue_count())
                        return stolen;
        }
 }
@@ -2046,6 +2022,7 @@
        They will only be handed out to someone who has actually reserved them.
        This call returns as soon as the number of requested pages has been
        reached.
+       The caller must not hold any cache lock or the function might deadlock.
 */
 void
 vm_page_reserve_pages(uint32 count)
@@ -2065,7 +2042,7 @@
        count = sReservedPages - freePages;
        locker.Unlock();
 
-       steal_pages(NULL, count + 1, true);
+       steal_pages(NULL, count + 1);
                // we get one more, just in case we can do something someone
                // else can't
 }
@@ -2090,10 +2067,8 @@
 }
 
 
-// TODO: you must not have locked a cache when calling this function with
-// reserved == false. See vm_cache_acquire_locked_page_cache().
 vm_page *
-vm_page_allocate_page(int pageState, bool reserved)
+vm_page_allocate_page(int pageState)
 {
        page_queue *queue;
        page_queue *otherQueue;
@@ -2113,37 +2088,21 @@
 
        MutexLocker locker(sPageLock);
 
-       T(AllocatePage(reserved));
+       T(AllocatePage());
 
-       vm_page *page = NULL;
-       while (true) {
-               if (reserved || sReservedPages < free_page_queue_count()) {
-                       page = dequeue_page(queue);
-                       if (page == NULL) {
+       vm_page *page = dequeue_page(queue);
+       if (page == NULL) {
 #if DEBUG_PAGE_QUEUE
-                               if (queue->count != 0)
-                                       panic("queue %p corrupted, count = 
%d\n", queue, queue->count);
+               if (queue->count != 0)
+                       panic("queue %p corrupted, count = %d\n", queue, 
queue->count);
 #endif
 
-                               // if the primary queue was empty, grab the 
page from the
-                               // secondary queue
-                               page = dequeue_page(otherQueue);
-                       }
-               }
+               // if the primary queue was empty, grab the page from the
+               // secondary queue
+               page = dequeue_page(otherQueue);
 
-               if (page != NULL)
-                       break;
-
-               if (reserved)
+               if (page == NULL)
                        panic("Had reserved page, but there is none!");
-
-               // steal one from the inactive list
-               locker.Unlock();
-               size_t stolen = steal_pages(&page, 1, false);
-               locker.Lock();
-
-               if (stolen > 0)
-                       break;
        }
 
        if (page->cache != NULL)
@@ -2165,31 +2124,6 @@
 }
 
 
-/*!    Allocates a number of pages and puts their pointers into the provided
-       array. All pages are marked busy.
-       Returns B_OK on success, and B_NO_MEMORY when there aren't any free
-       pages left to allocate.
-*/
-status_t
-vm_page_allocate_pages(int pageState, vm_page **pages, uint32 numPages)
-{
-       uint32 i;
-
-       for (i = 0; i < numPages; i++) {
-               pages[i] = vm_page_allocate_page(pageState, false);
-               if (pages[i] == NULL) {
-                       // allocation failed, we need to free what we already 
have
-                       while (i-- > 0)
-                               vm_page_set_state(pages[i], pageState);
-
-                       return B_NO_MEMORY;
-               }
-       }
-
-       return B_OK;
-}
-
-
 vm_page *
 vm_page_allocate_page_run(int pageState, addr_t base, addr_t length)
 {


Other related posts:

  • » [haiku-commits] r34836 - in haiku/trunk: headers/private/kernel/vm src/add-ons/kernel/bus_managers/agp_gart src/system/kernel/arch/m68k src/system/kernel/arch/x86 src/system/kernel/cache ... - ingo_weinhold