[haiku-commits] r35423 - haiku/trunk/src/system/kernel

  • From: mmlr@xxxxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sun, 7 Feb 2010 02:59:53 +0100 (CET)

Author: mmlr
Date: 2010-02-07 02:59:53 +0100 (Sun, 07 Feb 2010)
New Revision: 35423
Changeset: http://dev.haiku-os.org/changeset/35423/haiku

Modified:
   haiku/trunk/src/system/kernel/heap.cpp
Log:
Sync kernel heap with alignment handling fixes, simplifications and performance
improvements from debug heap in r35422.
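
For list readers who want to poke at the new page search outside the kernel: below is a
minimal standalone C++ sketch (not part of this commit) of the alignment handling that
heap_allocate_contiguous_pages() gains here. It only mirrors the firstValid/step idea --
the std::vector<bool> page map, find_contiguous_pages() and the main() harness are made
up for illustration, and the real function additionally takes the page lock and walks
the heap's area list.

// Standalone sketch of the alignment-aware contiguous page search: only page
// indices whose address (base + index * kPageSize) satisfies the requested
// alignment are tried as run starts, stepping by alignment / kPageSize.
#include <cstdint>
#include <cstdio>
#include <vector>

static const size_t kPageSize = 4096;

static uintptr_t
round_up(uintptr_t value, size_t alignment)
{
	// same effect as the kernel's ROUNDUP() for power-of-two alignments
	return (value + alignment - 1) & ~(uintptr_t)(alignment - 1);
}

// Returns the first index of a free, aligned run of pageCount pages, or -1.
static int32_t
find_contiguous_pages(uintptr_t base, const std::vector<bool> &inUse,
	uint32_t pageCount, size_t alignment)
{
	if (inUse.size() < pageCount)
		return -1;

	uint32_t step = 1;
	uint32_t firstValid = 0;
	const uint32_t lastValid = inUse.size() - pageCount + 1;

	if (alignment > kPageSize) {
		// skip ahead to the first page whose address is aligned and then
		// only test every (alignment / kPageSize)th page
		firstValid = (round_up(base, alignment) - base) / kPageSize;
		step = alignment / kPageSize;
	}

	for (uint32_t i = firstValid; i < lastValid; i += step) {
		if (inUse[i])
			continue;

		bool fits = true;
		for (uint32_t j = 1; j < pageCount; j++) {
			if (inUse[i + j]) {
				fits = false;
				break;
			}
		}

		if (fits)
			return (int32_t)i;
	}

	return -1;
}

int
main()
{
	// 16 pages at a merely page-aligned base; pages 3 and 9 are in use
	std::vector<bool> inUse(16, false);
	inUse[3] = inUse[9] = true;
	uintptr_t base = 0x10001000;

	int32_t index = find_contiguous_pages(base, inUse, 2, 16384);
	if (index >= 0) {
		printf("run starts at page %d (address %#lx)\n", (int)index,
			(unsigned long)(base + index * kPageSize));
	} else
		puts("no aligned run found");
	return 0;
}

With these inputs the search starts at page 3 (the first 16kB-aligned page), skips it
because it is in use, and settles on page 7, i.e. 0x10008000.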


Modified: haiku/trunk/src/system/kernel/heap.cpp
===================================================================
--- haiku/trunk/src/system/kernel/heap.cpp      2010-02-07 01:09:27 UTC (rev 35422)
+++ haiku/trunk/src/system/kernel/heap.cpp      2010-02-07 01:59:53 UTC (rev 35423)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009, Michael Lotz, mmlr@xxxxxxxxx
+ * Copyright 2008-2010, Michael Lotz, mmlr@xxxxxxxxx
  * Copyright 2002-2009, Axel Dörfler, axeld@xxxxxxxxxxxxxxxxx
  * Distributed under the terms of the MIT License.
  *
@@ -1308,31 +1308,47 @@
 
 
 static heap_page *
-heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount)
+heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount,
+       size_t alignment)
 {
        MutexLocker pageLocker(heap->page_lock);
        heap_area *area = heap->areas;
        while (area) {
-               bool found = false;
+               if (area->free_page_count < pageCount) {
+                       area = area->next;
+                       continue;
+               }
+
+               uint32 step = 1;
+               uint32 firstValid = 0;
+               const uint32 lastValid = area->page_count - pageCount + 1;
+
+               if (alignment > heap->page_size) {
+                       firstValid = (ROUNDUP(area->base, alignment) - area->base)
+                               / heap->page_size;
+                       step = alignment / heap->page_size;
+               }
+
                int32 first = -1;
-               for (uint32 i = 0; i < area->page_count; i++) {
-                       if (area->page_table[i].in_use) {
-                               first = -1;
+               for (uint32 i = firstValid; i < lastValid; i += step) {
+                       if (area->page_table[i].in_use)
                                continue;
-                       }
 
-                       if (first < 0)
-                               first = i;
+                       first = i;
 
-                       if (first >= 0) {
-                               if ((i + 1 - first) == pageCount) {
-                                       found = true;
+                       for (uint32 j = 1; j < pageCount; j++) {
+                               if (area->page_table[i + j].in_use) {
+                                       first = -1;
+                                       i += j;
                                        break;
                                }
                        }
+
+                       if (first >= 0)
+                               break;
                }
 
-               if (!found) {
+               if (first < 0) {
                        area = area->next;
                        continue;
                }
@@ -1357,111 +1373,120 @@
 }
 
 
-static void *
-heap_raw_alloc(heap_allocator *heap, size_t size, uint32 binIndex)
+#if KERNEL_HEAP_LEAK_CHECK
+static void
+heap_add_leak_check_info(addr_t address, size_t allocated, size_t size)
 {
-       TRACE(("raw_alloc: heap %p; size %lu; bin %lu\n", heap, size, binIndex));
-       if (binIndex < heap->bin_count) {
-               heap_bin *bin = &heap->bins[binIndex];
-               MutexLocker binLocker(bin->lock);
+       heap_leak_check_info *info = (heap_leak_check_info *)(address + allocated
+               - sizeof(heap_leak_check_info));
+       info->size = size - sizeof(heap_leak_check_info);
+       info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
+       info->team = (gKernelStartup ? 0 : team_get_current_team_id());
+       info->caller = heap->get_caller();
+}
+#endif
 
-               heap_page *page = bin->page_list;
-               if (page == NULL) {
-                       MutexLocker pageLocker(heap->page_lock);
-                       heap_area *area = heap->areas;
-                       if (area == NULL) {
-                       TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
-                                       size));
-                               return NULL;
-                       }
 
-                       // by design there are only areas in the list that still have
-                       // free pages available
-                       page = area->free_pages;
-                       area->free_pages = page->next;
-                       if (page->next)
-                               page->next->prev = NULL;
+static void *
+heap_raw_alloc(heap_allocator *heap, size_t size, size_t alignment)
+{
+       TRACE(("heap %p: allocate %lu bytes from raw pages with alignment %lu\n",
+               heap, size, alignment));
 
-                       heap_free_pages_removed(heap, area, 1);
+       uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
+       heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount,
+               alignment);
+       if (firstPage == NULL) {
+               TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n",
+                       heap, size));
+               return NULL;
+       }
 
-                       if (page->in_use)
-                               panic("got an in use page from the free pages list\n");
-                       page->in_use = 1;
+       addr_t address = firstPage->area->base + firstPage->index * heap->page_size;
+#if KERNEL_HEAP_LEAK_CHECK
+       heap_add_leak_check_info(address, pageCount * heap->page_size, size);
+#endif
+       return (void *)address;
+}
 
-                       pageLocker.Unlock();
 
-                       page->bin_index = binIndex;
-                       page->free_count = bin->max_free_count;
-                       page->empty_index = 0;
-                       page->free_list = NULL;
-                       page->next = page->prev = NULL;
-                       bin->page_list = page;
-               }
+static void *
+heap_allocate_from_bin(heap_allocator *heap, uint32 binIndex, size_t size)
+{
+       heap_bin *bin = &heap->bins[binIndex];
+       TRACE(("heap %p: allocate %lu bytes from bin %lu with element_size %lu\n",
+               heap, size, binIndex, bin->element_size));
 
-               // we have a page where we have a free slot
-               void *address = NULL;
-               if (page->free_list) {
-                       // there's a previously freed entry we can use
-                       address = page->free_list;
-                       page->free_list = (addr_t *)*page->free_list;
-               } else {
-                       // the page hasn't been fully allocated so use the next empty_index
-                       address = (void *)(page->area->base + page->index * heap->page_size
-                               + page->empty_index * bin->element_size);
-                       page->empty_index++;
+       MutexLocker binLocker(bin->lock);
+       heap_page *page = bin->page_list;
+       if (page == NULL) {
+               MutexLocker pageLocker(heap->page_lock);
+               heap_area *area = heap->areas;
+               if (area == NULL) {
+                       TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
+                               size));
+                       return NULL;
                }
 
-               page->free_count--;
-               if (page->free_count == 0) {
-                       // the page is now full so we remove it from the page_list
-                       bin->page_list = page->next;
-                       if (page->next)
-                               page->next->prev = NULL;
-                       page->next = page->prev = NULL;
-               }
+               // by design there are only areas in the list that still have
+               // free pages available
+               page = area->free_pages;
+               area->free_pages = page->next;
+               if (page->next)
+                       page->next->prev = NULL;
 
-               binLocker.Unlock();
+               heap_free_pages_removed(heap, area, 1);
 
-#if KERNEL_HEAP_LEAK_CHECK
-               heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
-                       + bin->element_size - sizeof(heap_leak_check_info));
-               info->size = size - sizeof(heap_leak_check_info);
-               info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
-               info->team = (gKernelStartup ? 0 : team_get_current_team_id());
-               info->caller = heap->get_caller();
-#endif
-               return address;
+               if (page->in_use)
+                       panic("got an in use page from the free pages list\n");
+               page->in_use = 1;
+
+               pageLocker.Unlock();
+
+               page->bin_index = binIndex;
+               page->free_count = bin->max_free_count;
+               page->empty_index = 0;
+               page->free_list = NULL;
+               page->next = page->prev = NULL;
+               bin->page_list = page;
        }
 
-       uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
-       heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount);
-       if (firstPage == NULL) {
-               TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n",
-                       heap, size));
-               return NULL;
+       // we have a page where we have a free slot
+       void *address = NULL;
+       if (page->free_list) {
+               // there's a previously freed entry we can use
+               address = page->free_list;
+               page->free_list = (addr_t *)*page->free_list;
+       } else {
+               // the page hasn't been fully allocated so use the next empty_index
+               address = (void *)(page->area->base + page->index * heap->page_size
+                       + page->empty_index * bin->element_size);
+               page->empty_index++;
        }
 
+       page->free_count--;
+       if (page->free_count == 0) {
+               // the page is now full so we remove it from the page_list
+               bin->page_list = page->next;
+               if (page->next)
+                       page->next->prev = NULL;
+               page->next = page->prev = NULL;
+       }
+
 #if KERNEL_HEAP_LEAK_CHECK
-       heap_leak_check_info *info = (heap_leak_check_info *)(firstPage->area->base
-               + (firstPage->index + pageCount) * heap->page_size
-               - sizeof(heap_leak_check_info));
-       info->size = size - sizeof(heap_leak_check_info);
-       info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
-       info->team = (gKernelStartup ? 0 : team_get_current_team_id());
-       info->caller = heap->get_caller();
+       binLocker.Unlock();
+       heap_add_leak_check_info((addr_t)address, bin->element_size, size);
 #endif
-       return (void *)(firstPage->area->base + firstPage->index * heap->page_size);
+       return address;
 }
 
 
-#if DEBUG
 static bool
 is_valid_alignment(size_t number)
 {
        // this cryptic line accepts zero and all powers of two
        return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
 }
-#endif
 
 
 inline bool
@@ -1486,24 +1511,36 @@
        size += sizeof(heap_leak_check_info);
 #endif
 
-       if (alignment != 0) {
-               // TODO: The alignment is done by ensuring that the element size
-               // of the target bin is aligned with the requested alignment. This
-               // has the problem that it wastes space because a better (smaller) bin
-               // could possibly be selected. We should pick the best bin and check
-               // if there is an aligned block in the free list or if a new (page
-               // aligned) page has to be allocated anyway.
-               size = ROUNDUP(size, alignment);
+       void *address = NULL;
+       if (alignment < B_PAGE_SIZE) {
+               if (alignment != 0) {
+                       // TODO: The alignment is done by ensuring that the element size
+                       // of the target bin is aligned with the requested alignment. This
+                       // has the problem that it wastes space because a better (smaller)
+                       // bin could possibly be selected. We should pick the best bin and
+                       // check if there is an aligned block in the free list or if a new
+                       // (page aligned) page has to be allocated anyway.
+                       size = ROUNDUP(size, alignment);
+                       for (uint32 i = 0; i < heap->bin_count; i++) {
+                               if (size <= heap->bins[i].element_size
+                                       && is_valid_alignment(heap->bins[i].element_size)) {
+                                       address = heap_allocate_from_bin(heap, i, size);
+                                       break;
+                               }
+                       }
+               } else {
+                       for (uint32 i = 0; i < heap->bin_count; i++) {
+                               if (size <= heap->bins[i].element_size) {
+                                       address = heap_allocate_from_bin(heap, i, size);
+                                       break;
+                               }
+                       }
+               }
        }
 
-       uint32 binIndex;
-       for (binIndex = 0; binIndex < heap->bin_count; binIndex++) {
-               if (size <= heap->bins[binIndex].element_size)
-                       break;
-       }
+       if (address == NULL)
+               address = heap_raw_alloc(heap, size, alignment);
 
-       void *address = heap_raw_alloc(heap, size, binIndex);
-
 #if KERNEL_HEAP_LEAK_CHECK
        size -= sizeof(heap_leak_check_info);
 #endif
@@ -2183,9 +2220,9 @@
 
                address = (void *)((addr_t)address + sizeof(area_allocation_info));
                if (alignment != 0) {
+                       address = (void *)ROUNDUP((addr_t)address, alignment);
                        ASSERT((addr_t)address % alignment == 0);
                        ASSERT((addr_t)address + size - 1 < (addr_t)info + areaSize - 1);
-                       address = (void *)ROUNDUP((addr_t)address, alignment);
                }
 
                TRACE(("heap: allocated area %ld for huge allocation of %lu bytes\n",
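
A side note on the is_valid_alignment() helper that this change promotes out of its
#if DEBUG guard (it is now also used when picking an aligned bin): the "cryptic line"
works because for number == 2^k the term ~number + 1 (that is, -number) has bits k and
up set while (number << 1) - 1 has bits 0 through k set, so their OR is all ones; for
zero the second term alone is all ones, and any other value leaves a gap. A quick
standalone check, written for this mail rather than taken from the tree, compares it
against the usual number & (number - 1) test:

#include <cstdio>
#include <cstdlib>

// the expression as it appears in heap.cpp
static bool
is_valid_alignment(size_t number)
{
	// this cryptic line accepts zero and all powers of two
	return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
}

// conventional formulation: true for zero and powers of two
static bool
is_zero_or_power_of_two(size_t number)
{
	return (number & (number - 1)) == 0;
}

int
main()
{
	for (size_t number = 0; number < (1UL << 20); number++) {
		if (is_valid_alignment(number) != is_zero_or_power_of_two(number)) {
			printf("mismatch at %zu\n", number);
			return EXIT_FAILURE;
		}
	}
	puts("expressions agree on [0, 2^20)");
	return EXIT_SUCCESS;
}

(The comparison assumes sizeof(size_t) == sizeof(unsigned long), which holds on the
platforms Haiku targets; otherwise the ~0UL constant would have to be ~(size_t)0.)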

