[haiku-commits] r33524 - haiku/trunk/src/system/kernel/device_manager

  • From: mmlr@xxxxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sun, 11 Oct 2009 18:49:44 +0200 (CEST)

Author: mmlr
Date: 2009-10-11 18:49:44 +0200 (Sun, 11 Oct 2009)
New Revision: 33524
Changeset: http://dev.haiku-os.org/changeset/33524/haiku

Modified:
   haiku/trunk/src/system/kernel/device_manager/IORequest.cpp
   haiku/trunk/src/system/kernel/device_manager/IORequest.h
Log:
Provide a way to directly request virtual vecs from an IOBuffer. If the buffer
is already virtual, it simply returns the vecs directly; if it is physical, it
takes over the task of virtualizing the vecs, either by using
vm_map_physical_memory_vecs when there are multiple vecs or more than one page,
or by falling back to page-wise mapping when that mapping fails or is not
needed. In the best case, scattered physical pages are mapped into one linear
virtual buffer, so that subsystems operating only on virtual memory get a
single vector and can then burst read/write.


Modified: haiku/trunk/src/system/kernel/device_manager/IORequest.cpp
===================================================================
--- haiku/trunk/src/system/kernel/device_manager/IORequest.cpp  2009-10-11 
16:48:03 UTC (rev 33523)
+++ haiku/trunk/src/system/kernel/device_manager/IORequest.cpp  2009-10-11 
16:49:44 UTC (rev 33524)
@@ -15,6 +15,7 @@
 #include <thread.h>
 #include <util/AutoLock.h>
 #include <vm.h>
+#include <vm_address_space.h>
 
 #include "dma_resources.h"
 
@@ -65,6 +66,15 @@
 //     #pragma mark -
 
 
+struct virtual_vec_cookie {
+       uint32  vec_index;
+       size_t  vec_offset;
+       area_id mapped_area;
+       void*   physical_page_handle;
+       addr_t  virtual_address;
+};
+
+
 IOBuffer*
 IOBuffer::Create(uint32 count, bool vip)
 {
@@ -116,6 +126,92 @@
 
 
 status_t
+IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
+{
+       virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
+       if (cookie == NULL) {
+               cookie = new(std::nothrow) virtual_vec_cookie;
+               if (cookie == NULL)
+                       return B_NO_MEMORY;
+
+               cookie->vec_index = 0;
+               cookie->vec_offset = 0;
+               cookie->mapped_area = -1;
+               cookie->physical_page_handle = NULL;
+               cookie->virtual_address = 0;
+               _cookie = cookie;
+       }
+
+       // recycle a potential previously mapped page
+       if (cookie->physical_page_handle != NULL) {
+               vm_put_physical_page(cookie->virtual_address,
+                       cookie->physical_page_handle);
+       }
+
+       if (cookie->vec_index >= fVecCount)
+               return B_BAD_INDEX;
+
+       if (!fPhysical) {
+               vector = fVecs[cookie->vec_index++];
+               return B_OK;
+       }
+
+       if (cookie->vec_index == 0
+               && (fVecCount > 1 || fVecs[0].iov_len > B_PAGE_SIZE)) {
+               void* mappedAddress;
+               addr_t mappedSize;
+
+               cookie->mapped_area = vm_map_physical_memory_vecs(
+                       vm_kernel_address_space_id(), "io buffer mapped 
physical vecs",
+                       &mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
+                       B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, 
fVecCount);
+
+               if (cookie->mapped_area >= 0) {
+                       vector.iov_base = (void*)mappedAddress;
+                       vector.iov_len = mappedSize;
+                       return B_OK;
+               } else
+                       ktrace_printf("failed to map area: %s\n", 
strerror(cookie->mapped_area));
+       }
+
+       // fallback to page wise mapping
+       iovec& currentVec = fVecs[cookie->vec_index];
+       addr_t address = (addr_t)currentVec.iov_base + cookie->vec_offset;
+       addr_t pageOffset = address % B_PAGE_SIZE;
+
+       status_t result = vm_get_physical_page(address - pageOffset,
+               &cookie->virtual_address, &cookie->physical_page_handle);
+       if (result != B_OK)
+               return result;
+
+       size_t length = min_c(currentVec.iov_len - cookie->vec_offset,
+               B_PAGE_SIZE - pageOffset);
+
+       vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
+       vector.iov_len = length;
+
+       cookie->vec_offset += length;
+       if (cookie->vec_offset >= currentVec.iov_len) {
+               cookie->vec_index++;
+               cookie->vec_offset = 0;
+       }
+
+       return B_OK;
+}
+
+
+void
+IOBuffer::FreeVirtualVecCookie(void* _cookie)
+{
+       virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
+       if (cookie->mapped_area >= 0)
+               delete_area(cookie->mapped_area);
+
+       delete cookie;
+}
+
+
+status_t
 IOBuffer::LockMemory(team_id team, bool isWrite)
 {
        if (fMemoryLocked) {

Modified: haiku/trunk/src/system/kernel/device_manager/IORequest.h
===================================================================
--- haiku/trunk/src/system/kernel/device_manager/IORequest.h    2009-10-11 
16:48:03 UTC (rev 33523)
+++ haiku/trunk/src/system/kernel/device_manager/IORequest.h    2009-10-11 
16:49:44 UTC (rev 33524)
@@ -55,6 +55,10 @@
                        size_t                          VecCount() const { 
return fVecCount; }
                        size_t                          Capacity() const { 
return fCapacity; }
 
+                       status_t                        
GetNextVirtualVec(void*& cookie,
+                                                                       iovec& 
vector);
+                       void                            
FreeVirtualVecCookie(void* cookie);
+
                        status_t                        LockMemory(team_id 
team, bool isWrite);
                        void                            UnlockMemory(team_id 
team, bool isWrite);
                        bool                            IsMemoryLocked() const


Other related posts:

  • » [haiku-commits] r33524 - haiku/trunk/src/system/kernel/device_manager - mmlr