Author: bonefish Date: 2010-06-02 22:46:49 +0200 (Wed, 02 Jun 2010) New Revision: 37000 Changeset: http://dev.haiku-os.org/changeset/37000/haiku Modified: haiku/trunk/src/add-ons/kernel/bus_managers/scsi/emulation.c haiku/trunk/src/system/boot/loader/kernel_args.cpp haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp haiku/trunk/src/system/kernel/cache/file_cache.cpp haiku/trunk/src/system/kernel/cache/vnode_store.cpp haiku/trunk/src/system/kernel/device_manager/IOCache.cpp haiku/trunk/src/system/kernel/device_manager/IORequest.cpp haiku/trunk/src/system/kernel/device_manager/IOSchedulerSimple.cpp haiku/trunk/src/system/kernel/device_manager/devfs.cpp haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp haiku/trunk/src/system/kernel/fs/vfs_request_io.cpp haiku/trunk/src/system/kernel/system_info.cpp haiku/trunk/src/system/kernel/vm/VMCache.cpp haiku/trunk/src/system/kernel/vm/VMPageQueue.h haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp haiku/trunk/src/system/kernel/vm/vm.cpp haiku/trunk/src/system/kernel/vm/vm_page.cpp Log: Fixed more address-type-related issues. Mostly printf() or comparison warnings, but also some oversights from earlier changes. 
Modified: haiku/trunk/src/add-ons/kernel/bus_managers/scsi/emulation.c =================================================================== --- haiku/trunk/src/add-ons/kernel/bus_managers/scsi/emulation.c 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/add-ons/kernel/bus_managers/scsi/emulation.c 2010-06-02 20:46:49 UTC (rev 37000) @@ -87,14 +87,15 @@ aligned_phys = (unaligned_phys + buffer_size - 1) & ~(buffer_size - 1); aligned_addr = unaligned_addr + (aligned_phys - unaligned_phys); - SHOW_FLOW(3, "unaligned_phys = %#lx, aligned_phys = %#lx, unaligned_addr = %#lx, aligned_addr = %#lx", - unaligned_phys, aligned_phys, unaligned_addr, aligned_addr); + SHOW_FLOW(3, "unaligned_phys = %#" B_PRIxPHYSADDR ", aligned_phys = %#" + B_PRIxPHYSADDR ", unaligned_addr = %#" B_PRIxADDR ", aligned_addr = %#" + B_PRIxADDR, unaligned_phys, aligned_phys, unaligned_addr, aligned_addr); device->buffer = (void *)aligned_addr; device->buffer_size = buffer_size; // s/g list is directly after buffer device->buffer_sg_list = (void *)(aligned_addr + buffer_size); - device->buffer_sg_list[0].address = (void *)aligned_phys; + device->buffer_sg_list[0].address = aligned_phys; device->buffer_sg_list[0].size = buffer_size; device->buffer_sg_count = 1; @@ -490,8 +491,9 @@ bytes = min(size, req_size); bytes = min(bytes, sg_list->size); - SHOW_FLOW(0, "buffer = %p, virt_addr = %#lx, bytes = %lu, to_buffer = %d", - buffer, sg_list->address + offset, bytes, to_buffer); + SHOW_FLOW(0, "buffer = %p, virt_addr = %#" B_PRIxPHYSADDR ", bytes = %" + B_PRIuSIZE ", to_buffer = %d", buffer, sg_list->address + offset, + bytes, to_buffer); if (to_buffer) { vm_memcpy_from_physical(buffer, sg_list->address + offset, bytes, Modified: haiku/trunk/src/system/boot/loader/kernel_args.cpp =================================================================== --- haiku/trunk/src/system/boot/loader/kernel_args.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/boot/loader/kernel_args.cpp 2010-06-02 
20:46:49 UTC (rev 37000) @@ -370,7 +370,7 @@ status_t -insert_physical_memory_range(addr_t start, size_t size) +insert_physical_memory_range(phys_addr_t start, phys_size_t size) { return insert_physical_address_range(gKernelArgs.physical_memory_range, &gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE, @@ -379,7 +379,7 @@ status_t -insert_physical_allocated_range(addr_t start, size_t size) +insert_physical_allocated_range(phys_addr_t start, phys_size_t size) { return insert_physical_address_range(gKernelArgs.physical_allocated_range, &gKernelArgs.num_physical_allocated_ranges, Modified: haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp =================================================================== --- haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/boot/platform/bios_ia32/mmu.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -111,7 +111,7 @@ static addr_t get_next_physical_address(size_t size) { - addr_t base; + phys_addr_t base; if (!get_free_physical_address_range(gKernelArgs.physical_allocated_range, gKernelArgs.num_physical_allocated_ranges, sNextPhysicalAddress, size, &base)) { @@ -435,7 +435,7 @@ } // check whether the physical range is still free - addr_t foundBase; + phys_addr_t foundBase; if (!get_free_physical_address_range(gKernelArgs.physical_allocated_range, gKernelArgs.num_physical_allocated_ranges, sNextPhysicalAddress, size, &foundBase) || foundBase != base) { Modified: haiku/trunk/src/system/kernel/cache/file_cache.cpp =================================================================== --- haiku/trunk/src/system/kernel/cache/file_cache.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/cache/file_cache.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -196,7 +196,7 @@ phys_size_t pagesTransferred = (bytesTransferred + B_PAGE_SIZE - 1) / B_PAGE_SIZE; - if (fOffset + bytesTransferred > fCache->virtual_end) + if (fOffset + (off_t)bytesTransferred > 
fCache->virtual_end) bytesTransferred = fCache->virtual_end - fOffset; for (uint32 i = 0; i < pagesTransferred; i++) { @@ -343,7 +343,7 @@ generic_size_t bytesEnd = *_numBytes; - if (offset + bytesEnd > ref->cache->virtual_end) + if (offset + (off_t)bytesEnd > ref->cache->virtual_end) bytesEnd = ref->cache->virtual_end - offset; if (status == B_OK && bytesEnd < bytesUntouched) { @@ -387,12 +387,12 @@ generic_io_vec vecs[MAX_IO_VECS]; uint32 vecCount = 0; - size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize); + generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize); vm_page* pages[MAX_IO_VECS]; int32 pageIndex = 0; // allocate pages for the cache and mark them busy - for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) { + for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) { vm_page* page = pages[pageIndex++] = vm_page_allocate_page( reservation, PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY); @@ -475,8 +475,9 @@ ref->cache->Unlock(); vm_page_unreserve_pages(reservation); + generic_size_t toRead = bufferSize; status_t status = vfs_read_pages(ref->vnode, cookie, offset + pageOffset, - &vec, 1, 0, &bufferSize); + &vec, 1, 0, &toRead); if (status == B_OK) reserve_pages(ref, reservation, reservePages, false); @@ -501,7 +502,7 @@ // large chunk on the heap. generic_io_vec vecs[MAX_IO_VECS]; uint32 vecCount = 0; - size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize); + generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize); vm_page* pages[MAX_IO_VECS]; int32 pageIndex = 0; status_t status = B_OK; @@ -510,7 +511,7 @@ bool writeThrough = false; // allocate pages for the cache and mark them busy - for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) { + for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) { // TODO: if space is becoming tight, and this cache is already grown // big - shouldn't we better steal the pages directly in that case? 
// (a working set like approach for the file cache) @@ -656,8 +657,9 @@ generic_io_vec vec; vec.base = buffer; vec.length = bufferSize; + generic_size_t toWrite = bufferSize; status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset, - &vec, 1, 0, &bufferSize); + &vec, 1, 0, &toWrite); } if (status == B_OK) @@ -672,8 +674,8 @@ static inline status_t satisfy_cache_io(file_cache_ref* ref, void* cookie, cache_func function, off_t offset, addr_t buffer, bool useBuffer, int32 &pageOffset, - generic_size_t bytesLeft, size_t &reservePages, off_t &lastOffset, - addr_t &lastBuffer, int32 &lastPageOffset, generic_size_t &lastLeft, + size_t bytesLeft, size_t &reservePages, off_t &lastOffset, + addr_t &lastBuffer, int32 &lastPageOffset, size_t &lastLeft, size_t &lastReservedPages, vm_page_reservation* reservation) { if (lastBuffer == buffer) @@ -1286,8 +1288,11 @@ // Caching is disabled -- read directly from the file. generic_io_vec vec; vec.base = (addr_t)buffer; - vec.length = *_size; - return vfs_read_pages(ref->vnode, cookie, offset, &vec, 1, 0, _size); + generic_size_t size = vec.length = *_size; + status_t error = vfs_read_pages(ref->vnode, cookie, offset, &vec, 1, 0, + &size); + *_size = size; + return error; } return cache_io(ref, cookie, offset, (addr_t)buffer, _size, false); @@ -1306,9 +1311,12 @@ if (buffer != NULL) { generic_io_vec vec; vec.base = (addr_t)buffer; - vec.length = *_size; - return vfs_write_pages(ref->vnode, cookie, offset, &vec, 1, 0, - _size); + generic_size_t size = vec.length = *_size; + + status_t error = vfs_write_pages(ref->vnode, cookie, offset, &vec, + 1, 0, &size); + *_size = size; + return error; } // NULL buffer -- use a dummy buffer to write zeroes Modified: haiku/trunk/src/system/kernel/cache/vnode_store.cpp =================================================================== --- haiku/trunk/src/system/kernel/cache/vnode_store.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/cache/vnode_store.cpp 2010-06-02 
20:46:49 UTC (rev 37000) @@ -52,7 +52,7 @@ generic_size_t bytesEnd = *_numBytes; - if (offset + bytesEnd > virtual_end) + if (offset + (off_t)bytesEnd > virtual_end) bytesEnd = virtual_end - offset; // If the request could be filled completely, or an error occured, Modified: haiku/trunk/src/system/kernel/device_manager/IOCache.cpp =================================================================== --- haiku/trunk/src/system/kernel/device_manager/IOCache.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/device_manager/IOCache.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -232,7 +232,7 @@ return B_BAD_VALUE; // truncate the request to the device capacity - if (fDeviceCapacity - offset < length) + if (fDeviceCapacity - offset < (off_t)length) length = fDeviceCapacity - offset; _bytesTransferred = 0; @@ -374,8 +374,8 @@ // request that doesn't cover the complete missing range. if (request->IsRead() || requestOffset < (off_t)firstMissing * B_PAGE_SIZE - || requestOffset + requestLength - > (lastMissing + 1) * B_PAGE_SIZE) { + || requestOffset + (off_t)requestLength + > (off_t)(lastMissing + 1) * B_PAGE_SIZE) { status_t error = _TransferPages(firstMissing - firstPageOffset, missingPages, false, isVIP); if (error != B_OK) { Modified: haiku/trunk/src/system/kernel/device_manager/IORequest.cpp =================================================================== --- haiku/trunk/src/system/kernel/device_manager/IORequest.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/device_manager/IORequest.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -260,9 +260,9 @@ kprintf(" origin: %s\n", fUser ? "user" : "kernel"); kprintf(" kind: %s\n", fPhysical ? 
"physical" : "virtual"); - kprintf(" length: %lu\n", fLength); - kprintf(" capacity: %lu\n", fCapacity); - kprintf(" vecs: %lu\n", fVecCount); + kprintf(" length: %" B_PRIuGENADDR "\n", fLength); + kprintf(" capacity: %" B_PRIuSIZE "\n", fCapacity); + kprintf(" vecs: %" B_PRIuSIZE "\n", fVecCount); for (uint32 i = 0; i < fVecCount; i++) { kprintf(" [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n", @@ -354,7 +354,7 @@ if (offset < startOffset) { // If the complete vector is before the start offset, skip it. - if (offset + length <= startOffset) { + if (offset + (off_t)length <= startOffset) { offset += length; continue; } @@ -367,7 +367,7 @@ length -= diff; } - if (offset + length > endOffset) { + if (offset + (off_t)length > endOffset) { // If we're already beyond the end offset, we're done. if (offset >= endOffset) break; @@ -462,7 +462,7 @@ vecOffset = 0; if (base >= bounceBufferStart && base < bounceBufferEnd) { - if (offset + length > endOffset) + if (offset + (off_t)length > endOffset) length = endOffset - offset; status_t error = fParent->CopyData(offset, bounceBuffer + (base - bounceBufferStart), length); @@ -666,10 +666,10 @@ kprintf(" dma buffer: %p\n", fDMABuffer); kprintf(" offset: %-8Ld (original: %Ld)\n", fOffset, fOriginalOffset); - kprintf(" length: %-8lu (original: %lu)\n", fLength, - fOriginalLength); - kprintf(" transferred: %lu\n", fTransferredBytes); - kprintf(" block size: %lu\n", fBlockSize); + kprintf(" length: %-8" B_PRIuGENADDR " (original: %" + B_PRIuGENADDR ")\n", fLength, fOriginalLength); + kprintf(" transferred: %" B_PRIuGENADDR "\n", fTransferredBytes); + kprintf(" block size: %" B_PRIuGENADDR "\n", fBlockSize); kprintf(" saved vec index: %u\n", fSavedVecIndex); kprintf(" saved vec length: %u\n", fSavedVecLength); kprintf(" r/w: %s\n", IsWrite() ? 
"write" : "read"); @@ -772,7 +772,7 @@ generic_size_t length, IORequest*& _subRequest) { ASSERT(parentOffset >= fOffset && length <= fLength - && parentOffset - fOffset <= fLength - length); + && parentOffset - fOffset <= (off_t)(fLength - length)); // find start vec generic_size_t vecOffset = parentOffset - fOffset; @@ -1160,7 +1160,7 @@ uint8* buffer = (uint8*)_buffer; - if (offset < fOffset || offset + size > fOffset + fLength) { + if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) { panic("IORequest::_CopyData(): invalid range: (%lld, %lu)", offset, size); return B_BAD_VALUE; @@ -1284,9 +1284,9 @@ kprintf(" mutex: %p\n", &fLock); kprintf(" IOBuffer: %p\n", fBuffer); kprintf(" offset: %Ld\n", fOffset); - kprintf(" length: %lu\n", fLength); - kprintf(" transfer size: %lu\n", fTransferSize); - kprintf(" relative offset: %lu\n", fRelativeParentOffset); + kprintf(" length: %" B_PRIuGENADDR "\n", fLength); + kprintf(" transfer size: %" B_PRIuGENADDR "\n", fTransferSize); + kprintf(" relative offset: %" B_PRIuGENADDR "\n", fRelativeParentOffset); kprintf(" pending children: %ld\n", fPendingChildren); kprintf(" flags: %#lx\n", fFlags); kprintf(" team: %ld\n", fTeam); @@ -1296,8 +1296,8 @@ kprintf(" finished cvar: %p\n", &fFinishedCondition); kprintf(" iteration:\n"); kprintf(" vec index: %lu\n", fVecIndex); - kprintf(" vec offset: %lu\n", fVecOffset); - kprintf(" remaining bytes: %lu\n", fRemainingBytes); + kprintf(" vec offset: %" B_PRIuGENADDR "\n", fVecOffset); + kprintf(" remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes); kprintf(" callbacks:\n"); kprintf(" finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie); kprintf(" iteration %p, cookie %p\n", fIterationCallback, Modified: haiku/trunk/src/system/kernel/device_manager/IOSchedulerSimple.cpp =================================================================== --- haiku/trunk/src/system/kernel/device_manager/IOSchedulerSimple.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ 
haiku/trunk/src/system/kernel/device_manager/IOSchedulerSimple.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -424,7 +424,7 @@ usedBandwidth = 0; if (fDMAResource != NULL) { - while (quantum >= fBlockSize && request->RemainingBytes() > 0) { + while (quantum >= (off_t)fBlockSize && request->RemainingBytes() > 0) { IOOperation* operation = fUnusedOperations.RemoveHead(); if (operation == NULL) return false; @@ -618,14 +618,14 @@ fActiveRequestOwners.Remove(&marker); } - if (owner == NULL || quantum < fBlockSize) { + if (owner == NULL || quantum < (off_t)fBlockSize) { if (!_NextActiveRequestOwner(owner, quantum)) { // we've been asked to terminate return B_OK; } } - while (resourcesAvailable && iterationBandwidth >= fBlockSize) { + while (resourcesAvailable && iterationBandwidth >= (off_t)fBlockSize) { //dprintf("IOSchedulerSimple::_Scheduler(): request owner: %p (thread %ld)\n", //owner, owner->thread); // Prepare operations for the owner. @@ -643,12 +643,14 @@ quantum -= bandwidth; iterationBandwidth -= bandwidth; - if (quantum < fBlockSize || iterationBandwidth < fBlockSize) + if (quantum < (off_t)fBlockSize + || iterationBandwidth < (off_t)fBlockSize) { break; + } } - while (resourcesAvailable && quantum >= fBlockSize - && iterationBandwidth >= fBlockSize) { + while (resourcesAvailable && quantum >= (off_t)fBlockSize + && iterationBandwidth >= (off_t)fBlockSize) { IORequest* request = owner->requests.Head(); if (request == NULL) { resourcesAvailable = false; Modified: haiku/trunk/src/system/kernel/device_manager/devfs.cpp =================================================================== --- haiku/trunk/src/system/kernel/device_manager/devfs.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/device_manager/devfs.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -497,7 +497,7 @@ off_t offset = request->Offset(); ASSERT(offset >= 0); - ASSERT(offset + request->Length() <= partition->info.size); + ASSERT(offset + (off_t)request->Length() <= 
partition->info.size); request->SetOffset(offset + partition->info.offset); } @@ -1792,7 +1792,7 @@ } if (vnode->stream.u.dev.partition != NULL) { - if (request->Offset() + request->Length() + if (request->Offset() + (off_t)request->Length() > vnode->stream.u.dev.partition->info.size) { request->SetStatusAndNotify(B_BAD_VALUE); return B_BAD_VALUE; Modified: haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp =================================================================== --- haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/device_manager/dma_resources.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -75,9 +75,9 @@ { kprintf("DMABuffer at %p\n", this); - kprintf(" bounce buffer: %p (physical %#lx)\n", + kprintf(" bounce buffer: %p (physical %#" B_PRIxPHYSADDR ")\n", fBounceBuffer->address, fBounceBuffer->physical_address); - kprintf(" bounce buffer size: %lu\n", fBounceBuffer->size); + kprintf(" bounce buffer size: %" B_PRIxPHYSADDR "\n", fBounceBuffer->size); kprintf(" vecs: %lu\n", fVecCount); for (uint32 i = 0; i < fVecCount; i++) { @@ -169,8 +169,10 @@ fBounceBufferSize); } - dprintf("DMAResource@%p: low/high %lx/%lx, max segment count %lu, align %lu, " - "boundary %lu, max transfer %lu, max segment size %lu\n", this, + dprintf("DMAResource@%p: low/high %" B_PRIxGENADDR "/%" B_PRIxGENADDR + ", max segment count %" B_PRIu32 ", align %" B_PRIuGENADDR ", " + "boundary %" B_PRIuGENADDR ", max transfer %" B_PRIuGENADDR + ", max segment size %" B_PRIuGENADDR "\n", this, fRestrictions.low_address, fRestrictions.high_address, fRestrictions.max_segment_count, fRestrictions.alignment, fRestrictions.boundary, fRestrictions.max_transfer_size, @@ -224,10 +226,14 @@ area_id area = -1; phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE); - if (fRestrictions.alignment > B_PAGE_SIZE) - dprintf("dma buffer restrictions not yet implemented: alignment %lu\n", 
fRestrictions.alignment); - if (fRestrictions.boundary > B_PAGE_SIZE) - dprintf("dma buffer restrictions not yet implemented: boundary %lu\n", fRestrictions.boundary); + if (fRestrictions.alignment > B_PAGE_SIZE) { + dprintf("dma buffer restrictions not yet implemented: alignment %" + B_PRIuGENADDR "\n", fRestrictions.alignment); + } + if (fRestrictions.boundary > B_PAGE_SIZE) { + dprintf("dma buffer restrictions not yet implemented: boundary %" + B_PRIuGENADDR "\n", fRestrictions.boundary); + } bounceBuffer = (void*)fRestrictions.low_address; // TODO: We also need to enforce the boundary restrictions. @@ -623,7 +629,7 @@ // vec is a bounce buffer segment shorter than the block size. If so, we // have to cut back the complete block and use a bounce buffer for it // entirely. - if (diff == 0 && offset + dmaLength > requestEnd) { + if (diff == 0 && offset + (off_t)dmaLength > requestEnd) { const generic_io_vec& dmaVec = dmaBuffer->VecAt(dmaBuffer->VecCount() - 1); ASSERT(dmaVec.base >= dmaBuffer->PhysicalBounceBufferAddress() @@ -708,9 +714,10 @@ operation->SetBuffer(dmaBuffer); operation->SetBlockSize(fBlockSize); operation->SetOriginalRange(originalOffset, - min_c(offset + dmaLength, requestEnd) - originalOffset); + min_c(offset + (off_t)dmaLength, requestEnd) - originalOffset); operation->SetRange(offset, dmaLength); - operation->SetPartial(partialBegin != 0, offset + dmaLength > requestEnd); + operation->SetPartial(partialBegin != 0, + offset + (off_t)dmaLength > requestEnd); // If we don't need the bounce buffer, we put it back, otherwise operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize); Modified: haiku/trunk/src/system/kernel/fs/vfs_request_io.cpp =================================================================== --- haiku/trunk/src/system/kernel/fs/vfs_request_io.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/fs/vfs_request_io.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -226,7 +226,7 @@ static status_t 
do_iterative_fd_io_finish(void* _cookie, io_request* request, status_t status, - bool partialTransfer, size_t transferEndOffset) + bool partialTransfer, generic_size_t transferEndOffset) { iterative_io_cookie* cookie = (iterative_io_cookie*)_cookie; @@ -278,7 +278,7 @@ for (uint32 i = 0; i < fileVecCount; i++) { const file_io_vec& fileVec = fileVecs[i]; - size_t toTransfer = min_c(fileVec.length, length); + size_t toTransfer = min_c(fileVec.length, (off_t)length); size_t transferred = toTransfer; error = io.IO(fileVec.offset, vecBase, &transferred); if (error != B_OK) @@ -327,7 +327,7 @@ TRACE_RIO("[%ld] I/O: offset: %lld, vecBase: %p, length: %lu\n", find_thread(NULL), offset, vecBase, vecLength); - generic_size_t transferred = vecLength; + size_t transferred = vecLength; status_t error = io.IO(offset, vecBase, &transferred); if (error != B_OK) { TRACE_RIO("[%ld] I/O failed: %#lx\n", find_thread(NULL), error); @@ -382,8 +382,8 @@ status_t vfs_asynchronous_read_pages(struct vnode* vnode, void* cookie, off_t pos, - const generic_io_vec* vecs, size_t count, size_t numBytes, uint32 flags, - AsyncIOCallback* callback) + const generic_io_vec* vecs, size_t count, generic_size_t numBytes, + uint32 flags, AsyncIOCallback* callback) { IORequest* request = IORequest::Create((flags & B_VIP_IO_REQUEST) != 0); if (request == NULL) { @@ -408,8 +408,8 @@ status_t vfs_asynchronous_write_pages(struct vnode* vnode, void* cookie, off_t pos, - const generic_io_vec* vecs, size_t count, size_t numBytes, uint32 flags, - AsyncIOCallback* callback) + const generic_io_vec* vecs, size_t count, generic_size_t numBytes, + uint32 flags, AsyncIOCallback* callback) { IORequest* request = IORequest::Create((flags & B_VIP_IO_REQUEST) != 0); if (request == NULL) { Modified: haiku/trunk/src/system/kernel/system_info.cpp =================================================================== --- haiku/trunk/src/system/kernel/system_info.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ 
haiku/trunk/src/system/kernel/system_info.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -52,7 +52,7 @@ kprintf(" [%ld] %Ld\n", i + 1, gCPU[i].active_time); // ToDo: Add page_faults - kprintf("pages:\t\t%ld (%ld max)\n", + kprintf("pages:\t\t%" B_PRIuPHYSADDR " (%" B_PRIuPHYSADDR " max)\n", vm_page_num_pages() - vm_page_num_free_pages(), vm_page_num_pages()); kprintf("sems:\t\t%ld (%ld max)\n", sem_used_sems(), sem_max_sems()); Modified: haiku/trunk/src/system/kernel/vm/VMCache.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/VMCache.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/vm/VMCache.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -752,8 +752,8 @@ vm_page* otherPage = pages.Lookup(page->cache_offset); if (otherPage != NULL) { panic("VMCache::InsertPage(): there's already page %p with cache " - "offset %lu in cache %p; inserting page %p", otherPage, - page->cache_offset, this, page); + "offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p", + otherPage, page->cache_offset, this, page); } #endif // KDEBUG Modified: haiku/trunk/src/system/kernel/vm/VMPageQueue.h =================================================================== --- haiku/trunk/src/system/kernel/vm/VMPageQueue.h 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/vm/VMPageQueue.h 2010-06-02 20:46:49 UTC (rev 37000) @@ -49,7 +49,7 @@ inline vm_page* Previous(vm_page* page) const; inline vm_page* Next(vm_page* page) const; - inline uint32 Count() const { return fCount; } + inline phys_addr_t Count() const { return fCount; } inline Iterator GetIterator() const; @@ -58,7 +58,7 @@ protected: const char* fName; spinlock fLock; - uint32 fCount; + phys_addr_t fCount; PageList fPages; }; Modified: haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp 2010-06-02 20:43:59 UTC (rev 
36999) +++ haiku/trunk/src/system/kernel/vm/VMTranslationMap.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -44,7 +44,7 @@ addr_t end = address + size; #if DEBUG_PAGE_ACCESS for (; address != end; address += B_PAGE_SIZE) { - addr_t physicalAddress; + phys_addr_t physicalAddress; uint32 flags; if (Query(address, &physicalAddress, &flags) == B_OK && (flags & PAGE_PRESENT) == 0) { @@ -85,7 +85,7 @@ addr_t end = address + area->Size(); #if DEBUG_PAGE_ACCESS for (; address != end; address += B_PAGE_SIZE) { - addr_t physicalAddress; + phys_addr_t physicalAddress; uint32 flags; if (Query(address, &physicalAddress, &flags) == B_OK && (flags & PAGE_PRESENT) == 0) { Modified: haiku/trunk/src/system/kernel/vm/vm.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/vm.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/vm/vm.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -1303,8 +1303,8 @@ } page = vm_lookup_page(physicalAddress / B_PAGE_SIZE); if (page == NULL) { - panic("looking up page failed for pa 0x%lx\n", - physicalAddress); + panic("looking up page failed for pa %#" B_PRIxPHYSADDR + "\n", physicalAddress); } DEBUG_PAGE_ACCESS_START(page); @@ -2934,8 +2934,8 @@ : &cache_info_compare_committed); } - kprintf("total committed memory: %lld, total used pages: %lu\n", - totalCommitted, totalPages); + kprintf("total committed memory: %" B_PRIdOFF ", total used pages: %" + B_PRIuPHYSADDR "\n", totalCommitted, totalPages); kprintf("%lu caches (%lu root caches), sorted by %s per cache " "tree...\n\n", totalCount, rootCount, sortByPageCount ? 
"page count" : "committed size"); @@ -3021,10 +3021,11 @@ for (VMCachePagesTree::Iterator it = cache->pages.GetIterator(); vm_page* page = it.Next();) { if (!vm_page_is_dummy(page)) { - kprintf("\t%p ppn 0x%lx offset 0x%lx state %u (%s) " - "wired_count %u\n", page, page->physical_page_number, - page->cache_offset, page->State(), - page_state_to_string(page->State()), page->wired_count); + kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR + " state %u (%s) wired_count %u\n", page, + page->physical_page_number, page->cache_offset, + page->State(), page_state_to_string(page->State()), + page->wired_count); } else { kprintf("\t%p DUMMY PAGE state %u (%s)\n", page, page->State(), page_state_to_string(page->State())); @@ -3174,8 +3175,8 @@ static int dump_available_memory(int argc, char** argv) { - kprintf("Available memory: %Ld/%lu bytes\n", - sAvailableMemory, vm_page_num_pages() * B_PAGE_SIZE); + kprintf("Available memory: %" B_PRIdOFF "/%" B_PRIuPHYSADDR " bytes\n", + sAvailableMemory, (phys_addr_t)vm_page_num_pages() * B_PAGE_SIZE); return 0; } @@ -6071,8 +6072,8 @@ vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE); if (page == NULL) { - panic("area %p looking up page failed for pa 0x%lx\n", area, - physicalAddress); + panic("area %p looking up page failed for pa %#" B_PRIxPHYSADDR + "\n", area, physicalAddress); map->Unlock(); return B_ERROR; } Modified: haiku/trunk/src/system/kernel/vm/vm_page.cpp =================================================================== --- haiku/trunk/src/system/kernel/vm/vm_page.cpp 2010-06-02 20:43:59 UTC (rev 36999) +++ haiku/trunk/src/system/kernel/vm/vm_page.cpp 2010-06-02 20:46:49 UTC (rev 37000) @@ -657,11 +657,11 @@ if (!evaluate_debug_expression(argv[index], &value, false)) return 0; - addr_t pageAddress = (addr_t)value; + uint64 pageAddress = value; struct vm_page* page; if (addressIsPointer) { - page = (struct vm_page *)pageAddress; + page = (struct vm_page *)(addr_t)pageAddress; } else { if 
(!physical) { VMAddressSpace *addressSpace = VMAddressSpace::Kernel(); @@ -670,13 +670,15 @@ addressSpace = debug_get_debugged_thread()->team->address_space; uint32 flags = 0; + phys_addr_t physicalAddress; if (addressSpace->TranslationMap()->QueryInterrupt(pageAddress, - &pageAddress, &flags) != B_OK + &physicalAddress, &flags) != B_OK || (flags & PAGE_PRESENT) == 0) { kprintf("Virtual address not mapped to a physical page in this " "address space.\n"); return 0; } + pageAddress = physicalAddress; } page = vm_lookup_page(pageAddress / B_PAGE_SIZE); @@ -685,9 +687,10 @@ kprintf("PAGE: %p\n", page); kprintf("queue_next,prev: %p, %p\n", page->queue_link.next, page->queue_link.previous); - kprintf("physical_number: %#lx\n", page->physical_page_number); + kprintf("physical_number: %#" B_PRIxPHYSADDR "\n", + page->physical_page_number); kprintf("cache: %p\n", page->Cache()); - kprintf("cache_offset: %ld\n", page->cache_offset); + kprintf("cache_offset: %" B_PRIuPHYSADDR "\n", page->cache_offset); kprintf("cache_next: %p\n", page->cache_next); kprintf("state: %s\n", page_state_to_string(page->State())); kprintf("wired_count: %d\n", page->wired_count); @@ -777,8 +780,9 @@ return 0; } - kprintf("queue = %p, queue->head = %p, queue->tail = %p, queue->count = %ld\n", - queue, queue->Head(), queue->Tail(), queue->Count()); + kprintf("queue = %p, queue->head = %p, queue->tail = %p, queue->count = %" + B_PRIuPHYSADDR "\n", queue, queue->Head(), queue->Tail(), + queue->Count()); if (argc == 3) { struct vm_page *page = queue->Head(); @@ -840,8 +844,10 @@ page_run longestCachedRun = { 0, 0 }; for (page_num_t i = 0; i < sNumPages; i++) { - if (sPages[i].State() > 7) - panic("page %li at %p has invalid state!\n", i, &sPages[i]); + if (sPages[i].State() > 7) { + panic("page %" B_PRIuPHYSADDR " at %p has invalid state!\n", i, + &sPages[i]); + } uint32 pageState = sPages[i].State(); @@ -877,7 +883,7 @@ } kprintf("page stats:\n"); - kprintf("total: %lu\n", sNumPages); + kprintf("total: 
%" B_PRIuPHYSADDR "\n", sNumPages); kprintf("active: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n", counter[PAGE_STATE_ACTIVE], busyCounter[PAGE_STATE_ACTIVE]); @@ -898,11 +904,11 @@ kprintf("unsatisfied page reservations: %" B_PRId32 "\n", sUnsatisfiedPageReservations); kprintf("mapped pages: %lu\n", gMappedPagesCount); - kprintf("longest free pages run: %" B_PRIuSIZE " pages (at %" B_PRIuSIZE - ")\n", longestFreeRun.Length(), + kprintf("longest free pages run: %" B_PRIuPHYSADDR " pages (at %" + B_PRIuPHYSADDR ")\n", longestFreeRun.Length(), sPages[longestFreeRun.start].physical_page_number); - kprintf("longest free/cached pages run: %" B_PRIuSIZE " pages (at %" - B_PRIuSIZE ")\n", longestCachedRun.Length(), + kprintf("longest free/cached pages run: %" B_PRIuPHYSADDR " pages (at %" + B_PRIuPHYSADDR ")\n", longestCachedRun.Length(), sPages[longestCachedRun.start].physical_page_number); kprintf("waiting threads:\n"); @@ -914,19 +920,20 @@ waiter->missing, waiter->dontTouch); } - kprintf("\nfree queue: %p, count = %ld\n", &sFreePageQueue, + kprintf("\nfree queue: %p, count = %" B_PRIuPHYSADDR "\n", &sFreePageQueue, sFreePageQueue.Count()); - kprintf("clear queue: %p, count = %ld\n", &sClearPageQueue, + kprintf("clear queue: %p, count = %" B_PRIuPHYSADDR "\n", &sClearPageQueue, sClearPageQueue.Count()); - kprintf("modified queue: %p, count = %ld (%ld temporary, %lu swappable, " - "inactive: %lu)\n", &sModifiedPageQueue, sModifiedPageQueue.Count(), + kprintf("modified queue: %p, count = %" B_PRIuPHYSADDR " (%" B_PRId32 + " temporary, %" B_PRIuPHYSADDR " swappable, " "inactive: %" + B_PRIuPHYSADDR ")\n", &sModifiedPageQueue, sModifiedPageQueue.Count(), sModifiedTemporaryPages, swappableModified, swappableModifiedInactive); - kprintf("active queue: %p, count = %ld\n", &sActivePageQueue, - sActivePageQueue.Count()); - kprintf("inactive queue: %p, count = %ld\n", &sInactivePageQueue, - sInactivePageQueue.Count()); - kprintf("cached queue: %p, count = %ld\n", 
&sCachedPageQueue, - sCachedPageQueue.Count()); + kprintf("active queue: %p, count = %" B_PRIuPHYSADDR "\n", + &sActivePageQueue, sActivePageQueue.Count()); + kprintf("inactive queue: %p, count = %" B_PRIuPHYSADDR "\n", + &sInactivePageQueue, sInactivePageQueue.Count()); + kprintf("cached queue: %p, count = %" B_PRIuPHYSADDR "\n", + &sCachedPageQueue, sCachedPageQueue.Count()); return 0; } @@ -1301,8 +1308,8 @@ startPage, length)); if (sPhysicalPageOffset > startPage) { - dprintf("mark_page_range_in_use(%#" B_PRIxADDR ", %#" B_PRIxSIZE "): " - "start page is before free list\n", startPage, length); + dprintf("mark_page_range_in_use(%#" B_PRIxPHYSADDR ", %#" B_PRIxPHYSADDR + "): start page is before free list\n", startPage, length); if (sPhysicalPageOffset - startPage >= length) return B_OK; length -= sPhysicalPageOffset - startPage; @@ -1312,8 +1319,8 @@ startPage -= sPhysicalPageOffset; if (startPage + length > sNumPages) { - dprintf("mark_page_range_in_use(%#" B_PRIxADDR ", %#" B_PRIxSIZE "): " - "range would extend past free list\n", startPage, length); + dprintf("mark_page_range_in_use(%#" B_PRIxPHYSADDR ", %#" B_PRIxPHYSADDR + "): range would extend past free list\n", startPage, length); if (startPage >= sNumPages) return B_OK; length = sNumPages - startPage; @@ -1349,8 +1356,8 @@ case PAGE_STATE_CACHED: default: // uh - dprintf("mark_page_range_in_use: page 0x%lx in non-free state %d!\n", - startPage + i, page->State()); + dprintf("mark_page_range_in_use: page %#" B_PRIxPHYSADDR + " in non-free state %d!\n", startPage + i, page->State()); break; } } @@ -1539,7 +1546,7 @@ uint32 PageCount() const { return fPageCount; } virtual void IOFinished(status_t status, bool partialTransfer, - size_t bytesTransferred); + generic_size_t bytesTransferred); private: PageWriterRun* fRun; struct VMCache* fCache; @@ -1703,8 +1710,8 @@ phys_addr_t nextBase = fVecs[fVecCount - 1].base + fVecs[fVecCount - 1].length; - if (page->physical_page_number << PAGE_SHIFT == nextBase - && 
page->cache_offset == fOffset + fPageCount) { + if ((phys_addr_t)page->physical_page_number << PAGE_SHIFT == nextBase + && (off_t)page->cache_offset == fOffset + fPageCount) { // append to last iovec fVecs[fVecCount - 1].length += B_PAGE_SIZE; fPageCount++; @@ -1712,8 +1719,8 @@ } nextBase = fVecs[0].base - B_PAGE_SIZE; - if (page->physical_page_number << PAGE_SHIFT == nextBase - && page->cache_offset == fOffset - 1) { + if ((phys_addr_t)page->physical_page_number << PAGE_SHIFT == nextBase + && (off_t)page->cache_offset == fOffset - 1) { // prepend to first iovec and adjust offset fVecs[0].base = nextBase; fVecs[0].length += B_PAGE_SIZE; @@ -1722,12 +1729,12 @@ return true; } - if ((page->cache_offset == fOffset + fPageCount - || page->cache_offset == fOffset - 1) + if (((off_t)page->cache_offset == fOffset + fPageCount + || (off_t)page->cache_offset == fOffset - 1) && fVecCount < sizeof(fVecs) / sizeof(fVecs[0])) { // not physically contiguous or not in the right order uint32 vectorIndex; - if (page->cache_offset < fOffset) { + if ((off_t)page->cache_offset < fOffset) { // we are pre-pending another vector, move the other vecs for (uint32 i = fVecCount; i > 0; i--) fVecs[i] = fVecs[i - 1]; @@ -1783,7 +1790,7 @@ void PageWriteTransfer::IOFinished(status_t status, bool partialTransfer, - size_t bytesTransferred) + generic_size_t bytesTransferred) { SetStatus(status, bytesTransferred); fRun->PageWritten(this, fStatus, partialTransfer, bytesTransferred); @@ -2892,7 +2899,7 @@ // reserve, but should be a few more pages, so we don't have to extract // a cached page with each allocation. sFreePagesTarget = VM_PAGE_RESERVE_USER - + std::max((uint32)32, sNumPages / 1024); + + std::max((page_num_t)32, sNumPages / 1024); // The target of free + cached and inactive pages. On low-memory machines // keep things tight. 
free + cached is the pool of immediately allocatable @@ -3342,7 +3349,7 @@ } dprintf("vm_page_allocate_page_run(): Failed to allocate run of " - "length %" B_PRIuSIZE " in second iteration!", length); + "length %" B_PRIuPHYSADDR " in second iteration!", length); freeClearQueueLocker.Unlock(); vm_page_unreserve_pages(&reservation);