added 4 changesets to branch 'refs/remotes/ahenriksson-github/production' old head: 0000000000000000000000000000000000000000 new head: 62e1c717aeab270bf368387c0f768e6e43b1bfc1 ---------------------------------------------------------------------------- 0db3ec7: Move an inode e37a875: Added ability to specify allowed range in BlockAllocator::AllocateBlocks f534a0f: Factored the code to add a block_run to a data_stream out of _GrowStream() This is done so the code can be reused when moving the file stream. 62e1c71: Added ability to move a file stream to BFS StreamInRange checks if a file stream is inside a range MoveStream moves blocks as necessary for the stream to fit in a range [ Andreas Henriksson <sausageboy@xxxxxxxxx> ] ---------------------------------------------------------------------------- 8 files changed, 1049 insertions(+), 238 deletions(-) .../kernel/file_systems/bfs/BlockAllocator.cpp | 89 +- .../kernel/file_systems/bfs/BlockAllocator.h | 6 +- src/add-ons/kernel/file_systems/bfs/Index.cpp | 18 + src/add-ons/kernel/file_systems/bfs/Index.h | 4 + src/add-ons/kernel/file_systems/bfs/Inode.cpp | 980 ++++++++++++---- src/add-ons/kernel/file_systems/bfs/Inode.h | 32 +- src/add-ons/kernel/file_systems/bfs/Volume.h | 7 +- .../kernel/file_systems/bfs/kernel_interface.cpp | 151 +++ ############################################################################ Commit: 0db3ec785e64a18018f155c393024d058a7157dc Author: Andreas Henriksson <sausageboy@xxxxxxxxx> Date: Fri Jun 8 11:18:42 2012 UTC Committer: ahenriksson <sausageboy@xxxxxxxxx> Commit-Date: Fri Jun 29 15:52:22 2012 UTC Move an inode ---------------------------------------------------------------------------- diff --git a/src/add-ons/kernel/file_systems/bfs/Index.cpp b/src/add-ons/kernel/file_systems/bfs/Index.cpp index abdcb86..2ee601b 100644 --- a/src/add-ons/kernel/file_systems/bfs/Index.cpp +++ b/src/add-ons/kernel/file_systems/bfs/Index.cpp @@ -384,3 +384,21 @@ Index::UpdateLastModified(Transaction& 
transaction, Inode* inode, return status; } + +status_t +Index::UpdateInode(Transaction& transaction, const uint8* key, uint16 length, + off_t oldInodeID, off_t newInodeID) +{ + // remove node and insert it with the new id + BPlusTree* tree = Node()->Tree(); + status_t status = tree->Remove(transaction, key, length, oldInodeID); + + if (status == B_ENTRY_NOT_FOUND) + return B_OK; + + if (status != B_OK) + return status; + + return tree->Insert(transaction, key, length, newInodeID); +} + diff --git a/src/add-ons/kernel/file_systems/bfs/Index.h b/src/add-ons/kernel/file_systems/bfs/Index.h index 5825548..c580359 100644 --- a/src/add-ons/kernel/file_systems/bfs/Index.h +++ b/src/add-ons/kernel/file_systems/bfs/Index.h @@ -53,6 +53,10 @@ public: status_t UpdateLastModified(Transaction& transaction, Inode* inode, bigtime_t modified = -1); + status_t UpdateInode(Transaction& transaction, + const uint8* key, uint16 length, + off_t oldInodeId, off_t newInodeId); + private: Index(const Index& other); Index& operator=(const Index& other); diff --git a/src/add-ons/kernel/file_systems/bfs/Inode.cpp b/src/add-ons/kernel/file_systems/bfs/Inode.cpp index 8e0c0f0..df64b8a 100644 --- a/src/add-ons/kernel/file_systems/bfs/Inode.cpp +++ b/src/add-ons/kernel/file_systems/bfs/Inode.cpp @@ -2787,6 +2787,20 @@ Inode::Create(Transaction& transaction, Inode* parent, const char* name, return B_OK; } +status_t +Inode::Copy(Transaction& transaction, off_t targetBlock) +{ + NodeGetter target(fVolume); + uint8* targetData = target.SetToWritable(transaction, targetBlock, true); + + CachedBlock source(fVolume, fID); + memcpy(targetData, source.Block(), fVolume->BlockSize()); + + // update inode ID in target block + target.WritableNode()->inode_num = fVolume->ToBlockRun(targetBlock); + + return B_OK; +} // #pragma mark - AttributeIterator diff --git a/src/add-ons/kernel/file_systems/bfs/Inode.h b/src/add-ons/kernel/file_systems/bfs/Inode.h index c3f1b9c..9f44469 100644 --- 
a/src/add-ons/kernel/file_systems/bfs/Inode.h +++ b/src/add-ons/kernel/file_systems/bfs/Inode.h @@ -166,6 +166,8 @@ public: ino_t* _id = NULL, Inode** _inode = NULL, fs_vnode_ops* vnodeOps = NULL, uint32 publishFlags = 0); + status_t Copy(Transaction& transaction, + off_t targetBlock); // index maintaining helper void UpdateOldSize() { fOldSize = Size(); } diff --git a/src/add-ons/kernel/file_systems/bfs/kernel_interface.cpp b/src/add-ons/kernel/file_systems/bfs/kernel_interface.cpp index fff1563..ecc18cb 100644 --- a/src/add-ons/kernel/file_systems/bfs/kernel_interface.cpp +++ b/src/add-ons/kernel/file_systems/bfs/kernel_interface.cpp @@ -613,6 +613,148 @@ bfs_get_vnode_name(fs_volume* _volume, fs_vnode* _node, char* buffer, static status_t +bfs_resize_update_index_references(Transaction& transaction, Inode* inode, + off_t newInodeID) +{ + // update user file attributes + AttributeIterator iterator(inode); + + char attributeName[B_FILE_NAME_LENGTH]; + size_t nameLength; + uint32 attributeType; + ino_t attributeID; + + uint8 key[BPLUSTREE_MAX_KEY_LENGTH]; + size_t keyLength; + + status_t status; + Index index(inode->GetVolume()); + + while (iterator.GetNext(attributeName, &nameLength, &attributeType, + &attributeID) == B_OK) { + // ignore attribute if not in index + if (index.SetTo(attributeName) != B_OK) + continue; + + status = inode->ReadAttribute(attributeName, attributeType, 0, key, + &keyLength); + if (status != B_OK) + return status; + + status = index.UpdateInode(transaction, key, (uint16)keyLength, + inode->ID(), newInodeID); + if (status != B_OK) + return status; + } + + // update built-in attributes + if (inode->InNameIndex()) { + status = index.SetTo("name"); + if (status != B_OK) + return status; + + status = inode->GetName((char*)key, BPLUSTREE_MAX_KEY_LENGTH); + if (status != B_OK) + return status; + + status = index.UpdateInode(transaction, key, (uint16)strlen((char*)key), + inode->ID(), newInodeID); + if (status != B_OK) + return status; + } + if 
(inode->InSizeIndex()) { + status = index.SetTo("size"); + if (status != B_OK) + return status; + + off_t size = inode->Size(); + status = index.UpdateInode(transaction, (uint8*)&size, sizeof(int64), + inode->ID(), newInodeID); + if (status != B_OK) + return status; + } + if (inode->InLastModifiedIndex()) { + status = index.SetTo("last_modified"); + if (status != B_OK) + return status; + + off_t modified = inode->LastModified(); + status = index.UpdateInode(transaction, (uint8*)&modified, + sizeof(int64), inode->ID(), newInodeID); + if (status != B_OK) + return status; + } + return B_OK; +} + + +static status_t +bfs_resize_update_parent(Transaction& transaction, Inode* inode, + off_t newInodeID) +{ + Volume* volume = inode->GetVolume(); + + // get name of this inode + char name[B_FILE_NAME_LENGTH]; + status_t status = inode->GetName(name, B_FILE_NAME_LENGTH); + if (status != B_OK) + return status; + + // get Inode of parent + block_run parentBlockRun = inode->Parent(); + off_t parentBlock = parentBlockRun.Start() + + (parentBlockRun.AllocationGroup() + << volume->AllocationGroupShift()); + + Vnode parentVnode(volume, parentBlock); + Inode* parent; + status = parentVnode.Get(&parent); + if (status != B_OK) + return status; + + // update inode id in parent + BPlusTree* tree = parent->Tree(); + return tree->Replace(transaction, (const uint8*)name, (uint16)strlen(name), + newInodeID); +} + + +static status_t +bfs_resize_move_inode(Volume* volume, Inode* inode) +{ + Transaction transaction(volume, 0 /* what's this? 
*/); + + block_run run; + status_t status = volume->Allocator().AllocateBlocks(transaction, 0, 0, 1, + 1, run); + // we don't care about where we move at the moment + if (status != B_OK) + return status; + + off_t newInodeID = run.Start() + + (run.AllocationGroup() << volume->AllocationGroupShift()); + + status = inode->Copy(transaction, newInodeID); + if (status != B_OK) + return status; + + status = bfs_resize_update_parent(transaction, inode, newInodeID); + if (status != B_OK) + return status; + + status = bfs_resize_update_index_references(transaction, inode, newInodeID); + if (status != B_OK) + return status; + + status = volume->Free(transaction, inode->BlockRun()); + if (status != B_OK) + return status; + + return transaction.Done(); +} + + +static status_t bfs_ioctl(fs_volume* _volume, fs_vnode* _node, void* _cookie, uint32 cmd, void* buffer, size_t bufferLength) { @@ -689,6 +831,15 @@ bfs_ioctl(fs_volume* _volume, fs_vnode* _node, void* _cookie, uint32 cmd, return volume->WriteSuperBlock(); } + case 56743: + { + // move inode + Vnode vnode(volume, 692); + Inode *inode; + vnode.Get(&inode); + bfs_resize_move_inode(volume, inode); + return B_OK; + } #ifdef DEBUG_FRAGMENTER case 56741: ############################################################################ Commit: e37a875dbfd9af5a9e3491e74bbec41e8da6b6bd Author: Andreas Henriksson <sausageboy@xxxxxxxxx> Date: Mon Jun 25 20:05:35 2012 UTC Committer: ahenriksson <sausageboy@xxxxxxxxx> Commit-Date: Fri Jun 29 15:52:23 2012 UTC Added ability to specify allowed range in BlockAllocator::AllocateBlocks ---------------------------------------------------------------------------- diff --git a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp index 8dc8f3f..c0efc27 100644 --- a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp +++ b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp @@ -747,20 +747,28 @@ BlockAllocator::Uninitialize() } -/*! 
Tries to allocate between \a minimum, and \a maximum blocks starting - at group \a groupIndex with offset \a start. The resulting allocation - is put into \a run. +/*! Tries to allocate between \a minimum, and \a maximum blocks in the range + from \a beginBlock up to but not including \a endBlock, starting at group + \a groupIndex with offset \a start. The resulting allocation is put into + \a run. + + An \a endBlock value of 0 means no upper limit on the allowed allocation + range. The number of allocated blocks is always a multiple of \a minimum which has to be a power of two value. */ status_t BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, - uint16 start, uint16 maximum, uint16 minimum, block_run& run) + uint16 start, uint16 maximum, uint16 minimum, block_run& run, + off_t beginBlock, off_t endBlock) { if (maximum == 0) return B_BAD_VALUE; + if (endBlock > fVolume->NumBlocks()) + return B_BAD_VALUE; + FUNCTION_START(("group = %ld, start = %u, maximum = %u, minimum = %u\n", groupIndex, start, maximum, minimum)); @@ -769,6 +777,27 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, uint32 bitsPerFullBlock = fVolume->BlockSize() << 3; + // Express the allowed allocation range in terms of allocation groups and + // offsets. + int32 firstAllowedGroup = beginBlock / bitsPerFullBlock; + uint16 firstGroupBegin = beginBlock % bitsPerFullBlock; + + int32 lastAllowedGroup; + int32 lastGroupEnd; + + if (endBlock == 0) { + lastAllowedGroup = fNumGroups - 1; + lastGroupEnd = fGroups[lastAllowedGroup].NumBits(); + } else { + // If endBlock is the first block of an allocation group, the last + // allowed group is the previous, and the end block offset is + // bitsPerFullBlock. 
+ lastAllowedGroup = (endBlock - 1) / bitsPerFullBlock; + lastGroupEnd = endBlock % bitsPerFullBlock; + if (lastGroupEnd == 0) + lastGroupEnd = bitsPerFullBlock; + } + // Find the block_run that can fulfill the request best int32 bestGroup = -1; int32 bestStart = -1; @@ -780,7 +809,19 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, CHECK_ALLOCATION_GROUP(groupIndex); - if (start >= group.NumBits() || group.IsFull()) + if (groupIndex < firstAllowedGroup || groupIndex > lastAllowedGroup) + continue; + + if (groupIndex == firstAllowedGroup && start < firstGroupBegin) + start = firstGroupBegin; + + uint32 end; + if (groupIndex == lastAllowedGroup) + end = lastGroupEnd; + else + end = group.NumBits(); + + if (start >= end || group.IsFull()) continue; // The wanted maximum is smaller than the largest free block in the @@ -793,7 +834,8 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, if (group.fLargestLength < bestLength) continue; - if (group.fLargestStart >= start) { + if (group.fLargestStart >= start + && group.fLargestStart + group.fLargestLength <= (int32)end) { if (group.fLargestLength >= bestLength) { bestGroup = groupIndex; bestStart = group.fLargestStart; @@ -813,23 +855,35 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, // we iterate through it to find a place for the allocation. // (one allocation can't exceed one allocation group) - uint32 block = start / (fVolume->BlockSize() << 3); + uint32 block = start / bitsPerFullBlock; int32 currentStart = 0, currentLength = 0; int32 groupLargestStart = -1; int32 groupLargestLength = -1; int32 currentBit = start; - bool canFindGroupLargest = start == 0; + bool canFindGroupLargest = start == 0 && end == group.NumBits(); - for (; block < group.NumBlocks(); block++) { + // If end is the first bit in a block, the last block considered is the + // previous one, and the end bit is bitsPerFullBlock. 
+ uint32 lastBlock = (end - 1) / bitsPerFullBlock; + uint32 lastBlockEndBit = end % bitsPerFullBlock; + if (lastBlockEndBit == 0) + lastBlockEndBit = bitsPerFullBlock; + + for (; block <= lastBlock; block++) { if (cached.SetTo(group, block) < B_OK) RETURN_ERROR(B_ERROR); T(Block("alloc-in", group.Start() + block, cached.Block(), fVolume->BlockSize(), groupIndex, currentStart)); + uint32 endBit; + if (block == lastBlock) + endBit = lastBlockEndBit; + else + endBit = cached.NumBlockBits(); + // find a block large enough to hold the allocation - for (uint32 bit = start % bitsPerFullBlock; - bit < cached.NumBlockBits(); bit++) { + for (uint32 bit = start % bitsPerFullBlock; bit < endBit; bit++) { if (!cached.IsUsed(bit)) { if (currentLength == 0) { // start new range @@ -861,7 +915,7 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, <= groupLargestLength) { // We can't find a bigger block in this group anymore, // let's skip the rest. - block = group.NumBlocks(); + block = lastBlock + 1; break; } } @@ -880,7 +934,7 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, start = 0; } - if (currentBit == (int32)group.NumBits()) { + if (currentBit == (int32)end) { if (currentLength > bestLength) { bestGroup = groupIndex; bestStart = currentStart; @@ -924,6 +978,9 @@ BlockAllocator::AllocateBlocks(Transaction& transaction, int32 groupIndex, run.start = HOST_ENDIAN_TO_BFS_INT16(bestStart); run.length = HOST_ENDIAN_TO_BFS_INT16(bestLength); + ASSERT(fVolume->ToBlock(run) >= beginBlock); + ASSERT(endBlock == 0 || fVolume->ToBlock(run) + run.Length() <= endBlock); + fVolume->SuperBlock().used_blocks = HOST_ENDIAN_TO_BFS_INT64(fVolume->UsedBlocks() + bestLength); // We are not writing back the disk's super block - it's diff --git a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h index ba490ba..74d2d8a 100644 --- a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h +++ 
b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h @@ -44,7 +44,8 @@ public: status_t AllocateBlocks(Transaction& transaction, int32 group, uint16 start, uint16 numBlocks, - uint16 minimum, block_run& run); + uint16 minimum, block_run& run, + off_t beginBlock = 0, off_t endBlock = 0); status_t StartChecking(const check_control* control); status_t StopChecking(check_control* control); ############################################################################ Commit: f534a0f0a8a1945d3a7ce08a06405166689ca157 Author: Andreas Henriksson <sausageboy@xxxxxxxxx> Date: Fri Jun 29 13:19:40 2012 UTC Committer: ahenriksson <sausageboy@xxxxxxxxx> Commit-Date: Fri Jun 29 15:52:24 2012 UTC Factored the code to add a block_run to a data_stream out of _GrowStream() This is done so the code can be reused when moving the file stream. ---------------------------------------------------------------------------- diff --git a/src/add-ons/kernel/file_systems/bfs/Inode.cpp b/src/add-ons/kernel/file_systems/bfs/Inode.cpp index df64b8a..8d5716b 100644 --- a/src/add-ons/kernel/file_systems/bfs/Inode.cpp +++ b/src/add-ons/kernel/file_systems/bfs/Inode.cpp @@ -1688,6 +1688,244 @@ Inode::_AllocateBlockArray(Transaction& transaction, block_run& run, } +/*! Adds \a run to \a data, allocating indirection blocks if necessary. + If the block run cannot be added as is, due to constraints on block run + size in the double indirect range, \a rest is set to the number of blocks + that need to be shaved off the run and the function returns. + + If the physical stream size ends up larger than \a targetSize, the stream + size is set to the target file size. 
+*/ +status_t +Inode::_AddBlockRun(Transaction& transaction, data_stream* data, block_run run, + off_t targetSize, int32* rest, off_t beginBlock, off_t endBlock) +{ + status_t status; + + if (rest) + *rest = 0; + + bool cutSize = targetSize < data->Size() + + (run.Length() << fVolume->BlockShift()); + // if adding this block_run means overshooting the target stream size, + // we need to set data->size to targetSize. + + // Direct block range + + if (data->Size() <= data->MaxDirectRange()) { + // let's try to put them into the direct block range + int32 free = 0; + for (; free < NUM_DIRECT_BLOCKS; free++) { + if (data->direct[free].IsZero()) + break; + } + + if (free < NUM_DIRECT_BLOCKS) { + // can we merge the last allocated run with the new one? + int32 last = free - 1; + if (free > 0 && data->direct[last].MergeableWith(run)) { + data->direct[last].length = HOST_ENDIAN_TO_BFS_INT16( + data->direct[last].Length() + run.Length()); + } else + data->direct[free] = run; + + data->max_direct_range = HOST_ENDIAN_TO_BFS_INT64( + data->MaxDirectRange() + + run.Length() * fVolume->BlockSize()); + data->size = cutSize ? 
HOST_ENDIAN_TO_BFS_INT64(targetSize) + : data->max_direct_range; + return B_OK; + } + } + + // Indirect block range + + if (data->Size() <= data->MaxIndirectRange() + || !data->MaxIndirectRange()) { + CachedBlock cached(fVolume); + block_run* runs = NULL; + uint32 free = 0; + off_t block; + + // if there is no indirect block yet, create one + if (data->indirect.IsZero()) { + status = _AllocateBlockArray(transaction, data->indirect, + NUM_ARRAY_BLOCKS, true, beginBlock, endBlock); + if (status != B_OK) + return status; + + data->max_indirect_range = data->max_direct_range; + + // insert the block_run in the first block + runs = (block_run*)cached.SetTo(data->indirect); + } else { + uint32 numberOfRuns = fVolume->BlockSize() / sizeof(block_run); + block = fVolume->ToBlock(data->indirect); + + // search first empty entry + int32 i = 0; + for (; i < data->indirect.Length(); i++) { + if ((runs = (block_run*)cached.SetTo(block + i)) == NULL) + return B_IO_ERROR; + + for (free = 0; free < numberOfRuns; free++) + if (runs[free].IsZero()) + break; + + if (free < numberOfRuns) + break; + } + if (i == data->indirect.Length()) + runs = NULL; + } + + if (runs != NULL) { + // try to insert the run to the last one - note that this + // doesn't take block borders into account, so it could be + // further optimized + cached.MakeWritable(transaction); + + int32 last = free - 1; + if (free > 0 && runs[last].MergeableWith(run)) { + runs[last].length = HOST_ENDIAN_TO_BFS_INT16( + runs[last].Length() + run.Length()); + } else + runs[free] = run; + + data->max_indirect_range = HOST_ENDIAN_TO_BFS_INT64( + data->MaxIndirectRange() + + (run.Length() << fVolume->BlockShift())); + data->size = cutSize ? 
HOST_ENDIAN_TO_BFS_INT64(targetSize) + : data->max_indirect_range; + return B_OK; + } + } + + // Double indirect block range + + if (data->Size() <= data->MaxDoubleIndirectRange() + || !data->max_double_indirect_range) { + // We make sure here that we have this minimum allocated, so if + // the allocation succeeds, we don't run into an endless loop. + uint16 doubleIndirectBlockLength; + if (!data->max_double_indirect_range) + doubleIndirectBlockLength = _DoubleIndirectBlockLength(); + else + doubleIndirectBlockLength = data->double_indirect.Length(); + + if ((run.Length() % doubleIndirectBlockLength) != 0) { + // The number of allocated blocks isn't a multiple of + // 'doubleIndirectBlockLength', so we return and let the caller + // change this. This can happen the first time the stream grows + // into the double indirect range. + if (rest) { + *rest = run.Length() % doubleIndirectBlockLength; + return B_OK; + } + + // the caller didn't expect rest + return B_BAD_VALUE; + } + + // if there is no double indirect block yet, create one + if (data->double_indirect.IsZero()) { + status = _AllocateBlockArray(transaction, + data->double_indirect, _DoubleIndirectBlockLength(), + beginBlock, endBlock); + if (status != B_OK) + return status; + + data->max_double_indirect_range = data->max_indirect_range; + } + + // calculate the index where to insert the new blocks + + int32 runsPerBlock; + int32 directSize; + int32 indirectSize; + get_double_indirect_sizes(data->double_indirect.Length(), + fVolume->BlockSize(), runsPerBlock, directSize, indirectSize); + + off_t start = data->MaxDoubleIndirectRange() + - data->MaxIndirectRange(); + int32 indirectIndex = start / indirectSize; + int32 index = (start % indirectSize) / directSize; + int32 runsPerArray = runsPerBlock * doubleIndirectBlockLength; + + // distribute the blocks to the array and allocate + // new array blocks when needed + + CachedBlock cached(fVolume); + CachedBlock cachedDirect(fVolume); + block_run* array = NULL; + 
uint32 runLength = run.Length(); + + while (run.length != 0) { + // get the indirect array block + if (array == NULL) { + uint32 block = indirectIndex / runsPerBlock; + if (block >= doubleIndirectBlockLength) + return EFBIG; + + array = (block_run*)cached.SetTo(fVolume->ToBlock( + data->double_indirect) + block); + if (array == NULL) + return B_IO_ERROR; + } + + do { + // do we need a new array block? + if (array[indirectIndex % runsPerBlock].IsZero()) { + cached.MakeWritable(transaction); + + status = _AllocateBlockArray(transaction, + array[indirectIndex % runsPerBlock], + data->double_indirect.Length(), beginBlock, endBlock); + if (status != B_OK) + return status; + } + + block_run* runs = (block_run*)cachedDirect.SetToWritable( + transaction, fVolume->ToBlock(array[indirectIndex + % runsPerBlock]) + index / runsPerBlock); + if (runs == NULL) + return B_IO_ERROR; + + do { + // insert the block_run into the array + runs[index % runsPerBlock] = run; + runs[index % runsPerBlock].length + = HOST_ENDIAN_TO_BFS_INT16(doubleIndirectBlockLength); + + // alter the remaining block_run + run.start = HOST_ENDIAN_TO_BFS_INT16(run.Start() + + doubleIndirectBlockLength); + run.length = HOST_ENDIAN_TO_BFS_INT16(run.Length() + - doubleIndirectBlockLength); + } while ((++index % runsPerBlock) != 0 && run.length); + } while ((index % runsPerArray) != 0 && run.length); + + if (index == runsPerArray) + index = 0; + if (++indirectIndex % runsPerBlock == 0) { + array = NULL; + index = 0; + } + } + + data->max_double_indirect_range = HOST_ENDIAN_TO_BFS_INT64( + data->MaxDoubleIndirectRange() + + (runLength << fVolume->BlockShift())); + data->size = cutSize ? HOST_ENDIAN_TO_BFS_INT64(targetSize) + : data->max_double_indirect_range; + + return B_OK; + } + + RETURN_ERROR(EFBIG); +} + + /*! Grows the stream to \a size, and fills the direct/indirect/double indirect ranges with the runs. This method will also determine the size of the preallocation, if any. 
@@ -1776,6 +2014,12 @@ Inode::_GrowStream(Transaction& transaction, off_t size) // the requested blocks do not need to be returned with a // single allocation, so we need to iterate until we have // enough blocks allocated + + // If data has a double_indirect block, we're adding block_run:s to + // the double indirect range. + if (!data->double_indirect.IsZero()) + minimum = data->double_indirect.Length(); + if (minimum > 1) { // make sure that "blocks" is a multiple of minimum blocksRequested = round_up(blocksRequested, minimum); @@ -1794,227 +2038,39 @@ Inode::_GrowStream(Transaction& transaction, off_t size) // don't preallocate if the first allocation was already too small blocksRequested = blocksNeeded; - // Direct block range - - if (data->Size() <= data->MaxDirectRange()) { - // let's try to put them into the direct block range - int32 free = 0; - for (; free < NUM_DIRECT_BLOCKS; free++) { - if (data->direct[free].IsZero()) - break; - } - - if (free < NUM_DIRECT_BLOCKS) { - // can we merge the last allocated run with the new one? - int32 last = free - 1; - if (free > 0 && data->direct[last].MergeableWith(run)) { - data->direct[last].length = HOST_ENDIAN_TO_BFS_INT16( - data->direct[last].Length() + run.Length()); - } else - data->direct[free] = run; - - data->max_direct_range = HOST_ENDIAN_TO_BFS_INT64( - data->MaxDirectRange() - + run.Length() * fVolume->BlockSize()); - data->size = HOST_ENDIAN_TO_BFS_INT64(blocksNeeded > 0 - ? 
data->max_direct_range : size); - continue; - } - } + int32 rest; + status = _AddBlockRun(transaction, data, run, size, &rest); + if (status != B_OK) + return status; - // Indirect block range + if (rest != 0) { + // We've entered the double indirect range, and the number of + // allocated blocks isn't a multiple of 'doubleIndirectBlockLength' - if (data->Size() <= data->MaxIndirectRange() - || !data->MaxIndirectRange()) { - CachedBlock cached(fVolume); - block_run* runs = NULL; - uint32 free = 0; - off_t block; - - // if there is no indirect block yet, create one - if (data->indirect.IsZero()) { - status = _AllocateBlockArray(transaction, data->indirect, - NUM_ARRAY_BLOCKS, true); - if (status != B_OK) - return status; - - data->max_indirect_range = HOST_ENDIAN_TO_BFS_INT64( - data->MaxDirectRange()); - // insert the block_run in the first block - runs = (block_run*)cached.SetTo(data->indirect); - } else { - uint32 numberOfRuns = fVolume->BlockSize() / sizeof(block_run); - block = fVolume->ToBlock(data->indirect); + minimum = _DoubleIndirectBlockLength(); - // search first empty entry - int32 i = 0; - for (; i < data->indirect.Length(); i++) { - if ((runs = (block_run*)cached.SetTo(block + i)) == NULL) - return B_IO_ERROR; + // Free the remaining blocks that don't fit into this multiple. 
+ run.length = HOST_ENDIAN_TO_BFS_INT16(run.Length() - rest); - for (free = 0; free < numberOfRuns; free++) - if (runs[free].IsZero()) - break; + status = fVolume->Free(transaction, + block_run::Run(run.AllocationGroup(), + run.Start() + run.Length(), rest)); + if (status != B_OK) + return status; - if (free < numberOfRuns) - break; - } - if (i == data->indirect.Length()) - runs = NULL; - } + blocksNeeded += rest; + blocksRequested = round_up(blocksNeeded, minimum); - if (runs != NULL) { - // try to insert the run to the last one - note that this - // doesn't take block borders into account, so it could be - // further optimized - cached.MakeWritable(transaction); - - int32 last = free - 1; - if (free > 0 && runs[last].MergeableWith(run)) { - runs[last].length = HOST_ENDIAN_TO_BFS_INT16( - runs[last].Length() + run.Length()); - } else - runs[free] = run; - - data->max_indirect_range = HOST_ENDIAN_TO_BFS_INT64( - data->MaxIndirectRange() - + (run.Length() << fVolume->BlockShift())); - data->size = HOST_ENDIAN_TO_BFS_INT64(blocksNeeded > 0 - ? data->MaxIndirectRange() : size); + // Are there any blocks left in the run? If not, allocate + // a new one + if (run.length == 0) continue; - } - } - - // Double indirect block range - if (data->Size() <= data->MaxDoubleIndirectRange() - || !data->max_double_indirect_range) { - // We make sure here that we have this minimum allocated, so if - // the allocation succeeds, we don't run into an endless loop. - if (!data->max_double_indirect_range) - minimum = _DoubleIndirectBlockLength(); - else - minimum = data->double_indirect.Length(); - - if ((run.Length() % minimum) != 0) { - // The number of allocated blocks isn't a multiple of 'minimum', - // so we have to change this. This can happen the first time the - // stream grows into the double indirect range. - // First, free the remaining blocks that don't fit into this - // multiple. 
- int32 rest = run.Length() % minimum; - run.length = HOST_ENDIAN_TO_BFS_INT16(run.Length() - rest); - - status = fVolume->Free(transaction, - block_run::Run(run.AllocationGroup(), - run.Start() + run.Length(), rest)); - if (status != B_OK) - return status; - - blocksNeeded += rest; - blocksRequested = round_up(blocksNeeded, minimum); - - // Are there any blocks left in the run? If not, allocate - // a new one - if (run.length == 0) - continue; - } - - // if there is no double indirect block yet, create one - if (data->double_indirect.IsZero()) { - status = _AllocateBlockArray(transaction, - data->double_indirect, _DoubleIndirectBlockLength()); - if (status != B_OK) - return status; - - data->max_double_indirect_range = data->max_indirect_range; - } - - // calculate the index where to insert the new blocks - - int32 runsPerBlock; - int32 directSize; - int32 indirectSize; - get_double_indirect_sizes(data->double_indirect.Length(), - fVolume->BlockSize(), runsPerBlock, directSize, indirectSize); - - off_t start = data->MaxDoubleIndirectRange() - - data->MaxIndirectRange(); - int32 indirectIndex = start / indirectSize; - int32 index = (start % indirectSize) / directSize; - int32 runsPerArray = runsPerBlock * minimum; - - // distribute the blocks to the array and allocate - // new array blocks when needed - - CachedBlock cached(fVolume); - CachedBlock cachedDirect(fVolume); - block_run* array = NULL; - uint32 runLength = run.Length(); - - while (run.length != 0) { - // get the indirect array block - if (array == NULL) { - uint32 block = indirectIndex / runsPerBlock; - if (block >= minimum) - return EFBIG; - - array = (block_run*)cached.SetTo(fVolume->ToBlock( - data->double_indirect) + block); - if (array == NULL) - return B_IO_ERROR; - } - - do { - // do we need a new array block? 
- if (array[indirectIndex % runsPerBlock].IsZero()) { - cached.MakeWritable(transaction); - - status = _AllocateBlockArray(transaction, - array[indirectIndex % runsPerBlock], - data->double_indirect.Length()); - if (status != B_OK) - return status; - } - - block_run* runs = (block_run*)cachedDirect.SetToWritable( - transaction, fVolume->ToBlock(array[indirectIndex - % runsPerBlock]) + index / runsPerBlock); - if (runs == NULL) - return B_IO_ERROR; - - do { - // insert the block_run into the array - runs[index % runsPerBlock] = run; - runs[index % runsPerBlock].length - = HOST_ENDIAN_TO_BFS_INT16(minimum); - - // alter the remaining block_run - run.start = HOST_ENDIAN_TO_BFS_INT16(run.Start() - + minimum); - run.length = HOST_ENDIAN_TO_BFS_INT16(run.Length() - - minimum); - } while ((++index % runsPerBlock) != 0 && run.length); - } while ((index % runsPerArray) != 0 && run.length); - - if (index == runsPerArray) - index = 0; - if (++indirectIndex % runsPerBlock == 0) { - array = NULL; - index = 0; - } - } - - data->max_double_indirect_range = HOST_ENDIAN_TO_BFS_INT64( - data->MaxDoubleIndirectRange() - + (runLength << fVolume->BlockShift())); - data->size = blocksNeeded > 0 ? 
HOST_ENDIAN_TO_BFS_INT64( - data->max_double_indirect_range) : size; - - continue; + // Otherwise, add the block run to the stream + status = _AddBlockRun(transaction, data, run, size); + if (status != B_OK) + return status; } - - RETURN_ERROR(EFBIG); } // update the size of the data stream data->size = HOST_ENDIAN_TO_BFS_INT64(size); diff --git a/src/add-ons/kernel/file_systems/bfs/Inode.h b/src/add-ons/kernel/file_systems/bfs/Inode.h index 9f44469..46d1a13 100644 --- a/src/add-ons/kernel/file_systems/bfs/Inode.h +++ b/src/add-ons/kernel/file_systems/bfs/Inode.h @@ -248,6 +248,10 @@ private: off_t size); status_t _ShrinkStream(Transaction& transaction, off_t size); + status_t _AddBlockRun(Transaction& transaction, + data_stream* data, block_run run, + off_t targetSize, int32* rest = NULL, + off_t beginBlock = 0, off_t endBlock = 0); private: rw_lock fLock; ############################################################################ Commit: 62e1c717aeab270bf368387c0f768e6e43b1bfc1 Author: Andreas Henriksson <sausageboy@xxxxxxxxx> Date: Fri Jun 29 13:25:28 2012 UTC Committer: ahenriksson <sausageboy@xxxxxxxxx> Commit-Date: Fri Jun 29 15:52:26 2012 UTC Added ability to move a file stream to BFS StreamInRange checks if a file stream is inside a range MoveStream moves blocks as necessary for the stream to fit in a range ---------------------------------------------------------------------------- diff --git a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp index c0efc27..8573bbc 100644 --- a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp +++ b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.cpp @@ -1020,7 +1020,8 @@ BlockAllocator::AllocateForInode(Transaction& transaction, status_t BlockAllocator::Allocate(Transaction& transaction, Inode* inode, - off_t numBlocks, block_run& run, uint16 minimum) + off_t numBlocks, block_run& run, uint16 minimum, off_t beginBlock, + off_t endBlock) { if 
(numBlocks <= 0) return B_ERROR; @@ -1073,7 +1074,8 @@ BlockAllocator::Allocate(Transaction& transaction, Inode* inode, group = inode->BlockRun().AllocationGroup() + 1; } - return AllocateBlocks(transaction, group, start, numBlocks, minimum, run); + return AllocateBlocks(transaction, group, start, numBlocks, minimum, run, + beginBlock, endBlock); } diff --git a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h index 74d2d8a..d3b575d 100644 --- a/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h +++ b/src/add-ons/kernel/file_systems/bfs/BlockAllocator.h @@ -39,7 +39,8 @@ public: block_run& run); status_t Allocate(Transaction& transaction, Inode* inode, off_t numBlocks, block_run& run, - uint16 minimum = 1); + uint16 minimum = 1, off_t beginBlock = 0, + off_t endBlock = 0); status_t Free(Transaction& transaction, block_run run); status_t AllocateBlocks(Transaction& transaction, diff --git a/src/add-ons/kernel/file_systems/bfs/Inode.cpp b/src/add-ons/kernel/file_systems/bfs/Inode.cpp index 8d5716b..882ba9c 100644 --- a/src/add-ons/kernel/file_systems/bfs/Inode.cpp +++ b/src/add-ons/kernel/file_systems/bfs/Inode.cpp @@ -159,6 +159,48 @@ private: }; +/*! Circular buffer storing block runs, which can be read from disk in + chunks at a time. Used in MoveStream(). 
+*/ +class BlockRunBuffer { +public: + BlockRunBuffer(Volume* volume); + ~BlockRunBuffer(); + + status_t GetBlocks(const uint8** buffer, + off_t blocksRequested, + off_t& blocksWritten); + status_t PutRun(block_run run); + off_t NumBlocks() const { return fNumBlocks; } + bool IsEmpty() const { return fNumBlocks == 0; } + bool IsFull() const; + + void Reset(); + + status_t AllocateBuffers(); + +private: + Volume* fVolume; + + // fields to manage the circular buffer of block_run's + block_run* fBuffer; + static const int32 kSize = 512; + // 512 * sizeof(block_run) == 4 kB + + int32 fHead; + int32 fTail; + + uint16 fOffset; + off_t fNumBlocks; + + // buffer to store block data as we write it to disk + uint8* fDataBuffer; + uint32 fDataBufferSize; + static const int32 kTargetDataBufferSize = 16 * 1024 * 1024; + // == 16 MB +}; + + InodeAllocator::InodeAllocator(Transaction& transaction) : fTransaction(&transaction), @@ -294,6 +336,131 @@ InodeAllocator::_TransactionListener(int32 id, int32 event, void* _inode) // #pragma mark - +BlockRunBuffer::BlockRunBuffer(Volume* volume) + : + fVolume(volume), + fBuffer(NULL), + fDataBuffer(NULL) +{ + Reset(); +} + + +BlockRunBuffer::~BlockRunBuffer() +{ + delete[] fBuffer; + delete[] fDataBuffer; +} + + +void +BlockRunBuffer::Reset() +{ + fHead = fTail = fOffset = fNumBlocks = 0; +} + + +bool +BlockRunBuffer::IsFull() const +{ + // The buffer is full when the tail points to the block before the head. + // The remaining free position is ignored to make it easier to separate + // the full state from the empty state. 
+ return fHead == (fTail + 1) % kSize; +} + + +status_t +BlockRunBuffer::PutRun(block_run run) +{ + if (fBuffer == NULL || fDataBuffer == NULL) + return B_NO_INIT; + + if (IsFull()) + return B_NO_MEMORY; + + fBuffer[fTail] = run; + fTail = (fTail + 1) % kSize; + + fNumBlocks += run.Length(); + + return B_OK; +} + + +status_t +BlockRunBuffer::GetBlocks(const uint8** buffer, off_t blocksRequested, + off_t& blocksWritten) +{ + if (fBuffer == NULL || fDataBuffer == NULL) + return B_NO_INIT; + + uint32 blockShift = fVolume->BlockShift(); + uint32 blockSize = fVolume->BlockSize(); + + // the amount of blocks we can return is the minimum of the blocks + // requested, the blocks in the buffer and the size of the data buffer + blocksWritten = blocksRequested; + + if (blocksWritten > fNumBlocks) + blocksWritten = fNumBlocks; + + if (blocksWritten > (fDataBufferSize >> blockShift)) + blocksWritten = fDataBufferSize >> blockShift; + + CachedBlock cached(fVolume); + for (off_t i = 0; i < blocksWritten; i++) { + const uint8* data + = cached.SetTo(fVolume->ToBlock(fBuffer[fHead]) + fOffset); + + memcpy(fDataBuffer + (i << blockShift), data, blockSize); + + if (++fOffset == fBuffer[fHead].Length()) { + fHead = (fHead + 1) % kSize; + fOffset = 0; + } + + fNumBlocks--; + ASSERT(fNumBlocks >= 0); + } + + *buffer = fDataBuffer; + return B_OK; +} + + +status_t +BlockRunBuffer::AllocateBuffers() +{ + // allocate block run buffer + fBuffer = new(std::nothrow) block_run[kSize]; + if (fBuffer == NULL) + return B_NO_MEMORY; + + // allocate data buffer + fDataBufferSize = kTargetDataBufferSize; + + while (fDataBufferSize > fVolume->BlockSize()) { + fDataBuffer = new(std::nothrow) uint8[fDataBufferSize]; + if (fDataBuffer != NULL) + return B_OK; + + fDataBufferSize /= 4; + } + + // don't allocate a data buffer which is smaller than the block size + fDataBuffer = new(std::nothrow) uint8[fVolume->BlockSize()]; + if (fDataBuffer == NULL) + return B_NO_MEMORY; + + fDataBufferSize = 
fVolume->BlockSize(); + return B_OK; +} + + +// #pragma mark - + + status_t bfs_inode::InitCheck(Volume* volume) const { @@ -1431,9 +1598,9 @@ Inode::AllocatedSize() const The caller has to make sure that "pos" is inside the stream. */ status_t -Inode::FindBlockRun(off_t pos, block_run& run, off_t& offset) +Inode::FindBlockRun(off_t pos, block_run& run, off_t& offset) const { - data_stream* data = &Node().data; + const data_stream* data = &Node().data; // find matching block run @@ -1664,13 +1831,13 @@ Inode::FillGapWithZeros(off_t pos, off_t newSize) */ status_t Inode::_AllocateBlockArray(Transaction& transaction, block_run& run, - size_t length, bool variableSize) + size_t length, bool variableSize, off_t beginBlock, off_t endBlock) { if (!run.IsZero()) return B_BAD_VALUE; status_t status = fVolume->Allocate(transaction, this, length, run, - variableSize ? 1 : length); + variableSize ? 1 : length, beginBlock, endBlock); if (status != B_OK) return status; @@ -2491,6 +2658,317 @@ Inode::Sync() } + +/*! Write the block runs in \a buffer to \a data. + If \a flush is set, we always empty the buffer even if it means + allocating a block run which is larger than the available blocks.
+*/ +status_t +Inode::_WriteBufferedRuns(Transaction& transaction, BlockRunBuffer& buffer, + data_stream* data, off_t targetSize, bool flush, off_t beginBlock, + off_t endBlock) +{ + status_t status; + off_t doubleIndirectBlockLength = _DoubleIndirectBlockLength(); + uint32 blockShift = fVolume->BlockShift(); + + + bool inDoubleIndirect = data->MaxIndirectRange() != 0 + && data->Size() > data->MaxIndirectRange(); + // we might be about to add a block_run to the double indirect range + // even if this is false, we'll discover that case when we try + + while (buffer.NumBlocks() > 0) { + block_run run; + off_t blocksToWrite; + + // allocate a block_run + if (inDoubleIndirect) { + // stop if we can't fill a double indirect file-data block_run, + // unless we're flushing out all of the buffer + if (buffer.NumBlocks() < doubleIndirectBlockLength + && flush == false) { + break; + } + + status = fVolume->Allocator().AllocateBlocks(transaction, 0, 0, + buffer.NumBlocks(), doubleIndirectBlockLength, run, + beginBlock, endBlock); + if (status != B_OK) + return status; + + // if we're flushing the buffer while writing to the double + // indirect section, the amount of blocks to write can be smaller + // than the allocated run + if (buffer.NumBlocks() < run.Length()) + blocksToWrite = buffer.NumBlocks(); + else + blocksToWrite = run.Length(); + } else { + status = fVolume->Allocator().AllocateBlocks(transaction, 0, 0, + buffer.NumBlocks(), 1, run, beginBlock, endBlock); + if (status != B_OK) + return status; + + blocksToWrite = run.Length(); + } + + // add the run to the data stream + int32 rest; + status = _AddBlockRun(transaction, data, run, targetSize, + &rest, beginBlock, endBlock); + if (status != B_OK) + return status; + + if (rest != 0) { + // oh no, we tried to add a misshaped block_run to the double + // indirect range, free it and try again. 
+ ASSERT(inDoubleIndirect == false); + inDoubleIndirect = true; + + status = fVolume->Free(transaction, run); + if (status != B_OK) + return status; + + continue; + } + + // copy the stream data to our run + for (off_t i = 0; i < blocksToWrite; ) { + off_t blocksRead; + + const uint8* sourceData; + status = buffer.GetBlocks(&sourceData, blocksToWrite, blocksRead); + if (status != B_OK) + return status; + + // Don't use the journal to copy file data. These blocks are + // currently not referenced by the file system in any way, so + // we'll be in a valid state even if the transaction fails. + off_t targetBlock = fVolume->ToBlock(run) + i; + + ssize_t written = write_pos(fVolume->Device(), + targetBlock << blockShift, sourceData, + blocksRead << blockShift); + + if (written != blocksRead << blockShift) + return B_IO_ERROR; + + i += blocksRead; + } + } + return B_OK; +} + + +/*! Get the block run that covers the file position \a position. + \a position is updated to point at the first position covered by + the following run. +*/ +status_t +Inode::_GetNextBlockRun(off_t& position, block_run& run) const +{ + if (position >= Size()) + return B_ENTRY_NOT_FOUND; + + off_t offset; + status_t status = FindBlockRun(position, run, offset); + if (status != B_OK) + return status; + + position = offset + (run.Length() << fVolume->BlockShift()); + return B_OK; +} + + +bool +Inode::_BlockRunInRange(block_run run, off_t beginBlock, off_t endBlock) const +{ + off_t runStart = GetVolume()->ToBlock(run); + return runStart >= beginBlock && runStart + run.Length() <= endBlock; +} + + +bool +Inode::_BlockRunOutsideRange(block_run run, off_t beginBlock, off_t endBlock) + const +{ + // note that this is not the opposite of _BlockRunInRange, as we only + // return true if the block run is completely outside the range. + off_t runStart = GetVolume()->ToBlock(run); + return runStart + run.Length() <= beginBlock || runStart >= endBlock; +} + + +/*! 
Move the file stream to lie completely in the range from \a beginBlock + up to but not including \a endBlock. +*/ +status_t +Inode::MoveStream(off_t beginBlock, off_t endBlock) +{ + status_t status; + data_stream data = {}; + + BlockRunBuffer buffer(fVolume); + status = buffer.AllocateBuffers(); + if (status != B_OK) + return status; + + Stack<block_run> runsToFree; + // no particular reason for using a stack, we just need something to + // store block_run's in + + Transaction transaction(fVolume, 0); + WriteLockInTransaction(transaction); + + off_t position = 0; + while (true) { + block_run run; + + status = _GetNextBlockRun(position, run); + if (status == B_ENTRY_NOT_FOUND) + break; + + if (status != B_OK) + return status; + + if (_BlockRunInRange(run, beginBlock, endBlock)) { + status = _WriteBufferedRuns(transaction, buffer, &data, Size(), + false, beginBlock, endBlock); + if (status != B_OK) + return status; + + // if we're writing to the double indirect range, we might not have + // cleared the buffer + + if (buffer.IsEmpty()) { + int32 rest; + status = _AddBlockRun(transaction, &data, run, Size(), &rest, + beginBlock, endBlock); + if (status != B_OK) + return status; + + if (rest == 0) + continue; + + // we failed to add the block_run as it doesn't fulfill the + // constraints in the double indirect section. + } + } + + // We can't reuse this block_run, add it to the block_run buffer. + if (buffer.IsFull()) { + status = _WriteBufferedRuns(transaction, buffer, &data, Size(), + false, beginBlock, endBlock); + if (status != B_OK) + return status; + } + buffer.PutRun(run); + + if (_BlockRunOutsideRange(run, beginBlock, endBlock)) { + // The block run was outside of the allowed range, which means + // that we can free it now without running the risk of + // reallocating it and overwriting its content. 
+ status = fVolume->Free(transaction, run); + if (status != B_OK) + return status; + } else { + // free the run later + status = runsToFree.Push(run); + if (status != B_OK) + return status; + } + } + + // write all blocks remaining in the buffer to disk + status = _WriteBufferedRuns(transaction, buffer, &data, Size(), true, + beginBlock, endBlock); + if (status != B_OK) + return status; + + ASSERT(data.Size() == Size()); + + // free the blocks we didn't end up reusing + block_run toFree; + while (runsToFree.Pop(&toFree)) { + status = fVolume->Free(transaction, toFree); + if (status != B_OK) + return status; + } + + fNode.data = data; + + return transaction.Done(); +} + + +/*! Check if the file stream is in the range from \a beginBlock up to + but not including \a endBlock. +*/ +status_t +Inode::StreamInRange(bool& inRange, off_t beginBlock, off_t endBlock) const +{ + inRange = false; + // now we can simply return if we find a run which is not in the range + + // check that the indirection blocks are in the allowed range + const data_stream* data = &fNode.data; + + if (!data->indirect.IsZero() + && !_BlockRunInRange(data->indirect, beginBlock, endBlock)) { + return B_OK; + } + + if (!data->double_indirect.IsZero()) { + if (!_BlockRunInRange(data->double_indirect, beginBlock, endBlock)) + return B_OK; + + int32 runsPerBlock = fVolume->BlockSize() / sizeof(block_run); + bool endOfStream = false; + CachedBlock cached(fVolume); + + for (uint16 blockIndex = 0; + blockIndex < data->double_indirect.Length(); blockIndex++) { + // get the double indirect array runs in this block + off_t block = fVolume->ToBlock(data->double_indirect) + blockIndex; + const block_run* runArray = (const block_run*)cached.SetTo(block); + + for (int32 i = 0; i < runsPerBlock; i++) { + if (runArray[i].IsZero()) { + endOfStream = true; + break; + } + + if (!_BlockRunInRange(runArray[i], beginBlock, endBlock)) + return B_OK; + } + + if (endOfStream) + break; + } + } + + // check that the block_run's
with file data are in the range + off_t position = 0; + while (true) { + block_run run; + status_t status = _GetNextBlockRun(position, run); + if (status == B_ENTRY_NOT_FOUND) + break; + + if (status != B_OK) + return status; + + if (!_BlockRunInRange(run, beginBlock, endBlock)) + return B_OK; + } + + // all stream blocks were in the range + + inRange = true; + return B_OK; +} + + // #pragma mark - TransactionListener implementation diff --git a/src/add-ons/kernel/file_systems/bfs/Inode.h b/src/add-ons/kernel/file_systems/bfs/Inode.h index 46d1a13..46148c4 100644 --- a/src/add-ons/kernel/file_systems/bfs/Inode.h +++ b/src/add-ons/kernel/file_systems/bfs/Inode.h @@ -21,6 +21,9 @@ class Index; class InodeAllocator; class NodeGetter; class Transaction; +class BlockRunBuffer; + + class Inode : public TransactionListener { @@ -136,7 +139,7 @@ public: // manipulating the data stream status_t FindBlockRun(off_t pos, block_run& run, - off_t& offset); + off_t& offset) const; status_t ReadAt(off_t pos, uint8* buffer, size_t* length); status_t WriteAt(Transaction& transaction, off_t pos, @@ -149,6 +152,11 @@ public: status_t TrimPreallocation(Transaction& transaction); bool NeedsTrimming() const; + status_t MoveStream(off_t beginBlock, off_t endBlock); + status_t StreamInRange(bool& inRange, off_t beginBlock, + off_t endBlock) const; + + status_t Free(Transaction& transaction); status_t Sync(); @@ -243,7 +251,8 @@ private: off_t size, off_t& offset, off_t& max); status_t _AllocateBlockArray(Transaction& transaction, block_run& run, size_t length, - bool variableSize = false); + bool variableSize = false, + off_t beginBlock = 0, off_t endBlock = 0); status_t _GrowStream(Transaction& transaction, off_t size); status_t _ShrinkStream(Transaction& transaction, @@ -253,6 +262,19 @@ private: off_t targetSize, int32* rest = NULL, off_t beginBlock = 0, off_t endBlock = 0); + // moving the data stream + status_t _WriteBufferedRuns(Transaction& transaction, + BlockRunBuffer& buffer, 
data_stream* data, + off_t targetSize, bool flush = false, + off_t beginBlock = 0, off_t endBlock = 0); + + status_t _GetNextBlockRun(off_t& position, + block_run& run) const; + bool _BlockRunInRange(block_run run, + off_t beginBlock, off_t endBlock) const; + bool _BlockRunOutsideRange(block_run run, + off_t beginBlock, off_t endBlock) const; + private: rw_lock fLock; Volume* fVolume; diff --git a/src/add-ons/kernel/file_systems/bfs/Volume.h b/src/add-ons/kernel/file_systems/bfs/Volume.h index 6d87a0f..a9efe7b 100644 --- a/src/add-ons/kernel/file_systems/bfs/Volume.h +++ b/src/add-ons/kernel/file_systems/bfs/Volume.h @@ -108,7 +108,8 @@ public: block_run& run); status_t Allocate(Transaction& transaction, Inode* inode, off_t numBlocks, block_run& run, - uint16 minimum = 1); + uint16 minimum = 1, off_t beginBlock = 0, + off_t endBlock = 0); status_t Free(Transaction& transaction, block_run run); void SetCheckingThread(thread_id thread) { fCheckingThread = thread; } @@ -207,10 +208,10 @@ Volume::AllocateForInode(Transaction& transaction, const block_run* parent, inline status_t Volume::Allocate(Transaction& transaction, Inode* inode, off_t numBlocks, - block_run& run, uint16 minimum) + block_run& run, uint16 minimum, off_t beginBlock, off_t endBlock) { return fBlockAllocator.Allocate(transaction, inode, numBlocks, run, - minimum); + minimum, beginBlock, endBlock); }