[haiku-commits] haiku: hrev44950 - in src/bin/bfs_tools: . lib

  • From: axeld@xxxxxxxxxxxxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sun, 2 Dec 2012 21:04:28 +0100 (CET)

hrev44950 adds 4 changesets to branch 'master'
old head: 2ede175119bbd0fd38bf8268781a9a4be192c5c6
new head: eb8a1243258c9a652020087ceeda52d0b17e91fb
overview: http://cgit.haiku-os.org/haiku/log/?qt=range&q=eb8a124+%5E2ede175

----------------------------------------------------------------------------

772c970: Added Hashtable::Size() method.
  
  * Automatic whitespace cleanup.

91d335e: Automatic whitespace cleanup.

fd919c2: bfs_tools: Disk is now using a BBufferIO.
  
  * Automatic whitespace cleanup.

eb8a124: bfs_tools: recover is now using a set to reduce memory.
  
  * The hashtable stored complete Inode objects (albeit without the actual
    block).
  * Now we only store the block_run, which should reduce the memory footprint
    considerably; before, "recover" could easily run out of memory. In any
    case, a 64-bit version would still make sense to have :-)
  * Saved an extra hash table traversal by counting the node types directly.
  * This isn't that well tested yet, though.

                                   [ Axel Dörfler <axeld@xxxxxxxxxxxxxxxx> ]

----------------------------------------------------------------------------

7 files changed, 174 insertions(+), 121 deletions(-)
src/bin/bfs_tools/lib/Disk.cpp      |  51 +++++----
src/bin/bfs_tools/lib/Disk.h        |   9 +-
src/bin/bfs_tools/lib/Hashtable.cpp |  25 ++--
src/bin/bfs_tools/lib/Hashtable.h   |   3 +-
src/bin/bfs_tools/lib/Inode.cpp     |   8 +-
src/bin/bfs_tools/lib/Inode.h       |   4 +-
src/bin/bfs_tools/recover.cpp       | 195 +++++++++++++++++++-------------

############################################################################

Commit:      772c9704b91df7bd1150d7c80d35e606798cc251
URL:         http://cgit.haiku-os.org/haiku/commit/?id=772c970
Author:      Axel Dörfler <axeld@xxxxxxxxxxxxxxxx>
Date:        Sun Dec  2 19:53:47 2012 UTC

Added Hashtable::Size() method.

* Automatic whitespace cleanup.

----------------------------------------------------------------------------

diff --git a/src/bin/bfs_tools/lib/Hashtable.cpp b/src/bin/bfs_tools/lib/Hashtable.cpp
index 0a0e2ba..fe1c637 100644
--- a/src/bin/bfs_tools/lib/Hashtable.cpp
+++ b/src/bin/bfs_tools/lib/Hashtable.cpp
@@ -40,11 +40,11 @@ Hashtable::Hashtable(int capacity, float loadFactor)
 
        if (!capacity)
                capacity = 1;
-       
+
        if (!(fTable = (struct Entry **)malloc(capacity * sizeof(void *))))
                return;
        memset(fTable,0,capacity * sizeof(void *));
-       
+
        fThreshold = (int)(capacity * loadFactor);
        fModCount = 0;
        fLoadFactor = loadFactor;
@@ -58,7 +58,7 @@ Hashtable::Hashtable(int capacity, float loadFactor)
 Hashtable::~Hashtable()
 {
        struct Entry **table = fTable;
-       
+
        for(int32 index = fCapacity;--index >= 0;)
        {
                struct Entry *entry,*next;
@@ -136,7 +136,7 @@ void *Hashtable::Remove(const void *key)
        table = fTable;
        hash = (func = fHashFunc)(key);
        index = hash % fCapacity;
-       
+
        for(entry = table[index],prev = NULL;entry;entry = entry->next)
        {
                if ((func(entry->key) == hash) && fCompareFunc(entry->key,key))
@@ -148,7 +148,7 @@ void *Hashtable::Remove(const void *key)
                                prev->next = entry->next;
                        else
                                table[index] = entry->next;
-                       
+
                        fCount--;
                        value = entry->value;
                        delete entry;
@@ -180,7 +180,7 @@ status_t Hashtable::GetNextEntry(void **value)
                *value = fIteratorEntry->value;
                return B_OK;
        }
-       
+
        return B_ENTRY_NOT_FOUND;
 }
 
@@ -227,10 +227,17 @@ Hashtable::MakeEmpty(int8 keyMode,int8 valueMode)
 }
 
 
+size_t
+Hashtable::Size() const
+{
+       return fCount;
+}
+
+
 /** The hash table will be doubled in size, and rebuild.
  *  @return true on success
  */
- 
+
 bool Hashtable::Rehash()
 {
        uint32 (*hashCode)(const void *) = fHashFunc;
@@ -270,10 +277,10 @@ Hashtable::Entry *Hashtable::GetHashEntry(const void *key)
 {
        Entry **table,*entry;
        uint32 hash,(*func)(const void *);
-       
+
        table = fTable;
        hash = (func = fHashFunc)(key);
-       
+
        for(entry = table[hash % fCapacity];entry;entry = entry->next)
        {
                if ((func(entry->key) == hash) && fCompareFunc(entry->key,key))
diff --git a/src/bin/bfs_tools/lib/Hashtable.h b/src/bin/bfs_tools/lib/Hashtable.h
index 30c0b61..99bcd7d 100644
--- a/src/bin/bfs_tools/lib/Hashtable.h
+++ b/src/bin/bfs_tools/lib/Hashtable.h
@@ -33,7 +33,8 @@ class Hashtable
                void    Rewind();
 
                void    MakeEmpty(int8 keyMode = HASH_EMPTY_NONE,int8 valueMode 
= HASH_EMPTY_NONE);
-               
+               size_t  Size() const;
+
        protected:
                class Entry
                {

############################################################################

Commit:      91d335e0b78d01e9029fc6f37e8ac3d1f2d87b51
URL:         http://cgit.haiku-os.org/haiku/commit/?id=91d335e
Author:      Axel Dörfler <axeld@xxxxxxxxxxxxxxxx>
Date:        Sun Dec  2 19:54:16 2012 UTC

Automatic whitespace cleanup.

----------------------------------------------------------------------------

diff --git a/src/bin/bfs_tools/lib/Inode.cpp b/src/bin/bfs_tools/lib/Inode.cpp
index 7b23916..4fab2fd 100644
--- a/src/bin/bfs_tools/lib/Inode.cpp
+++ b/src/bin/bfs_tools/lib/Inode.cpp
@@ -91,7 +91,7 @@ Inode::_Unset()
        free(fPath);
        fPath = NULL;
 
-       delete fAttributes; 
+       delete fAttributes;
        fAttributes = NULL;
 }
 
@@ -522,7 +522,7 @@ Inode::CopyAttributesTo(BNode *node)
                                strerror(written));
                } else if ((size_t)written < size) {
                        printf("could only write %ld bytes (from %ld) at 
attribute \"%s\"\n",
-                               written, size, name); 
+                               written, size, name);
                }
        }
 
@@ -1075,7 +1075,7 @@ Directory::CopyTo(const char *root, bool fullPath, Inode::Source *source)
        BPath path(root);
        if (fullPath && Path(source))
                path.Append(Path(source));
-       
+
        char *name = (char *)Name();
        if (name != NULL) {
                // changes the filename in the inode buffer (for deleted 
entries)
@@ -1094,7 +1094,7 @@ Directory::CopyTo(const char *root, bool fullPath, Inode::Source *source)
        BDirectory directory;
        if ((status = entry.GetParent(&directory)) < B_OK)
                return status;
-       
+
        status = directory.CreateDirectory(path.Leaf(), NULL);
        if (status < B_OK && status != B_FILE_EXISTS)
                return status;
diff --git a/src/bin/bfs_tools/lib/Inode.h b/src/bin/bfs_tools/lib/Inode.h
index 3a24a40..f06e758 100644
--- a/src/bin/bfs_tools/lib/Inode.h
+++ b/src/bin/bfs_tools/lib/Inode.h
@@ -114,7 +114,7 @@ class DataStream : public Inode, public BPositionIO {
                virtual off_t           Position() const;
 
                virtual status_t        SetSize(off_t size);
-       
+
        private:
                int32           fCurrent;
                int32           fLevel;
@@ -156,7 +156,7 @@ class Directory : public DataStream {
                Directory(Disk *disk, bfs_inode *inode, bool ownBuffer = true);
                Directory(const Inode &inode);
                ~Directory();
-               
+
                virtual status_t        InitCheck();
                virtual status_t        CopyTo(const char *path, bool fullPath 
= true,
                                                                Inode::Source 
*source = NULL);

############################################################################

Commit:      fd919c2c1a167430310ae36a6aff7674e5c6b400
URL:         http://cgit.haiku-os.org/haiku/commit/?id=fd919c2
Author:      Axel Dörfler <axeld@xxxxxxxxxxxxxxxx>
Date:        Sun Dec  2 19:54:50 2012 UTC

bfs_tools: Disk is now using a BBufferIO.

* Automatic whitespace cleanup.

----------------------------------------------------------------------------

diff --git a/src/bin/bfs_tools/lib/Disk.cpp b/src/bin/bfs_tools/lib/Disk.cpp
index 1e2af9d..5571a29 100644
--- a/src/bin/bfs_tools/lib/Disk.cpp
+++ b/src/bin/bfs_tools/lib/Disk.cpp
@@ -95,6 +95,7 @@ Cache<block_run>::Cacheable *BlockRunCache::NewCacheable(block_run run)
 
 Disk::Disk(const char *deviceName, bool rawMode, off_t start, off_t stop)
        :
+       fBufferedFile(NULL),
        fRawDiskOffset(0),
        fSize(0LL),
        fCache(this),
@@ -121,6 +122,7 @@ Disk::Disk(const char *deviceName, bool rawMode, off_t start, off_t stop)
                //fprintf(stderr,"Could not open file: 
%s\n",strerror(fFile.InitCheck()));
                return;
        }
+       fBufferedFile = new BBufferIO(&fFile, 1024 * 1024, false);
 
        int device = open(deviceName, O_RDONLY);
        if (device < B_OK) {
@@ -153,7 +155,7 @@ Disk::Disk(const char *deviceName, bool rawMode, off_t start, off_t stop)
                return;
        }
 
-       if (fFile.ReadAt(512 + fRawDiskOffset, &fSuperBlock,
+       if (fBufferedFile->ReadAt(512 + fRawDiskOffset, &fSuperBlock,
                        sizeof(disk_super_block)) < 1)
                fprintf(stderr,"Disk: Could not read super block\n");
 
@@ -163,6 +165,7 @@ Disk::Disk(const char *deviceName, bool rawMode, off_t start, off_t stop)
 
 Disk::~Disk()
 {
+       delete fBufferedFile;
 }
 
 
@@ -171,7 +174,7 @@ status_t Disk::InitCheck()
        status_t status = fFile.InitCheck();
        if (status == B_OK)
                return fSize == 0LL ? B_ERROR : B_OK;
-       
+
        return status;
 }
 
@@ -201,7 +204,7 @@ uint8 *Disk::ReadBlockRun(block_run run)
        CacheableBlockRun *entry = (CacheableBlockRun *)fCache.Get(run);
        if (entry)
                return entry->Data();
-       
+
        return NULL;
 }
 
@@ -234,7 +237,7 @@ Disk::DumpBootBlockToFile()
 //     #pragma mark - Superblock recovery methods
 
 
-status_t 
+status_t
 Disk::ScanForSuperBlock(off_t start, off_t stop)
 {
        printf("Disk size %Ld bytes, %.2f GB\n", fSize, 1.0 * fSize / 
(1024*1024*1024));
@@ -256,7 +259,7 @@ Disk::ScanForSuperBlock(off_t start, off_t stop)
                if (((offset-start) % (blockSize * 100)) == 0)
                        printf("  %12Ld, %.2f GB     %s1A\n",offset,1.0 * 
offset / (1024*1024*1024),escape);
 
-               ssize_t bytes = fFile.ReadAt(offset, buffer, blockSize + 1024);
+               ssize_t bytes = fBufferedFile->ReadAt(offset, buffer, blockSize 
+ 1024);
                if (bytes < B_OK)
                {
                        fprintf(stderr,"Could not read from device: %s\n", 
strerror(bytes));
@@ -322,7 +325,7 @@ Disk::ScanForSuperBlock(off_t start, off_t stop)
        // ToDo: free the other disk infos
 
        fRawDiskOffset = info->offset;
-       fFile.Seek(fRawDiskOffset, SEEK_SET);
+       fBufferedFile->Seek(fRawDiskOffset, SEEK_SET);
 
        if (ValidateSuperBlock(info->super_block))
                fSize = info->super_block.block_size * 
info->super_block.block_size;
@@ -382,7 +385,7 @@ Disk::RecreateSuperBlock()
 
        printf("\tblock size = %ld\n",BlockSize());
 
-       strcpy(fSuperBlock.name,"recovered");   
+       strcpy(fSuperBlock.name,"recovered");
        fSuperBlock.magic1 = SUPER_BLOCK_MAGIC1;
        fSuperBlock.fs_byte_order = SUPER_BLOCK_FS_LENDIAN;
        fSuperBlock.block_shift = get_shift(BlockSize());
@@ -422,7 +425,7 @@ Disk::RecreateSuperBlock()
                GetNextSpecialInode(buffer,&offset,offset + 32LL * 65536 * 
BlockSize(),true);
 
                if (fValidOffset == 0LL)
-               {                       
+               {
                        fprintf(stderr,"FATAL ERROR: Could not find valid 
inode!\n");
                        return B_ERROR;
                }
@@ -439,7 +442,7 @@ Disk::RecreateSuperBlock()
        fSuperBlock.num_ags = 
divide_roundup(fSuperBlock.num_blocks,allocationGroupSize);
 
        // calculate rest of log area
-       
+
        fSuperBlock.log_blocks.allocation_group = fLogStart / 
allocationGroupSize;
        fSuperBlock.log_blocks.start = fLogStart - 
fSuperBlock.log_blocks.allocation_group * allocationGroupSize;
        fSuperBlock.log_blocks.length = LogSize();      // assumed length of 
2048 blocks
@@ -488,7 +491,7 @@ Disk::DetermineBlockSize()
 
        // read a quarter of the drive at maximum
        for (; offset < (fSize >> 2); offset += 1024) {
-               if (fFile.ReadAt(offset, buffer, sizeof(buffer)) < B_OK) {
+               if (fBufferedFile->ReadAt(offset, buffer, sizeof(buffer)) < 
B_OK) {
                        fprintf(stderr, "could not read from device (offset = 
%Ld, "
                                "size = %ld)!\n", offset, sizeof(buffer));
                        status = B_IO_ERROR;
@@ -547,7 +550,7 @@ Disk::GetNextSpecialInode(char *buffer, off_t *_offset, off_t end,
        bfs_inode *inode = (bfs_inode *)buffer;
 
        for (; offset < end; offset += BlockSize()) {
-               if (fFile.ReadAt(offset, buffer, 1024) < B_OK) {
+               if (fBufferedFile->ReadAt(offset, buffer, 1024) < B_OK) {
                        fprintf(stderr,"could not read from device (offset = 
%Ld, size = %d)!\n",offset,1024);
                        *_offset = offset;
                        return B_IO_ERROR;
@@ -565,7 +568,7 @@ Disk::GetNextSpecialInode(char *buffer, off_t *_offset, off_t end,
                        && offset >= (BlockSize() * (fLogStart + LogSize()))) {
                        fValidBlockRun = inode->inode_num;
                        fValidOffset = offset;
-                       
+
                        if (skipAfterValidInode)
                                return B_OK;
                }
@@ -653,31 +656,31 @@ Disk::ScanForIndexAndRoot(bfs_inode *indexDir,bfs_inode *rootDir)
        if (!root) {
                printf("WARNING: Could not find root node at common places!\n");
                printf("\tScanning log area for root node\n");
-               
+
                off_t logOffset = ToOffset(fSuperBlock.log_blocks);
                if (GetNextSpecialInode(buffer,&logOffset,logOffset + LogSize() 
* BlockSize()) == B_OK)
                {
                        SaveInode(inode,&indices,indexDir,&root,rootDir);
-                       
+
                        printf("root node at: 0x%Lx (DiskProbe)\n",logOffset / 
512);
-                       //fFile.ReadAt(logOffset + BlockSize(),buffer,1024);
+                       //fBufferedFile->ReadAt(logOffset + 
BlockSize(),buffer,1024);
                        //if (*(uint32 *)buffer == BPLUSTREE_MAGIC)
                        //{
                        //      puts("\t\tnext block in log contains a 
bplustree!");
                        //}
                }
        }
-       
+
        /*if (!root)
        {
                char txt[64];
                printf("Should I perform a deeper search (that will take some 
time) (Y/N) [N]? ");
                gets(txt);
-               
+
                if (!strcasecmp("y",txt))
                {
                        // search not so common places for the root node (all 
places)
-                       
+
                        if (indices)
                                offset += BlockSize();  // the block after the 
indices inode
                        else
@@ -700,28 +703,28 @@ Disk::ScanForIndexAndRoot(bfs_inode *indexDir,bfs_inode *rootDir)
 ssize_t
 Disk::Read(void *buffer, size_t size)
 {
-       return fFile.Read(buffer, size);
+       return fBufferedFile->Read(buffer, size);
 }
 
 
 ssize_t
 Disk::Write(const void *buffer, size_t size)
 {
-       return fFile.Write(buffer, size);
+       return fBufferedFile->Write(buffer, size);
 }
 
 
 ssize_t
 Disk::ReadAt(off_t pos, void *buffer, size_t size)
 {
-       return fFile.ReadAt(pos + fRawDiskOffset, buffer, size);
+       return fBufferedFile->ReadAt(pos + fRawDiskOffset, buffer, size);
 }
 
 
 ssize_t
 Disk::WriteAt(off_t pos, const void *buffer, size_t size)
 {
-       return fFile.WriteAt(pos + fRawDiskOffset, buffer, size);
+       return fBufferedFile->WriteAt(pos + fRawDiskOffset, buffer, size);
 }
 
 
@@ -731,14 +734,14 @@ Disk::Seek(off_t position, uint32 seekMode)
        // ToDo: only correct for seekMode == SEEK_SET, right??
        if (seekMode != SEEK_SET)
                puts("OH NO, I AM BROKEN!");
-       return fFile.Seek(position + fRawDiskOffset, seekMode);
+       return fBufferedFile->Seek(position + fRawDiskOffset, seekMode);
 }
 
 
 off_t
 Disk::Position() const
 {
-       return fFile.Position() - fRawDiskOffset;
+       return fBufferedFile->Position() - fRawDiskOffset;
 }
 
 
diff --git a/src/bin/bfs_tools/lib/Disk.h b/src/bin/bfs_tools/lib/Disk.h
index d1e8997..890a14e 100644
--- a/src/bin/bfs_tools/lib/Disk.h
+++ b/src/bin/bfs_tools/lib/Disk.h
@@ -6,6 +6,7 @@
 */
 
 
+#include <BufferIO.h>
 #include <File.h>
 #include <Path.h>
 
@@ -23,9 +24,9 @@ class BlockRunCache : public Cache<block_run>
        public:
                BlockRunCache(Disk *disk);
 //             ~BlockRunCache();
-               
+
                virtual Cacheable *NewCacheable(block_run run);
-       
+
        protected:
                Disk    *fDisk;
 };
@@ -91,7 +92,9 @@ class Disk : public BPositionIO
 
                status_t                        LoadBootBlock();
 
+       protected:
                BFile                           fFile;
+               BBufferIO*                      fBufferedFile;
                BPath                           fPath;
                off_t                           fRawDiskOffset;
                off_t                           fSize;
@@ -101,7 +104,7 @@ class Disk : public BPositionIO
                block_run                       fValidBlockRun;
                off_t                           fValidOffset;
                off_t                           fLogStart;
-               
+
                BlockRunCache           fCache;
 
                bool                            fRawMode;
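
For context: BBufferIO is a BPositionIO wrapper that buffers reads and
writes to the stream it wraps, which is why Disk can forward Read(),
Write(), ReadAt(), WriteAt(), Seek() and Position() to it unchanged. Below
is a minimal, self-contained sketch of the same pattern; the device path
and error handling are assumptions made for this example only, while the
1 MB buffer size and the "false" (do not take ownership of the BFile)
argument mirror the constructor call in Disk::Disk() above.

#include <BufferIO.h>
#include <File.h>

#include <stdio.h>

int
main()
{
	// Illustrative device path, not taken from the commit.
	BFile file("/dev/disk/ata/0/master/raw", B_READ_ONLY);
	if (file.InitCheck() != B_OK)
		return 1;

	// 1 MB buffer; "false" keeps ownership of the BFile with the caller,
	// so the BBufferIO will not delete it.
	BBufferIO buffered(&file, 1024 * 1024, false);

	// Reads go through the buffer, so repeated small ReadAt() calls in
	// the same area are served from memory instead of the device.
	char block[512];
	ssize_t bytesRead = buffered.ReadAt(512, block, sizeof(block));
	if (bytesRead < (ssize_t)sizeof(block))
		fprintf(stderr, "short read or error\n");

	return 0;
}

The BFile stays a member of Disk, and only the BBufferIO is deleted in
~Disk(), matching the ownership choice made in the constructor.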

############################################################################

Revision:    hrev44950
Commit:      eb8a1243258c9a652020087ceeda52d0b17e91fb
URL:         http://cgit.haiku-os.org/haiku/commit/?id=eb8a124
Author:      Axel Dörfler <axeld@xxxxxxxxxxxxxxxx>
Date:        Sun Dec  2 20:00:18 2012 UTC

bfs_tools: recover is now using a set to reduce memory.

* The hashtable stored complete Inode objects (albeit without the actual block).
* Now we only store the block_run, which should reduce the memory footprint
  considerably; before, "recover" could easily run out of memory. In any case,
  a 64-bit version would still make sense to have :-)
* Saved an extra hash table traversal by counting the node types directly.
* This isn't that well tested yet, though.

----------------------------------------------------------------------------

diff --git a/src/bin/bfs_tools/recover.cpp b/src/bin/bfs_tools/recover.cpp
index 7a114ad..9a2677e 100644
--- a/src/bin/bfs_tools/recover.cpp
+++ b/src/bin/bfs_tools/recover.cpp
@@ -5,6 +5,8 @@
 //!    recovers corrupt BFS disks
 
 
+#include <set>
+
 #include "Disk.h"
 #include "Inode.h"
 #include "Hashtable.h"
@@ -30,6 +32,10 @@ bool gRawMode = false;
 bool gVerbose = false;
 
 
+// TODO: add a cache for all inodes
+typedef std::set<block_run> RunSet;
+
+
 class InodeHashtable {
        public:
                InodeHashtable(int capacity)
@@ -113,7 +119,7 @@ class InodeHashtable {
 
                static uint32 BlockRunHash(const block_run *run)
                {
-                       return run->allocation_group << 16 | run->start;
+                       return (run->allocation_group << 16) | run->start;
                }
 
                static bool BlockRunCompare(const block_run *runA, const 
block_run *runB)
@@ -127,11 +133,14 @@ class InodeHashtable {
                uint32          fPercentUsed;
 };
 
+
 class InodeGetter {
        public:
-               InodeGetter(InodeHashtable& hashtable, block_run run)
+               InodeGetter(Disk& disk, block_run run)
                {
-                       fInode = hashtable.Get(run);
+                       fInode = Inode::Factory(&disk, run);
+                       if (fInode != NULL)
+                               fInode->AcquireBuffer();
                }
 
                ~InodeGetter()
@@ -140,14 +149,22 @@ class InodeGetter {
                                fInode->ReleaseBuffer();
                }
 
-               Inode* Node() { return fInode; }
+               Inode* Node() const
+               {
+                       return fInode;
+               }
+
+               void Detach()
+               {
+                       fInode = NULL;
+               }
 
        private:
                Inode*  fInode;
 };
 
 
-InodeHashtable gHashtable(1000);
+RunSet gMainInodes;
        // contains all inodes found on disk in the general data area
 InodeHashtable gLogged(50);
        // contains all inodes found in the log area
@@ -156,32 +173,56 @@ InodeHashtable gMissingEmpty(25);
 
 
 class HashtableInodeSource : public Inode::Source {
-       public:
-               virtual Inode *InodeAt(block_run run)
-               {
-                       Inode *inode;
-                       if ((inode = gHashtable.Get(run)) != NULL)
-                               return inode;
+public:
+       HashtableInodeSource(Disk& disk)
+               :
+               fDisk(disk)
+       {
+       }
 
-                       if ((inode = gLogged.Get(run)) != NULL)
-                               return inode;
+       virtual Inode *InodeAt(block_run run)
+       {
+               Inode *inode;
+               if ((inode = gLogged.Get(run)) != NULL)
+                       return inode;
 
-                       if ((inode = gMissing.Get(run)) != NULL)
-                               return inode;
+               if ((inode = gMissing.Get(run)) != NULL)
+                       return inode;
 
+               if (gMainInodes.find(run) == gMainInodes.end())
                        return NULL;
-               }
+
+               return Inode::Factory(&fDisk, run);
+       }
+
+private:
+       Disk&   fDisk;
 };
 
 
+bool
+operator<(const block_run& a, const block_run& b)
+{
+       return a.allocation_group < b.allocation_group
+               || (a.allocation_group == b.allocation_group && a.start < 
b.start);
+}
+
+
 void
-collectInodes(Disk &disk, InodeHashtable &hashtable, off_t start, off_t end)
+collectInodes(Disk& disk, RunSet* set, InodeHashtable* hashTable, off_t start,
+       off_t end)
 {
        char buffer[8192];
        Inode inode(&disk, (bfs_inode *)buffer, false);
+
+       off_t directories = 0LL;
+       off_t directorySize = 0LL;
+       off_t files = 0LL;
+       off_t fileSize = 0LL;
+       off_t symlinks = 0LL;
        off_t count = 0LL;
-       off_t position = start;
 
+       off_t position = start;
        bigtime_t lastUpdate = system_time();
 
        for (off_t offset = start; offset < end; offset += sizeof(buffer)) {
@@ -203,7 +244,21 @@ collectInodes(Disk &disk, InodeHashtable &hashtable, off_t start, off_t end)
                                if (node != NULL) {
                                        if (gVerbose)
                                                printf("  node: %Ld \"%s\"\n", 
position, node->Name());
-                                       hashtable.Insert(node);
+
+                                       if (set != NULL)
+                                               set->insert(node->BlockRun());
+                                       else
+                                               hashTable->Insert(node);
+
+                                       if (node->IsDirectory()) {
+                                               directories++;
+                                               directorySize += node->Size();
+                                       } else if (node->IsFile()) {
+                                               files++;
+                                               fileSize += node->Size();
+                                       } else if (node->IsSymlink()) {
+                                               symlinks++;
+                                       }
                                        count++;
                                } else if (gVerbose) {
                                        printf("\nunrecognized inode:");
@@ -213,40 +268,18 @@ collectInodes(Disk &disk, InodeHashtable &hashtable, off_t start, off_t end)
                        position += disk.BlockSize();
                }
                if (system_time() - lastUpdate > 500000) {
-                       printf("  block %Ld (%Ld%%), %Ld inodes\33[1A\n", 
offset, 100 * (offset - start) / (end - start), count);
+                       printf("  block %Ld (%Ld%%), %Ld inodes\33[1A\n", 
offset,
+                               100 * (offset - start) / (end - start), count);
                        lastUpdate = system_time();
                }
        }
        printf("\n%Ld inodes found.\n", count);
 
-       Inode *node;
-       off_t directories = 0LL;
-       off_t directorySize = 0LL;
-       off_t files = 0LL;
-       off_t fileSize = 0LL;
-       off_t symlinks = 0LL;
-       count = 0LL;
-
-       hashtable.Rewind();
-       while (hashtable.GetNextEntry(&node) == B_OK) {
-               if (node->IsDirectory()) {
-                       directories++;
-                       directorySize += node->Size();
-               } else if (node->IsFile()) {
-                       files++;
-                       fileSize += node->Size();
-               } else if (node->IsSymlink()) {
-                       symlinks++;
-               }
-               count++;
-               hashtable.Release(node);
-       }
-
        printf("\n%20Ld directories found (total of %Ld bytes)\n"
           "%20Ld files found (total of %Ld bytes)\n"
           "%20Ld symlinks found\n"
           "--------------------\n"
-          "%20Ld inodes total found in hashtable.\n",
+          "%20Ld inodes total found.\n",
           directories, directorySize, files, fileSize, symlinks, count);
 }
 
@@ -259,8 +292,8 @@ collectLogInodes(Disk &disk)
        off_t end = offset + (disk.Log().length << disk.BlockShift());
 
        printf("\nsearching from %Ld to %Ld (log area)\n",offset,end);
-       
-       collectInodes(disk, gLogged, offset, end);
+
+       collectInodes(disk, NULL, &gLogged, offset, end);
 }
 
 
@@ -270,39 +303,41 @@ collectRealInodes(Disk &disk)
        // first block after bootblock, bitmap, and log
        off_t offset = disk.ToOffset(disk.Log()) + (disk.Log().length
                << disk.BlockShift());
-       off_t end = /*(17LL << disk.SuperBlock()->ag_shift);
-       if (end > disk.NumBlocks())
-               end = */disk.NumBlocks();
-       end *= disk.BlockSize();
+       off_t end = (off_t)disk.NumBlocks() << disk.BlockShift();
 
        printf("\nsearching from %Ld to %Ld (main area)\n", offset, end);
 
-       collectInodes(disk, gHashtable, offset, end);
+       collectInodes(disk, &gMainInodes, NULL, offset, end);
 }
 
 
 Directory *
 getNameIndex(Disk &disk)
 {
-       InodeGetter getter(gHashtable, disk.Indices());
+       InodeGetter getter(disk, disk.Indices());
        Directory *indices = dynamic_cast<Directory *>(getter.Node());
 
        block_run run;
-       if (indices && indices->FindEntry("name", &run) == B_OK)
-               return dynamic_cast<Directory *>(gHashtable.Get(run));
+       if (indices != NULL && indices->FindEntry("name", &run) == B_OK) {
+               InodeGetter getter(disk, run);
+               Inode* node = getter.Node();
+               getter.Detach();
+               return dynamic_cast<Directory *>(node);
+       }
 
        // search name index
 
-       Inode *node;
+       RunSet::iterator iterator = gMainInodes.begin();
+       for (; iterator != gMainInodes.end(); iterator++) {
+               InodeGetter getter(disk, *iterator);
+               Inode* node = getter.Node();
 
-       gHashtable.Rewind();
-       for (; gHashtable.GetNextEntry(&node) == B_OK; 
gHashtable.Release(node)) {
                if (!node->IsIndex() || node->Name() == NULL)
                        continue;
-               if (!strcmp(node->Name(), "name") && node->Mode() & 
S_STR_INDEX) {
+               if (!strcmp(node->Name(), "name") && node->Mode() & S_STR_INDEX)
                        return dynamic_cast<Directory *>(node);
-               }
        }
+
        return NULL;
 }
 
@@ -317,7 +352,7 @@ checkDirectoryContents(Disk& disk, Directory *dir)
 
        while (dir->GetNextEntry(name, &run) == B_OK) {
                if (run == dir->BlockRun() || run == dir->Parent()
-                       || gHashtable.Contains(&run))
+                       || gMainInodes.find(run) != gMainInodes.end())
                        continue;
 
                Inode *missing = gMissing.Get(run);
@@ -385,8 +420,12 @@ checkStructure(Disk &disk)
        Inode *node;
 
        off_t count = 0;
-       gHashtable.Rewind();
-       while (gHashtable.GetNextEntry(&node) == B_OK) {
+
+       RunSet::iterator iterator = gMainInodes.begin();
+       for (; iterator != gMainInodes.end(); iterator++) {
+               InodeGetter getter(disk, *iterator);
+               Inode* node = getter.Node();
+
                count++;
                if ((count % 50) == 0)
                        fprintf(stderr, "%Ld inodes processed...\33[1A\n", 
count);
@@ -399,7 +438,7 @@ checkStructure(Disk &disk)
                // check for the parent directory
 
                block_run run = node->Parent();
-               InodeGetter parentGetter(gHashtable, run);
+               InodeGetter parentGetter(disk, run);
                Inode *parentNode = parentGetter.Node();
 
                Directory *dir = dynamic_cast<Directory *>(parentNode);
@@ -503,13 +542,13 @@ checkStructure(Disk &disk)
 //                                     printf("node \"%s\": parent directory 
\"%s\" error: %s\n",node->Name(),dir->Name(),strerror(status));
 //                     }
 
-               // check for attributes 
+               // check for attributes
 
                run = node->Attributes();
                if (!run.IsZero()) {
                        //printf("node \"%s\" (%ld, %d, mode = %010lo): has 
attribute 
dir!\n",node->Name(),node->BlockRun().allocation_group,node->BlockRun().start,node->Mode());
 
-                       if (!gHashtable.Contains(&run)) {
+                       if (gMainInodes.find(run) == gMainInodes.end()) {
                                if (gVerbose) {
                                        printf("node \"%s\": attributes are 
missing (%ld, %d, %d)\n",
                                                node->Name(), 
run.allocation_group, run.start, run.length);
@@ -602,7 +641,7 @@ checkStructure(Disk &disk)
                        block_run run;
                        while (dir->GetNextEntry(name, &run) == B_OK) {
                                printf("\t\"%s\" (%ld, %d, %d)\n", name,
-                                       run.allocation_group, run.start, 
run.length);   
+                                       run.allocation_group, run.start, 
run.length);
                        }
 
                        BPlusTree *tree;
@@ -631,33 +670,34 @@ checkStructure(Disk &disk)
 
 
 void
-copyInodes(const char *copyTo)
+copyInodes(Disk& disk, const char* copyTo)
 {
-       if (!copyTo)
+       if (copyTo == NULL)
                return;
 
-       Inode::Source *source = new HashtableInodeSource;
+       HashtableInodeSource source(disk);
        Inode *node;
 
        int32 count = 0;
 
-       gHashtable.Rewind();
-       while (gHashtable.GetNextEntry(&node) == B_OK) {
+       RunSet::iterator iterator = gMainInodes.begin();
+       for (; iterator != gMainInodes.end(); iterator++) {
+               InodeGetter getter(disk, *iterator);
+               Inode* node = getter.Node();
+
                if (!node->IsIndex() && !node->IsAttributeDirectory())
-                       node->CopyTo(copyTo, source);
+                       node->CopyTo(copyTo, &source);
 
                if ((++count % 500) == 0)
                        fprintf(stderr, "copied %ld files...\n", count);
-
-               gHashtable.Release(node);
        }
 
        gMissing.Rewind();
        while (gMissing.GetNextEntry(&node) == B_OK) {
                if (!node->IsIndex() && !node->IsAttributeDirectory())
-                       node->CopyTo(copyTo, source);
+                       node->CopyTo(copyTo, &source);
 
-               gHashtable.Release(node);
+               gMissing.Release(node);
        }
 }
 
@@ -802,12 +842,11 @@ main(int argc, char **argv)
        checkStructure(disk);
 
        if (argv[1])
-               copyInodes(argv[1]);
+               copyInodes(disk, argv[1]);
 
        //disk.WriteBootBlock();
        //disk.BlockBitmap()->CompareWithBackup();
 
-       gHashtable.MakeEmpty();
        gMissing.MakeEmpty();
        gLogged.MakeEmpty();
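
A note on the operator<(const block_run&, const block_run&) defined above:
std::set requires a strict weak ordering for its key type, and block_run is
a plain struct from the BFS headers that does not provide one, so
recover.cpp supplies the comparison itself (only allocation_group and start
take part in the ordering). The sketch below shows the same idea in
isolation; the block_run struct here is a stand-in with assumed field
types, since the real definition lives in the BFS headers.

#include <stdint.h>
#include <stdio.h>

#include <set>

// Stand-in for BFS's block_run, for illustration only.
struct block_run {
	int32_t		allocation_group;
	uint16_t	start;
	uint16_t	length;
};

// Strict weak ordering over (allocation_group, start), mirroring the
// operator< added in recover.cpp; length does not take part.
bool
operator<(const block_run& a, const block_run& b)
{
	return a.allocation_group < b.allocation_group
		|| (a.allocation_group == b.allocation_group && a.start < b.start);
}

typedef std::set<block_run> RunSet;

int
main()
{
	RunSet mainInodes;

	block_run run = {8, 42, 1};
	mainInodes.insert(run);
		// stores only the small block_run, not a whole Inode object

	// Membership test, the same pattern checkDirectoryContents() and
	// checkStructure() now use on gMainInodes.
	if (mainInodes.find(run) != mainInodes.end()) {
		printf("inode at (%d, %u) is known\n", (int)run.allocation_group,
			(unsigned)run.start);
	}

	return 0;
}

With only the block_run kept in the set, the Inode objects themselves are
re-created on demand through Inode::Factory() in InodeGetter, which is
where the memory saving described in the commit message comes from.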
 

