Revision: c43d071f322e Branch: default Author: cmyers@xxxxxxxxxxxxxxxxx Date: Mon Jul 28 18:58:18 2014 UTC Log: Nimas Archiver updated to properly handle segmented files; file sections saved on the fly. Image Describer now has all info necessary to show images properly; image counts per file properly stored.
http://code.google.com/p/brailleblaster/source/detail?r=c43d071f322e Modified: /src/main/org/brailleblaster/BBIni.java /src/main/org/brailleblaster/archiver/Archiver.java /src/main/org/brailleblaster/archiver/EPub3Archiver.java /src/main/org/brailleblaster/archiver/NimasArchiver.java /src/main/org/brailleblaster/perspectives/imageDescriber/ImageDescriberController.java /src/main/org/brailleblaster/perspectives/imageDescriber/document/ImageDescriber.java /src/main/org/brailleblaster/perspectives/imageDescriber/views/ImageDescriberView.java
/src/main/org/brailleblaster/util/Zipper.java ======================================= --- /src/main/org/brailleblaster/BBIni.java Thu Jul 17 20:03:48 2014 UTC +++ /src/main/org/brailleblaster/BBIni.java Mon Jul 28 18:58:18 2014 UTC @@ -47,6 +47,7 @@ import org.eclipse.swt.SWT; import org.liblouis.LibLouis; import org.liblouis.LibLouisUTDML; +import org.liblouis.LogLevel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -244,6 +245,7 @@ try { LibLouisUTDML.loadLibrary(nativeLibraryPath, nativeLibrarySuffix); // LibLouisUTDML.getInstance().setLogLevel(LogLevel.ERROR); + LibLouisUTDML.getInstance().setLogLevel(LogLevel.OFF);org.brailleblaster.louisutdml.LogHandler louisutdmlLogHandler = new org.brailleblaster.louisutdml.LogHandler();
LibLouis.getInstance().registerLogCallback(louisutdmlLogHandler); LibLouisUTDML.getInstance().registerLogCallback(louisutdmlLogHandler); =======================================--- /src/main/org/brailleblaster/archiver/Archiver.java Thu Jul 24 21:15:30 2014 UTC +++ /src/main/org/brailleblaster/archiver/Archiver.java Mon Jul 28 18:58:18 2014 UTC
@@ -30,6 +30,7 @@ package org.brailleblaster.archiver; +import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.util.ArrayList; @@ -37,7 +38,6 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; import nu.xom.converters.DOMConverter; @@ -69,13 +69,16 @@ protected String opfFilePath = null; protected Document opfDoc = null; - protected ArrayList<String> spineList = null; NodeList manifestElements; NodeList spineElements; // Every file that makes our epub doc. ArrayList<String> epubFileList = null; + // This is a list of files that we've created that should + // be removed deleted before zipping everything back up. + ArrayList<String> tempList = null; + // Number of images in each file that makes up our document.// For every spine element we have, we're going to count the number of images
// in that file. This helps with image traversal. @@ -97,6 +100,7 @@ documentEdited = false; opfFilePath = null; epubFileList = new ArrayList<String>(); + tempList = new ArrayList<String>(); numImages = new ArrayList<Integer>(); } @@ -285,7 +289,9 @@///////////////////////////////////////////////////////////////////////////////////////////
// Takes in a document(W3C) and adds its image count to the list. - public void addToNumImgsList(Document addMe)+ // ...because of the way we're adding temp documents, we need to know which document
+ // we're loading to associate the right count. + public void addToNumImgsList(Document addMe, int docIdx) {// Create space big enough to hold our image integers if we haven't done so already.
if(numImages == null) @@ -294,13 +300,35 @@ // Grab all <img> elements. NodeList imgElements = addMe.getElementsByTagName("img"); + // If there are no images, return. + if(imgElements == null) + return; + if(imgElements.getLength() == 0) + return; + // Add this value to the list. - numImages.add(imgElements.getLength()); + + // If this count value makes our list bigger, resize and copy. + if(docIdx >= numImages.size()) { + // Create a new list. + ArrayList<Integer> tmpStrList = new ArrayList<Integer>(); + // Make it so big. + for(int curI = 0; curI < docIdx + 1; curI++) + tmpStrList.add(0); + // Copy old into new. + for(int curV = 0; curV < numImages.size(); curV++) + tmpStrList.set( curV, numImages.get(curV) ); + // Point to new list. + numImages = tmpStrList; + } + + // Finally, add the new count. + numImages.set(docIdx, imgElements.getLength()); }///////////////////////////////////////////////////////////////////////////////////////////
// Takes in a document(XOM) and adds its image count to the list. - public void addToNumImgsList(nu.xom.Document addMe) + public void addToNumImgsList(nu.xom.Document addMe, int docIdx) { // Convert to DOM. Document w3cDoc = null; @@ -312,18 +340,17 @@ } catch(Exception e) { e.printStackTrace(); }- // Create space big enough to hold our image integers if we haven't done so already.
- if(numImages == null) - numImages = new ArrayList<Integer>(); - - // Grab all <img> elements. - NodeList imgElements = w3cDoc.getElementsByTagName("img"); - - // Add this value to the list. - numImages.add(imgElements.getLength()); + // Now add. + addToNumImgsList(w3cDoc, docIdx); + } ++ ///////////////////////////////////////////////////////////////////////////////////////////
+ // Just adds counts sequentially to the image count list. + public void addToNumImgsList(int value) { + numImages.add(value); }- /////////////////////////////////////////////////////////////////////////////////////////// + ///////////////////////////////////////////////////////////////////////////////////////////
// Returns the list of documents that make up this book. public ArrayList<String> getSpine() { return epubFileList; @@ -480,4 +507,25 @@ } // prevSpineFile() + ////////////////////////////////////////////////////////////////////// + // Adds temp file to be deleted later. + public void addTempFile(String path) { + tempList.add(path); + } //addTempFile() + + ////////////////////////////////////////////////////////////////////// + // Delete temporary files that we've created in the archiver. + public void deleteTempFiles() + { + // Delete them all. + for(int curFile = 0; curFile < tempList.size(); curFile++) { + // If we actually got around to creating this file, delete it. + if(tempList.get(curFile) != null) { + File f = new File(tempList.get(curFile)); + f.delete(); + } + } + + } // deleteTempFiles() + } // class Archiver =======================================--- /src/main/org/brailleblaster/archiver/EPub3Archiver.java Thu Jul 24 21:15:30 2014 UTC +++ /src/main/org/brailleblaster/archiver/EPub3Archiver.java Mon Jul 28 18:58:18 2014 UTC
@@ -218,7 +218,7 @@ mainHtmlElement = mainDoc.getElementsByTagName("html"); // Add image count to list. - addToNumImgsList(mainDoc); + addToNumImgsList(mainDoc, curSP);// We have the base document, skip to a document that we'll be adding to
// this one. @@ -233,7 +233,7 @@ NodeList newBodyElm = nextDoc.getElementsByTagName("body"); // Add image count to list. - addToNumImgsList(nextDoc); + addToNumImgsList(nextDoc, curSP); ////////////// // Namespaces. =======================================--- /src/main/org/brailleblaster/archiver/NimasArchiver.java Thu Jul 24 19:10:40 2014 UTC +++ /src/main/org/brailleblaster/archiver/NimasArchiver.java Mon Jul 28 18:58:18 2014 UTC
@@ -44,6 +44,7 @@ import java.util.HashSet; import java.util.Set; +import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import javax.xml.transform.dom.DOMSource; @@ -69,6 +70,7 @@ import org.brailleblaster.util.Notify; import org.brailleblaster.util.Zipper; import org.w3c.dom.DOMImplementation; +import org.w3c.dom.NodeList;//////////////////////////////////////////////////////////////////////////////////
// Prepares Nimas Archive for opening. @@ -76,21 +78,32 @@ Set <String> allPaths; - + // The number of documents we COULD have if wrote them all to disk. + int numPotentialFiles = 0; NimasArchiver(String docToPrepare) { + super(docToPrepare); + + currentConfig = getAutoCfg("nimas"); // Nimas document. + filterNames = new String[] {"XML", "XML Zip", "BRF", "UTDML"}; + filterExtensions = new String[] {"*.xml", "*.zip", "*.brf", "*.utd"}; + allPaths = new HashSet<String>(); + + // Unzip file if needed. if(docToPrepare.endsWith(".zip")) unzip(docToPrepare); // Segment the single NIMAS file. This will make rendering // faster in certain perspectives. - writeNimasSegments(); - currentConfig = getAutoCfg("nimas"); // Nimas document. - filterNames = new String[] {"XML", "XML Zip", "BRF", "UTDML"}; - filterExtensions = new String[] {"*.xml", "*.zip", "*.brf", "*.utd"}; - allPaths=new HashSet<String>(); + // CHUCK: TODO: Delete timer stuff. + long timerStart = System.currentTimeMillis(); + + // Write the first file to disk. + wrtieToDisk(0); ++ System.out.println("wrtieToDisk() took: " + (System.currentTimeMillis() - timerStart) + " Milliseconds");
} @Override @@ -100,7 +113,7 @@ path = workingDocPath; if(fu.createXMLFile(doc.getNewXML(), path)){- String tempSemFile = BBIni.getTempFilesPath() + BBIni.getFileSep() + fu.getFileName(path) + ".sem"; + String tempSemFile = BBIni.getTempFilesPath() + BBIni.getFileSep() + fu.getFileName(path) + ".sem"; copySemanticsFile(tempSemFile, fu.getPath(path) + BBIni.getFileSep() + fu.getFileName(path) + ".sem");
} else { @@ -130,7 +143,14 @@// workingFilePath = unzipr.Unzip(fileName, fileName.substring(0, fileName.lastIndexOf(".")) + BBIni.getFileSep());
String sp = BBIni.getFileSep();String tempOutPath = BBIni.getTempFilesPath() + filePath.substring(filePath.lastIndexOf(sp), filePath.lastIndexOf(".")) + sp;
- workingDocPath = unzipr.Unzip(filePath, tempOutPath); + + // CHUCK: TODO: Remove timer stuff. + long timerStart = System.currentTimeMillis(); + + workingDocPath = unzipr.Unzip(filePath, tempOutPath); ++ System.out.println("Unzipping took: " + (System.currentTimeMillis() - timerStart) + " Milliseconds");
+ // Store paths. zippedPath = filePath; } @@ -194,7 +214,30 @@ arch.save(doc, path); return arch; } ++ ///////////////////////////////////////////////////////////////////////////////// + // Clears the list of path indices so we can once again create them on the fly. + // Needed when we save a nimas file. We have to delete all of the temp files,
+ // zip, then recreate them for the user. + public void resetDuplicatePathList() { + allPaths.clear(); + }+ /////////////////////////////////////////////////////////////////////////////////
+ // Resets the path list(resetDuplicatePathList()), and writes a + // chunked document to disk using the specified index. + public void resetThenWrite(int idx) { + resetDuplicatePathList(); + wrtieToDisk(idx); + } ++ /////////////////////////////////////////////////////////////////////////////////
+ // Returns the number of potential files we would have + // if all of them were written to disk. + public int getNumPotentialFiles() { + return numPotentialFiles; + } + /*** * Write to the disk once at time if the file is not there already * @param index @@ -203,20 +246,39 @@ public String wrtieToDisk(int index){ // Build string path.String outPath = workingDocPath.substring(0, workingDocPath.lastIndexOf(BBIni.getFileSep())) + BBIni.getFileSep() + Integer.toString(index) + ".xml";
- if(!(allPaths.contains(Integer.toString(index)))){ + if( !(allPaths.contains(Integer.toString(index))) ){ - Document curDoc=manageNimas(index); + // Break up document by level1 elements and retrieve the current one. + Document curDoc = manageNimas(index); + // Create file utility for saving our xml files. FileUtils fu = new FileUtils(); // Write file. fu.createXMLFile( curDoc, outPath ); allPaths.add(Integer.toString(index)); + + // Save path in archiver and temp list. + // epubFileList is for multi-file traversal and tempList + // is for deleting temp files. + + // Make these lists as big as they should be. + if(epubFileList.size() == 0) { + for(int curF = 0; curF < numPotentialFiles; curF++) { + epubFileList.add(null); + tempList.add(null); + } + } + + // Add new value. + epubFileList.set(index, outPath); + tempList.set(index, outPath); + + // Count the images in this document. + // addToNumImgsList(curDoc, index); } return outPath; - - } /*** @@ -231,14 +293,31 @@ File temp = new File(sourcePath); //get all level1 element Nodes allNode=getLevel1(); + + // Store the number of files we would create if we + // ran through all of the indices. + numPotentialFiles = allNode.size(); + if (index<allNode.size()){ Node node=allNode.get(index); currentDoc=breakDocument(temp,node); - } - return currentDoc; + // Get the number of <img> elements. + if(getImgCountList().size() == 0) + { + // Go through every <level1> element, and count the + // images. + for(int curLvl1 = 0; curLvl1 < allNode.size(); curLvl1++) + { + // Add the count. + Node nd = allNode.get(curLvl1); + addToNumImgsList( new Document((Element)nd.copy()), curLvl1); + } + } + + return currentDoc; } @@ -359,60 +438,60 @@ }/////////////////////////////////////////////////////////////////////////////
- // Writes segmented NIMAS documents to disc and returns a list of their + // Writes segmented NIMAS documents to disk and returns a list of their // paths. - public ArrayList<String> writeNimasSegments() - { - // Grab list of documents after breaking them up by level1's. - ArrayList<Document> docs = manageNimas(); - - // Create file utility for saving our xml files. - FileUtils fu = new FileUtils(); - - // Path to xsl file. Add three slashes to avoid BS.- String xslPath = "file:///" + BBIni.getProgramDataPath() + BBIni.getFileSep() + "xsl" + BBIni.getFileSep() + "dtb2005html.xsl";
+// public ArrayList<String> writeNimasSegments() +// { +// // Grab list of documents after breaking them up by level1's. +// ArrayList<Document> docs = manageNimas(); +// +// // Create file utility for saving our xml files. +// FileUtils fu = new FileUtils(); +// +// // Path to xsl file. Add three slashes to avoid BS.+// String xslPath = "file:///" + BBIni.getProgramDataPath() + BBIni.getFileSep() + "xsl" + BBIni.getFileSep() + "dtb2005html.xsl";
+//// +//// // Build the xsl document. +// Document xslDoc = null; +// Builder builder = new Builder(); +// try { xslDoc = builder.build(xslPath); } +// catch (ValidityException e1) { e1.printStackTrace(); } +// catch (ParsingException e1) { e1.printStackTrace(); } +// catch (IOException e1) { e1.printStackTrace(); } +// +// // Create the transform. +// XSLTransform xslt = null; +// try { xslt = new XSLTransform(xslDoc); } +// catch (XSLException e1) { e1.printStackTrace(); } // -// // Build the xsl document. - Document xslDoc = null; - Builder builder = new Builder(); - try { xslDoc = builder.build(xslPath); } - catch (ValidityException e1) { e1.printStackTrace(); } - catch (ParsingException e1) { e1.printStackTrace(); } - catch (IOException e1) { e1.printStackTrace(); } - - // Create the transform. - XSLTransform xslt = null; - try { xslt = new XSLTransform(xslDoc); } - catch (XSLException e1) { e1.printStackTrace(); } - - // Loop through the documents, write to file, count images in each. - for(int curDoc = 0; curDoc < docs.size(); curDoc++) - { - // Finally transform the document. - Nodes newDocNodes = null; - try { newDocNodes = xslt.transform( docs.get(curDoc) ); } - catch (XSLException e) { e.printStackTrace(); } - Document transformedDoc = XSLTransform.toDocument(newDocNodes); - - // Build string path.- String outPath = workingDocPath.substring(0, workingDocPath.lastIndexOf(BBIni.getFileSep())) + BBIni.getFileSep() + Integer.toString(curDoc) + ".xhtml";
- - // Add path to list. - epubFileList.add(outPath); - - // Count the images in this document. - addToNumImgsList(docs.get(curDoc)); - - // Write file. - fu.createXMLFile( transformedDoc, outPath ); -// fu.createXMLFile( docs.get(curDoc), outPath ); - - } // for(int curDoc... - - // Create the opf file. - return epubFileList; - - } // writeNimasSegments()+// // Loop through the documents, write to file, count images in each.
+// for(int curDoc = 0; curDoc < docs.size(); curDoc++) +// { +// // Finally transform the document. +// Nodes newDocNodes = null; +// try { newDocNodes = xslt.transform( docs.get(curDoc) ); } +// catch (XSLException e) { e.printStackTrace(); }+// Document transformedDoc = XSLTransform.toDocument(newDocNodes);
+// +// // Build string path.+// String outPath = workingDocPath.substring(0, workingDocPath.lastIndexOf(BBIni.getFileSep())) + BBIni.getFileSep() + Integer.toString(curDoc) + ".xhtml";
+// +// // Add path to list. +// epubFileList.add(outPath); +// +// // Count the images in this document. +// addToNumImgsList(docs.get(curDoc)); +// +// // Write file. +// fu.createXMLFile( transformedDoc, outPath ); +//// fu.createXMLFile( docs.get(curDoc), outPath ); +// +// } // for(int curDoc... +// +// // Create the opf file. +// return epubFileList; +// +// } // writeNimasSegments()/////////////////////////////////////////////////////////////////////////////
// Helper: Uses list created with manageNimas() to create an OPF file for =======================================--- /src/main/org/brailleblaster/perspectives/imageDescriber/ImageDescriberController.java Thu Jul 24 21:15:30 2014 UTC +++ /src/main/org/brailleblaster/perspectives/imageDescriber/ImageDescriberController.java Mon Jul 28 18:58:18 2014 UTC
@@ -38,6 +38,7 @@ import org.brailleblaster.archiver.Archiver; import org.brailleblaster.archiver.ArchiverFactory; import org.brailleblaster.archiver.EPub3Archiver; +import org.brailleblaster.archiver.NimasArchiver; import org.brailleblaster.localization.LocaleHandler; import org.brailleblaster.perspectives.Controller;import org.brailleblaster.perspectives.imageDescriber.document.ImageDescriber;
@@ -124,16 +125,12 @@ } public boolean openDocument(String fileName){ + if(fileName != null) arch = ArchiverFactory.getArchive(fileName); else arch = ArchiverFactory.getArchive(templateFile); - // If we have a Nimas file, convert to epub then push along to BB. -// if(arch instanceof NimasArchiver) {-// arch = new EPub3Archiver(fileName, ((NimasArchiver)(arch)).convertToEPUB() );
-// } // if(arch instanceof NimasArchiver) - //////////////// // Recent Files. if(fileName != null) @@ -168,8 +165,8 @@ public void save(){ - // Before saving, delete the temp html file. - idv.disposeHTMLFile(); + // Before saving, delete the temp html files. + arch.deleteTempFiles(); if(arch.getOrigDocPath() == null) saveAs(); @@ -189,8 +186,10 @@ arch.setDocumentEdited(false); } - // Recreate the HTML file, just in case they need it again. - idv.createHTMLFile(); + // Recreate the temp HTML file, just in case they need it again. + if(arch instanceof NimasArchiver) { + ((NimasArchiver) arch).resetThenWrite(arch.getCurSpineIdx()); + } } public void saveAs(){ @@ -200,7 +199,7 @@ if(filePath != null){ // Before saving, delete the temp html file. - idv.disposeHTMLFile(); + arch.deleteTempFiles(); String ext = getFileExt(filePath); arch.saveAs(imgDesc, filePath, ext); @@ -208,7 +207,9 @@ arch.setDocumentEdited(false); // Recreate the HTML file, just in case they need it again. - idv.createHTMLFile(); + if(arch instanceof NimasArchiver) { + ((NimasArchiver) arch).resetThenWrite(arch.getCurSpineIdx()); + } } } @@ -402,7 +403,7 @@ } ///////////////////////////////////////////////////////////////// - // Returns the image desciber "document" + // Returns the image describer "document" public ImageDescriber getDocument() { return imgDesc; } =======================================--- /src/main/org/brailleblaster/perspectives/imageDescriber/document/ImageDescriber.java Mon Jun 2 18:56:58 2014 UTC +++ /src/main/org/brailleblaster/perspectives/imageDescriber/document/ImageDescriber.java Mon Jul 28 18:58:18 2014 UTC
@@ -120,7 +120,7 @@ } } - private void initializeVariables(){ + public void initializeVariables(){ rootElement = doc.getRootElement(); imgElmList = new ArrayList<Element>(); prodCopyList = new ArrayList<String>(); =======================================--- /src/main/org/brailleblaster/perspectives/imageDescriber/views/ImageDescriberView.java Thu Jul 24 21:15:30 2014 UTC +++ /src/main/org/brailleblaster/perspectives/imageDescriber/views/ImageDescriberView.java Mon Jul 28 18:58:18 2014 UTC
@@ -14,7 +14,10 @@ import nu.xom.Nodes; import nu.xom.XPathContext; +import org.apache.commons.io.FileUtils; import org.brailleblaster.BBIni; +import org.brailleblaster.archiver.Archiver; +import org.brailleblaster.archiver.NimasArchiver;import org.brailleblaster.perspectives.imageDescriber.ImageDescriberController; import org.brailleblaster.perspectives.imageDescriber.document.ImageDescriber;
import org.brailleblaster.util.ImageHelper; @@ -110,6 +113,25 @@ // Grab index of current image. int imgIndex = idd.getImageDescriber().getCurrentElementIndex(); + // We probably need to create another page/section. + // This will apply most to NIMAS files. + if( (idd.getArchiver() instanceof NimasArchiver) ) + { + // Get current spine index and number of spine files. + int curSpineIdx = idd.getArchiver().getCurSpineIdx() - 1;+ int numSpineFiles = ((NimasArchiver)(idd.getArchiver())).getNumPotentialFiles();
+ + // Which spine file SHOULD we be on right now? + if(curSpineIdx >= numSpineFiles) + curSpineIdx = 0; + if(curSpineIdx < 0) + curSpineIdx = numSpineFiles - 1; + + // Write the new file. + ((NimasArchiver)(idd.getArchiver())).wrtieToDisk( curSpineIdx ); + + } // if( (idd.getArchiver() instanceof NimasArchiver) ) + // Is it time to move to another page/chapter? String newPath = idd.getArchiver().setSpineFileWithImgIndex(imgIndex); @@ -143,7 +165,26 @@ // Grab index of current image. int imgIndex = idd.getImageDescriber().getCurrentElementIndex(); - // Is it time to move to another page/chapter? + // We probably need to create another page/section. + // This will apply most to NIMAS files. + if( (idd.getArchiver() instanceof NimasArchiver) ) + { + // Get current spine index and number of spine files. + int curSpineIdx = idd.getArchiver().getCurSpineIdx() + 1;+ int numSpineFiles = ((NimasArchiver)(idd.getArchiver())).getNumPotentialFiles();
+ + // Which spine file SHOULD we be on right now? + if(curSpineIdx >= numSpineFiles) + curSpineIdx = 0; + if(curSpineIdx < 0) + curSpineIdx = numSpineFiles - 1; + + // Write the new file. + ((NimasArchiver)(idd.getArchiver())).wrtieToDisk( curSpineIdx ); + + } // if( (idd.getArchiver() instanceof NimasArchiver) ) + + // Move to next section. String newPath = idd.getArchiver().setSpineFileWithImgIndex(imgIndex); // If the page needs to move/change, update. @@ -247,6 +288,25 @@ @Override public void widgetSelected(SelectionEvent e) { + // We probably need to create another page/section. + // This will apply most to NIMAS files. + if( (idd.getArchiver() instanceof NimasArchiver) ) + { + // Get current spine index and number of spine files. + int curSpineIdx = idd.getArchiver().getCurSpineIdx() - 1;+ int numSpineFiles = ((NimasArchiver)(idd.getArchiver())).getNumPotentialFiles();
+ + // Which spine file SHOULD we be on right now? + if(curSpineIdx >= numSpineFiles) + curSpineIdx = 0; + if(curSpineIdx < 0) + curSpineIdx = numSpineFiles - 1; + + // Write the new file. + ((NimasArchiver)(idd.getArchiver())).wrtieToDisk( curSpineIdx ); + + } // if( (idd.getArchiver() instanceof NimasArchiver) ) + // Previous page. curBrowserFilePath = idd.getArchiver().prevSpineFilePath(); @@ -278,6 +338,25 @@ @Override public void widgetSelected(SelectionEvent e) { + // We probably need to create another page/section. + // This will apply most to NIMAS files. + if( (idd.getArchiver() instanceof NimasArchiver) ) + { + // Get current spine index and number of spine files. + int curSpineIdx = idd.getArchiver().getCurSpineIdx() + 1;+ int numSpineFiles = ((NimasArchiver)(idd.getArchiver())).getNumPotentialFiles();
+ + // Which spine file SHOULD we be on right now? + if(curSpineIdx >= numSpineFiles) + curSpineIdx = 0; + if(curSpineIdx < 0) + curSpineIdx = numSpineFiles - 1; + + // Write the new file. + ((NimasArchiver)(idd.getArchiver())).wrtieToDisk( curSpineIdx ); + + } // if( (idd.getArchiver() instanceof NimasArchiver) ) + // Move to next page. curBrowserFilePath = idd.getArchiver().nextSpineFilePath(); @@ -548,45 +627,69 @@ // Create copy of file as html and load into browser widget. if(idd.getWorkingPath() != null && imgDesc.getImageList().size() > 0) { - - // Creates an HTML file from our xml file. - createHTMLFile(); + + // If there are spine paths, that means there are multiple + // files to load. + if(idd.getArchiver().getCurSpineFilePath() != null) { + + // Point to file. + curBrowserFilePath = idd.getArchiver().getCurSpineFilePath(); ++ // If this is a NIMAS document, we need to create a section before loading + // it later. We'll also have to copy/rename it to .html so it will load
+ // properly. + if(idd.getArchiver() instanceof NimasArchiver) { + + // Write the section to disc.+ ((NimasArchiver)(idd.getArchiver())).wrtieToDisk( idd.getArchiver().getCurSpineIdx() );
+ + // Create html version. + File xmlFile = new File(curBrowserFilePath);+ File htmlFile = new File(curBrowserFilePath = curBrowserFilePath.replace(".xml", ".html"));
+ try { FileUtils.copyFile( xmlFile, htmlFile ); } + catch (IOException e) { e.printStackTrace(); } ++ // Add this file path to our temp list so it will get deleted later.
+ idd.getArchiver().addTempFile(curBrowserFilePath); + } + } +- // Progress listener. Adds javascript code that will modify our img elements with
- // height information. - browser.addProgressListener(new ProgressListener() - { - @Override - public void changed(ProgressEvent event) { } - - @Override - public void completed(ProgressEvent event) {+ // Progress listener. Adds javascript code that will modify our img elements with
+ // height information. + browser.addProgressListener(new ProgressListener() + { + @Override + public void changed(ProgressEvent event) { } + + @Override + public void completed(ProgressEvent event) { + + // Refresh the view one time. This fixes + // an issue with certain documents + // not displaying properly in the browser + // view. + if(refreshOnce == true) { + + // Don't do it again! + refreshOnce = false; - // Refresh the view one time. This fixes - // an issue with certain documents - // not displaying properly in the browser - // view. - if(refreshOnce == true) { - - // Don't do it again! - refreshOnce = false; - - // Refresh. - browser.refresh(); - } - - // Resize images so they fit our screen. - // We call it here instead of before the refresh above. - // Otherwise, it doesn't take. - script_resizeImages(); -- // By putting this here, we force the page to scroll after our
- // first refresh on load. - scrollBrowserToCurImg(); - - } // completed() - - }); // addProgressListener() + // Refresh. + browser.refresh(); + } + + // Resize images so they fit our screen. + // We call it here instead of before the refresh above. + // Otherwise, it doesn't take. + script_resizeImages(); ++ // By putting this here, we force the page to scroll after our
+ // first refresh on load. + scrollBrowserToCurImg(); + + } // completed() + + }); // addProgressListener() // Finally, jam the file into the browser widget. browser.setUrl( curBrowserFilePath ); @@ -719,66 +822,14 @@ } // setFormData() - // Copy's the xml file and creates an html file from it. - public void createHTMLFile() - { - // If there are spine paths, that means there are multiple - // files to load, and we don't need to create an html file(EPUB). - if(idd.getArchiver().getCurSpineFilePath() != null) { - curBrowserFilePath = idd.getArchiver().getCurSpineFilePath(); - return; - } - - // Make copy of the file. - File fin = new File(idd.getWorkingPath());- File fout = new File(idd.getWorkingPath().replaceAll(".xml", ".html"));
- try - { - InputStream input = null; - OutputStream output = null; - input = new FileInputStream(fin); - output = new FileOutputStream(fout); - byte[] buf = new byte[1024]; - int bytesRead; - while ((bytesRead = input.read(buf)) > 0) { - output.write(buf, 0, bytesRead); - } - input.close(); - output.close(); - } - catch (FileNotFoundException e1) { e1.printStackTrace(); } - catch (IOException e1) { e1.printStackTrace(); } - - // Get path to full file. - curBrowserFilePath = fout.getAbsolutePath(); - - } //createHTMLFile() - - // Removes temporary HTML from our unzipped directory. - public void disposeHTMLFile() - {- // If there are spine paths, then this is more than likely an EPUB document.
- // Nothing to delete. - if(idd.getArchiver().getCurSpineFilePath() != null) { - curBrowserFilePath = idd.getArchiver().getCurSpineFilePath(); - return; - } - - // Delete the html file we created. - if(curBrowserFilePath != null) - new File(curBrowserFilePath).delete(); - curBrowserFilePath = null; - - } // disposeHTMLFile() - public void disposeUI(){ // Dispose UI stuff. mainImage.getImage().dispose(); group.dispose(); - // Get rid of temp HTML file. - disposeHTMLFile(); + // Get rid of temp files. + idd.getArchiver().deleteTempFiles(); } private void toggleUI(){ =======================================--- /src/main/org/brailleblaster/util/Zipper.java Tue Apr 15 14:34:57 2014 UTC +++ /src/main/org/brailleblaster/util/Zipper.java Mon Jul 28 18:58:18 2014 UTC
@@ -118,7 +118,7 @@ // Get output stream for writing this file out. BufferedOutputStream out = - new BufferedOutputStream( new FileOutputStream(unzipPath), 1000 ); + new BufferedOutputStream( new FileOutputStream(unzipPath), 1000000 ); // Create input stream for this zip entry. InputStream in = zipF.getInputStream(entry); @@ -126,10 +126,10 @@ // Number of bytes read. int count; // Buffer to hold read bytes for writing. - byte data[] = new byte[1000]; + byte data[] = new byte[1000000]; // Read some data, then write some data. - while ( (count = in.read(data, 0, 1000)) != -1 ) + while ( (count = in.read(data, 0, 1000000)) != -1 ) { // Write it! out.write(data, 0, count);