diff --git a/.classpath b/.classpath
deleted file mode 100644
index 97403d05..00000000
--- a/.classpath
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
diff --git a/.hgignore b/.hgignore
new file mode 100644
index 00000000..de8451a4
--- /dev/null
+++ b/.hgignore
@@ -0,0 +1,7 @@
+build
+shared/bin
+uploader/bin
+lib/snakeyaml-1.5.jar
+dist/Library.jar
+dist/uploader.jar
+shared/TermEntryTest/test.yml
diff --git a/.project b/.project
deleted file mode 100644
index b40f21ce..00000000
--- a/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
-	<name>Library</name>
-	<comment></comment>
-	<projects>
-	</projects>
-	<buildSpec>
-		<buildCommand>
-			<name>org.eclipse.jdt.core.javabuilder</name>
-			<arguments>
-			</arguments>
-		</buildCommand>
-	</buildSpec>
-	<natures>
-		<nature>org.eclipse.jdt.core.javanature</nature>
-	</natures>
-</projectDescription>
diff --git a/README b/README
index 6da0a190..8681abfb 100644
--- a/README
+++ b/README
@@ -4,6 +4,7 @@ Build:
plugin-Library$ ant
+
== Javadoc ==
If you want to generate Javadocs, download bliki-doclet, which is a little
@@ -56,3 +57,9 @@ this may change soon :
+
+== Ongoing work to split ==
+
+The plugin code is in src and test (for historical reasons).
+
+The uploader (a standalone program) is in uploader/src and uploader/test, and depends on the fcp and plugin code.
diff --git a/README.clean-build b/README.clean-build
index 756e210d..ca465d38 100644
--- a/README.clean-build
+++ b/README.clean-build
@@ -1 +1,2 @@
-Library depends on SnakeYAML. It will fetch it from a known URL with a known version and checksums by default. If you want a completely clean build, fetch it yourself and build it yourself, and put it in lib/SnakeYAML-1.3.jar.
+Library depends on SnakeYAML. It will fetch it from a known URL with a known version and checksums by default.
+If you want a completely clean build, fetch it yourself and build it yourself, and put it in lib/SnakeYAML-1.3.jar.
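For reference, a completely clean build might look like the sketch below. The SnakeYAML source
location and the use of Maven to build it are assumptions, and the target filename follows the
README text above (note that .hgignore lists lib/snakeyaml-1.5.jar, so match whatever version
build.xml actually expects):

    # Build SnakeYAML from its own sources instead of letting the ant build download the jar.
    mvn -f /path/to/snakeyaml/pom.xml -DskipTests package
    cp /path/to/snakeyaml/target/snakeyaml-*.jar lib/SnakeYAML-1.3.jar

    # Then build the plugin as usual.
    ant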
diff --git a/build.xml b/build.xml
index fa7a6670..e5fad5ef 100644
--- a/build.xml
+++ b/build.xml
@@ -2,10 +2,10 @@
-
+
-
-
+
+
@@ -64,14 +64,14 @@
-
-
-
+
+
+
-
-
-
+
+
+
@@ -80,7 +80,7 @@
-
+
-
+
@@ -146,7 +146,7 @@
-
+
@@ -173,7 +173,7 @@
-
+
diff --git a/loop.sh b/loop.sh
new file mode 100755
index 00000000..c8b87772
--- /dev/null
+++ b/loop.sh
@@ -0,0 +1,7 @@
+#!/bin/sh -x
+
+while test -f library.continue.loop
+do
+ java -jar `dirname $0`/dist/uploader.jar
+ sleep 60
+done
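loop.sh re-runs the standalone uploader once a minute for as long as a marker file named
library.continue.loop exists in the current working directory (the jar path, by contrast, is
resolved relative to the script's own location). A possible session, assuming dist/uploader.jar
has already been built:

    touch library.continue.loop   # enable the loop
    ./loop.sh &                   # runs dist/uploader.jar, sleeps 60 seconds, repeats
    rm library.continue.loop      # the loop exits once the current iteration finishes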
diff --git a/src/plugins/Library/ArchiverFactory.java b/src/plugins/Library/ArchiverFactory.java
new file mode 100644
index 00000000..e881f6e9
--- /dev/null
+++ b/src/plugins/Library/ArchiverFactory.java
@@ -0,0 +1,18 @@
+package plugins.Library;
+
+import plugins.Library.io.ObjectStreamReader;
+import plugins.Library.io.ObjectStreamWriter;
+import plugins.Library.io.serial.LiveArchiver;
+import plugins.Library.util.exec.SimpleProgress;
+
+public interface ArchiverFactory {
+
+	<T, S extends ObjectStreamWriter & ObjectStreamReader> LiveArchiver<T, SimpleProgress>
+	newArchiver(S rw, String mime, int size,
+	            Priority priorityLevel);
+
+
+	<T, S extends ObjectStreamWriter & ObjectStreamReader> LiveArchiver<T, SimpleProgress>
+	newArchiver(S rw, String mime, int size,
+	            LiveArchiver<T, SimpleProgress> archiver);
+}
diff --git a/src/plugins/Library/FactoryRegister.java b/src/plugins/Library/FactoryRegister.java
new file mode 100644
index 00000000..47ca38e7
--- /dev/null
+++ b/src/plugins/Library/FactoryRegister.java
@@ -0,0 +1,14 @@
+package plugins.Library;
+
+public class FactoryRegister {
+ private static ArchiverFactory archiver = null;
+
+ public static void register(ArchiverFactory factory) {
+ archiver = factory;
+ }
+
+ public static ArchiverFactory getArchiverFactory() {
+ assert archiver != null;
+ return archiver;
+ }
+}
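FactoryRegister is the seam between the code shared by the plugin and the standalone uploader and
the environment that actually provides archivers: whichever side is running registers an
ArchiverFactory once at start-up, and shared code looks it up instead of depending on the Freenet
node classes. A rough usage sketch follows, not part of this patch; the type parameters follow the
ArchiverFactory interface above, YamlReaderWriter is assumed to be the ObjectStreamReader/Writer
implementation used for YAML indexes, and the element type and size argument are placeholders:

    package plugins.Library;

    import java.util.Map;

    import plugins.Library.index.ProtoIndex;
    import plugins.Library.io.YamlReaderWriter;
    import plugins.Library.io.serial.LiveArchiver;
    import plugins.Library.util.exec.SimpleProgress;

    /** Usage sketch only. */
    class ArchiverFactorySketch {

        /** Shared code obtains an archiver through the register, not through fred classes. */
        static LiveArchiver<Map<String, Object>, SimpleProgress> obtainArchiver() {
            // The running environment must have registered its factory first; the Library
            // constructor in the hunk below does FactoryRegister.register(this).
            ArchiverFactory factory = FactoryRegister.getArchiverFactory();

            YamlReaderWriter rw = new YamlReaderWriter();
            return factory.<Map<String, Object>, YamlReaderWriter>newArchiver(
                    rw, ProtoIndex.MIME_TYPE, 0x10000, Priority.Bulk);
        }
    }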
diff --git a/src/plugins/Library/Library.java b/src/plugins/Library/Library.java
index 9a35b099..04d361d8 100644
--- a/src/plugins/Library/Library.java
+++ b/src/plugins/Library/Library.java
@@ -13,20 +13,15 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import plugins.Library.client.FreenetArchiver;
-import plugins.Library.index.ProtoIndex;
-import plugins.Library.index.ProtoIndexSerialiser;
import plugins.Library.index.xml.URLUpdateHook;
import plugins.Library.index.xml.XMLIndex;
-import plugins.Library.io.ObjectStreamReader;
-import plugins.Library.io.ObjectStreamWriter;
-import plugins.Library.io.serial.Serialiser.PullTask;
import plugins.Library.search.InvalidSearchException;
-import plugins.Library.util.exec.TaskAbortException;
import freenet.client.FetchContext;
import freenet.client.FetchException;
@@ -46,6 +41,18 @@
import freenet.client.events.ExpectedMIMEEvent;
import freenet.keys.FreenetURI;
import freenet.keys.USK;
+import plugins.Library.ArchiverFactory;
+import plugins.Library.FactoryRegister;
+import plugins.Library.index.Index;
+import plugins.Library.index.ProtoIndex;
+import plugins.Library.index.ProtoIndexSerialiser;
+import plugins.Library.io.ObjectStreamReader;
+import plugins.Library.io.ObjectStreamWriter;
+import plugins.Library.io.serial.LiveArchiver;
+import plugins.Library.io.serial.Serialiser.PullTask;
+import plugins.Library.Priority;
+import plugins.Library.util.exec.SimpleProgress;
+import plugins.Library.util.exec.TaskAbortException;
import freenet.node.NodeClientCore;
import freenet.node.RequestClient;
import freenet.node.RequestStarter;
@@ -55,12 +62,11 @@
import freenet.support.Logger;
import freenet.support.io.FileUtil;
-
/**
* Library class is the api for others to use search facilities, it is used by the interfaces
* @author MikeB
*/
-final public class Library implements URLUpdateHook {
+final public class Library implements URLUpdateHook, ArchiverFactory {
public static final String BOOKMARK_PREFIX = "bookmark:";
public static final String DEFAULT_INDEX_SITE = BOOKMARK_PREFIX + "liberty-of-information" + " " + BOOKMARK_PREFIX + "free-market-free-people" + " " +
@@ -68,6 +74,8 @@ final public class Library implements URLUpdateHook {
private static int version = 36;
public static final String plugName = "Library " + getVersion();
+
+
public static String getPlugName() {
return plugName;
}
@@ -121,8 +129,9 @@ public boolean realTimeFlag() {
* Method to setup Library class so it has access to PluginRespirator, and load bookmarks
* TODO pull bookmarks from disk
*/
- private Library(PluginRespirator pr){
+ private Library(PluginRespirator pr) {
this.pr = pr;
+ FactoryRegister.register(this);
PluginStore ps;
if(pr!=null) {
this.exec = pr.getNode().executor;
@@ -184,7 +193,7 @@ private Library(PluginRespirator pr){
bookmarkCallbacks.put(name, callback);
USK u;
try {
- u = USK.create(uri);
+ u = USK.create(new freenet.keys.FreenetURI(uri.toString()));
} catch (MalformedURLException e) {
Logger.error(this, "Invalid bookmark USK: "+target+" for "+name, e);
continue;
@@ -193,6 +202,12 @@ private Library(PluginRespirator pr){
callback.ret = uskManager.subscribeContent(u, callback, false, pr.getHLSimpleClient().getFetchContext(), RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, rcBulk);
}
}
+ if (!bookmarks.containsKey("debbies-library-development-index")) {
+ addBookmark("debbies-library-development-index",
+ "USK@E0jWjfYUfJqESuiM~5ZklhTZXKCWapxl~CRj1jmZ-~I,gl48QSprqZC1mASLbE9EOhQoBa~PheO8r-q9Lqj~uXA,AQACAAE/index.yml/966");
+ migrated = true;
+ Logger.normal(this, "Added new default index");
+ }
if(bookmarks.isEmpty() || needNewWanna || !bookmarks.containsKey("gotcha") ||
!bookmarks.containsKey("liberty-of-information") ||
!bookmarks.containsKey("free-market-free-people")) {
@@ -241,16 +256,6 @@ public synchronized void saveState(){
* search for multiple terms in the same btree, but for now, turning off caching is the only viable option.
*/
-// /**
-// ** Holds all the read-indexes.
-// */
-// private Map rtab = new HashMap();
-//
-// /**
-// ** Holds all the writeable indexes.
-// */
-// private Map wtab = new HashMap();
-//
/**
** Holds all the bookmarks (aliases into the rtab).
*/
@@ -258,9 +263,13 @@ public synchronized void saveState(){
private Map bookmarkCallbacks = new HashMap();
+ /** Set of all the enabled indices */
+ public Set selectedIndices = new HashSet();
+
/**
** Get the index type giving a {@code FreenetURI}. This must not contain
** a metastring (end with "/") or be a USK.
+ * @throws MalformedURLException
*/
	public Class<?> getIndexType(FreenetURI indexuri) throws FetchException {
if(indexuri.lastMetaString()!=null && indexuri.lastMetaString().equals(XMLIndex.DEFAULT_FILE))
@@ -329,10 +338,10 @@ public Class<?> getIndexType(FreenetURI indexuri) throws FetchException {
	public Class<?> getIndexTypeFromMIME(String mime) {
if (mime.equals(ProtoIndex.MIME_TYPE)) {
- //return "YAML index";
+ // YAML index
return ProtoIndex.class;
} else if (mime.equals(XMLIndex.MIME_TYPE)) {
- //return "XML index";
+ // XML index
return XMLIndex.class;
} else {
throw new UnsupportedOperationException("Unknown mime-type for index: "+mime);
@@ -340,103 +349,6 @@ public Class<?> getIndexTypeFromMIME(String mime) {
}
-/*
- KEYEXPLORER slightly more efficient version that depends on KeyExplorer
-
- /**
- ** Get the index type giving a {@code FreenetURI}. This should have been
- ** passed through {@link KeyExplorerUtils#sanitizeURI(List, String)} at
- ** some point - ie. it must not contain a metastring (end with "/") or be
- ** a USK.
- * /
-	public Class<?> getIndexType(FreenetURI uri)
- throws FetchException, IOException, MetadataParseException, LowLevelGetException, KeyListenerConstructionException {
- GetResult getresult = KeyExplorerUtils.simpleGet(pr, uri);
- byte[] data = BucketTools.toByteArray(getresult.getData());
-
- if (getresult.isMetaData()) {
- try {
- Metadata md = Metadata.construct(data);
-
- if (md.isArchiveManifest()) {
- if (md.getArchiveType() == ARCHIVE_TYPE.TAR) {
- return getIndexTypeFromManifest(uri, false, true);
-
- } else if (md.getArchiveType() == ARCHIVE_TYPE.ZIP) {
- return getIndexTypeFromManifest(uri, true, false);
-
- } else {
- throw new UnsupportedOperationException("not implemented - unknown archive manifest");
- }
-
- } else if (md.isSimpleManifest()) {
- return getIndexTypeFromManifest(uri, false, false);
- }
-
- return getIndexTypeFromSimpleMetadata(md);
-
- } catch (MetadataParseException e) {
- throw new RuntimeException(e);
- }
- } else {
- throw new UnsupportedOperationException("Found data instead of metadata; I do not have enough intelligence to decode this.");
- }
- }
-
-	public Class<?> getIndexTypeFromSimpleMetadata(Metadata md) {
- String mime = md.getMIMEType();
- if (mime.equals(ProtoIndex.MIME_TYPE)) {
- //return "YAML index";
- return ProtoIndex.class;
- } else if (mime.equals(XMLIndex.MIME_TYPE)) {
- //return "XML index";
- return XMLIndex.class;
- } else {
- throw new UnsupportedOperationException("Unknown mime-type for index");
- }
- }
-
-	public Class<?> getIndexTypeFromManifest(FreenetURI furi, boolean zip, boolean tar)
- throws FetchException, IOException, MetadataParseException, LowLevelGetException, KeyListenerConstructionException {
-
- boolean automf = true, deep = true, ml = true;
- Metadata md = null;
-
- if (zip)
- md = KeyExplorerUtils.zipManifestGet(pr, furi);
- else if (tar)
- md = KeyExplorerUtils.tarManifestGet(pr, furi, ".metadata");
- else {
- md = KeyExplorerUtils.simpleManifestGet(pr, furi);
- if (ml) {
- md = KeyExplorerUtils.splitManifestGet(pr, md);
- }
- }
-
- if (md.isSimpleManifest()) {
- // a subdir
- HashMap docs = md.getDocuments();
- Metadata defaultDoc = md.getDefaultDocument();
-
- if (defaultDoc != null) {
- //return "(default doc method) " + getIndexTypeFromSimpleMetadata(defaultDoc);
- return getIndexTypeFromSimpleMetadata(defaultDoc);
- }
-
- if (docs.containsKey(ProtoIndex.DEFAULT_FILE)) {
- //return "(doclist method) YAML index";
- return ProtoIndex.class;
- } else if (docs.containsKey(XMLIndex.DEFAULT_FILE)) {
- //return "(doclist method) XML index";
- return XMLIndex.class;
- } else {
- throw new UnsupportedOperationException("Could not find a supported index in the document-listings for " + furi.toString());
- }
- }
-
- throw new UnsupportedOperationException("Parsed metadata but did not reach a simple manifest: " + furi.toString());
- }
-*/
	public Class<?> getIndexType(File f) {
if (f.getName().endsWith(ProtoIndexSerialiser.FILE_EXTENSION))
return ProtoIndex.class;
@@ -451,9 +363,7 @@ public Object getAddressTypeFromString(String indexuri) {
// return KeyExplorerUtils.sanitizeURI(new ArrayList(), indexuri); KEYEXPLORER
// OPT HIGH if it already ends with eg. *Index.DEFAULT_FILE, don't strip
// the MetaString, and have getIndexType behave accordingly
- FreenetURI tempURI = new FreenetURI(indexuri);
-// if (tempURI.hasMetaStrings()) { tempURI = tempURI.setMetaString(null); }
-// if (tempURI.isUSK()) { tempURI = tempURI.sskForUSK(); }
+ plugins.Library.io.FreenetURI tempURI = new plugins.Library.io.FreenetURI(indexuri);
return tempURI;
} catch (MalformedURLException e) {
File file = new File(indexuri);
@@ -480,7 +390,7 @@ public String addBookmark(String name, String uri) {
try {
u = new FreenetURI(uri);
if(u.isUSK()) {
- uskNew = USK.create(u);
+ uskNew = USK.create(new freenet.keys.FreenetURI(u.toString()));
edition = uskNew.suggestedEdition;
}
} catch (MalformedURLException e) {
@@ -503,7 +413,7 @@ public String addBookmark(String name, String uri) {
try {
FreenetURI uold = new FreenetURI(old);
if(uold.isUSK()) {
- USK usk = USK.create(uold);
+ USK usk = USK.create(new freenet.keys.FreenetURI(uold.toString()));
if(!(uskNew != null && usk.equals(uskNew, false))) {
uskManager.unsubscribe(usk, callback);
uskManager.unsubscribeContent(usk, callback.ret, true);
@@ -606,14 +516,6 @@ public final ArrayList getIndices(String indexuris) throws InvalidSearchE
return indices;
}
- // See comments near rtab. Can't use in parallel so not acceptable.
-// /**
-// * Method to get all of the instatiated Indexes
-// */
-// public final Iterable getAllIndices() {
-// return rtab.values();
-// }
-//
public final Index getIndex(String indexuri) throws InvalidSearchException, TaskAbortException {
return getIndex(indexuri, null);
}
@@ -644,10 +546,6 @@ public final Index getIndex(String indexuri, String origIndexName) throws Invali
throw new InvalidSearchException("Index bookmark '"+indexuri+" does not exist");
}
- // See comments near rtab. Can't use in parallel so caching is dangerous.
-// if (rtab.containsKey(indexuri))
-// return rtab.get(indexuri);
-//
Class> indextype;
Index index;
Object indexkey;
@@ -663,9 +561,9 @@ public final Index getIndex(String indexuri, String origIndexName) throws Invali
try {
if (indexkey instanceof File) {
indextype = getIndexType((File)indexkey);
- } else if (indexkey instanceof FreenetURI) {
+ } else if (indexkey instanceof plugins.Library.io.FreenetURI) {
// TODO HIGH make this non-blocking
- FreenetURI uri = (FreenetURI)indexkey;
+ FreenetURI uri = new FreenetURI(indexkey.toString());
if(uri.isUSK())
edition = uri.getEdition();
indextype = getIndexType(uri);
@@ -676,7 +574,7 @@ public final Index getIndex(String indexuri, String origIndexName) throws Invali
if (indextype == ProtoIndex.class) {
// TODO HIGH this *must* be non-blocking as it fetches the whole index root
			PullTask<ProtoIndex> task = new PullTask<ProtoIndex>(indexkey);
- ProtoIndexSerialiser.forIndex(indexkey, RequestStarter.INTERACTIVE_PRIORITY_CLASS).pull(task);
+ ProtoIndexSerialiser.forIndex(indexkey, Priority.Interactive).pull(task);
index = task.data;
} else if (indextype == XMLIndex.class) {
@@ -686,33 +584,22 @@ public final Index getIndex(String indexuri, String origIndexName) throws Invali
throw new AssertionError();
}
- // See comments near rtab. Can't use in parallel so caching is dangerous.
- //rtab.put(indexuri, index);
Logger.normal(this, "Loaded index type " + indextype.getName() + " at " + indexuri);
return index;
+ } catch (MalformedURLException e) {
+ Logger.warning(this, "Failed to find index type", e);
+ throw new TaskAbortException("Failed to find index type " + indexuri+" : "+e, e, true);
} catch (FetchException e) {
- throw new TaskAbortException("Failed to fetch index " + indexuri+" : "+e, e, true); // can retry
-/* KEYEXPLORER
- } catch (IOException e) {
- throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry
-
- } catch (LowLevelGetException e) {
- throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry
-
- } catch (KeyListenerConstructionException e) {
- throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry
-
- } catch (MetadataParseException e) {
- throw new TaskAbortException("Failed to parse index " + indexuri, e);
-*/
+			Logger.warning(this, "Failed to fetch index", e);
+ throw new TaskAbortException("Failed to fetch index " + indexuri+" : "+e, e, true);
} catch (UnsupportedOperationException e) {
+			Logger.warning(this, "Failed to parse index", e);
throw new TaskAbortException("Failed to parse index " + indexuri+" : "+e, e);
-
} catch (RuntimeException e) {
+			Logger.warning(this, "Failed to load index", e);
throw new TaskAbortException("Failed to load index " + indexuri+" : "+e, e);
-
}
}
@@ -724,7 +611,7 @@ public final Index getIndex(String indexuri, String origIndexName) throws Invali
** @throws IllegalStateException if the singleton has not been initialised
** or if it does not have a respirator.
*/
-	public static <T> FreenetArchiver<T>
+	public static <T> LiveArchiver<T, SimpleProgress>
makeArchiver(ObjectStreamReader r, ObjectStreamWriter w, String mime, int size, short priorityClass) {
if (lib == null || lib.pr == null) {
throw new IllegalStateException("Cannot archive to freenet without a fully live Library plugin connected to a freenet node.");
@@ -732,7 +619,7 @@ public final Index getIndex(String indexuri, String origIndexName) throws Invali
return new FreenetArchiver(lib.pr.getNode().clientCore, r, w, mime, size, priorityClass);
}
}
-
+
/**
** Create a {@link FreenetArchiver} connected to the core of the
** singleton's {@link PluginRespirator}.
@@ -740,11 +627,35 @@ public final Index getIndex(String indexuri, String origIndexName) throws Invali
** @throws IllegalStateException if the singleton has not been initialised
** or if it does not have a respirator.
*/
-	public static <T, S extends ObjectStreamWriter & ObjectStreamReader> FreenetArchiver<T>
+	public static <T, S extends ObjectStreamWriter & ObjectStreamReader> LiveArchiver<T, SimpleProgress>
makeArchiver(S rw, String mime, int size, short priorityClass) {
return Library.makeArchiver(rw, rw, mime, size, priorityClass);
}
+	public <T, S extends ObjectStreamWriter & ObjectStreamReader> LiveArchiver<T, SimpleProgress>
+ newArchiver(S rw, String mime, int size, Priority priorityLevel) {
+ short priorityClass = 0;
+ switch (priorityLevel) {
+ case Interactive:
+ priorityClass = RequestStarter.INTERACTIVE_PRIORITY_CLASS;
+ break;
+ case Bulk:
+ priorityClass = RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS;
+ break;
+ }
+ return makeArchiver(rw, mime, size, priorityClass);
+ }
+
+	public <T, S extends ObjectStreamWriter & ObjectStreamReader> LiveArchiver<T, SimpleProgress>
+	newArchiver(S rw, String mime, int size, LiveArchiver<T, SimpleProgress> archiver) {
+ short priorityClass = RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS;
+ if (archiver != null &&
+ archiver instanceof FreenetArchiver)
+ priorityClass = ((FreenetArchiver) archiver).priorityClass;
+
+ return makeArchiver(rw, mime, size, priorityClass);
+ }
+
public static String convertToHex(byte[] data) {
StringBuilder buf = new StringBuilder();
for (int i = 0; i < data.length; i++) {
diff --git a/src/plugins/Library/Main.java b/src/plugins/Library/Main.java
index 600cb330..f45a1e69 100644
--- a/src/plugins/Library/Main.java
+++ b/src/plugins/Library/Main.java
@@ -3,76 +3,25 @@
* http://www.gnu.org/ for further details of the GPL. */
package plugins.Library;
-import freenet.node.RequestStarter;
import freenet.pluginmanager.PluginReplySender;
-import freenet.support.MutableBoolean;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
-import freenet.support.io.BucketTools;
-import freenet.support.io.Closer;
-import freenet.support.io.FileBucket;
-import freenet.support.io.FileUtil;
-import freenet.support.io.LineReadingInputStream;
-import freenet.support.io.NativeThread;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.logging.Level;
-
-import plugins.Library.client.FreenetArchiver;
-import plugins.Library.index.ProtoIndex;
-import plugins.Library.index.ProtoIndexComponentSerialiser;
-import plugins.Library.index.ProtoIndexSerialiser;
-import plugins.Library.index.TermEntry;
-import plugins.Library.index.TermPageEntry;
+
import plugins.Library.search.Search;
import plugins.Library.ui.WebInterface;
-import plugins.Library.util.SkeletonBTreeMap;
-import plugins.Library.util.SkeletonBTreeSet;
-import plugins.Library.util.TaskAbortExceptionConvertor;
-import plugins.Library.util.concurrent.Executors;
-import plugins.Library.util.exec.SimpleProgress;
-import plugins.Library.util.exec.TaskAbortException;
-import plugins.Library.util.func.Closure;
import freenet.pluginmanager.FredPlugin;
import freenet.pluginmanager.FredPluginL10n;
import freenet.pluginmanager.FredPluginRealVersioned;
import freenet.pluginmanager.FredPluginThreadless;
import freenet.pluginmanager.FredPluginVersioned;
-import freenet.pluginmanager.PluginNotFoundException;
import freenet.pluginmanager.PluginRespirator;
import freenet.support.Executor;
-import freenet.client.InsertException;
-import freenet.keys.FreenetURI;
-import freenet.keys.InsertableClientSSK;
-import freenet.l10n.BaseL10n.LANGUAGE;
+import plugins.Library.util.concurrent.Executors;
import freenet.pluginmanager.FredPluginFCP;
import freenet.support.Logger;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.FilenameFilter;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
import java.security.MessageDigest;
-import plugins.Library.index.TermEntryReaderWriter;
-import plugins.Library.index.xml.LibrarianHandler;
-import plugins.Library.io.serial.LiveArchiver;
-import plugins.Library.io.serial.Serialiser.PullTask;
-import plugins.Library.io.serial.Serialiser.PushTask;
/**
* Library class is the api for others to use search facilities, it is used by the interfaces
@@ -85,10 +34,7 @@ public class Main implements FredPlugin, FredPluginVersioned, freenet.pluginmana
private Library library;
private WebInterface webinterface;
private SpiderIndexUploader uploader;
-
- static volatile boolean logMINOR;
- static volatile boolean logDEBUG;
-
+
static {
Logger.registerClass(Main.class);
}
@@ -195,5 +141,4 @@ public void handle(PluginReplySender replysender, SimpleFieldSet params, final B
Logger.error(this, "Unknown command : \""+params.get("command"));
}
}
-
}
diff --git a/src/plugins/Library/Priority.java b/src/plugins/Library/Priority.java
new file mode 100644
index 00000000..0f702d26
--- /dev/null
+++ b/src/plugins/Library/Priority.java
@@ -0,0 +1,6 @@
+package plugins.Library;
+
+public enum Priority {
+ Interactive,
+ Bulk;
+}
diff --git a/src/plugins/Library/SpiderIndexURIs.java b/src/plugins/Library/SpiderIndexURIs.java
index 46ce8edd..e6ee89e6 100644
--- a/src/plugins/Library/SpiderIndexURIs.java
+++ b/src/plugins/Library/SpiderIndexURIs.java
@@ -24,11 +24,6 @@ class SpiderIndexURIs {
this.pr = pr;
}
- synchronized long setEdition(long newEdition) {
- if(newEdition < edition) return edition;
- else return edition = newEdition;
- }
-
synchronized FreenetURI loadSSKURIs() {
if(privURI == null) {
File f = new File(SpiderIndexUploader.PRIV_URI_FILENAME);
@@ -82,29 +77,32 @@ synchronized FreenetURI loadSSKURIs() {
} finally {
Closer.close(fos);
}
- try {
- fis = new FileInputStream(new File(SpiderIndexUploader.EDITION_FILENAME));
- BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
- try {
- edition = Long.parseLong(br.readLine());
- } catch (NumberFormatException e) {
- edition = 0;
- }
- Logger.debug(this, "Edition: "+edition);
- fis.close();
- fis = null;
- } catch (IOException e) {
- // Ignore
- edition = 0;
- } finally {
- Closer.close(fis);
- }
+//<<<<<<< HEAD
+// try {
+// fis = new FileInputStream(new File(SpiderIndexUploader.EDITION_FILENAME));
+// BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
+// try {
+// edition = Long.parseLong(br.readLine());
+// } catch (NumberFormatException e) {
+// edition = 0;
+// }
+// Logger.debug(this, "Edition: "+edition);
+// fis.close();
+// fis = null;
+// } catch (IOException e) {
+// // Ignore
+// edition = 0;
+// } finally {
+// Closer.close(fis);
+// }
+//=======
+//>>>>>>> debbiedub/fcp-uploader
}
return privURI;
}
synchronized FreenetURI getPrivateUSK() {
- return loadSSKURIs().setKeyType("USK").setDocName(SpiderIndexUploader.INDEX_DOCNAME).setSuggestedEdition(edition);
+ return loadSSKURIs().setKeyType("USK").setDocName(SpiderIndexUploader.INDEX_DOCNAME).setSuggestedEdition(getLastUploadedEdition());
}
/** Will return edition -1 if no successful uploads so far, otherwise the correct edition. */
@@ -114,8 +112,23 @@ synchronized FreenetURI getPublicUSK() {
}
private synchronized long getLastUploadedEdition() {
- /** If none uploaded, return -1, otherwise return the last uploaded version. */
- return edition-1;
+ FileInputStream fis = null;
+ try {
+ fis = new FileInputStream(new File(SpiderIndexUploader.EDITION_FILENAME));
+ BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
+ try {
+ edition = Long.parseLong(br.readLine());
+ } catch (NumberFormatException e) {
+ Logger.error(this, "Failed to parse edition", e);
+ }
+ fis.close();
+ fis = null;
+ } catch (IOException e) {
+ Logger.error(this, "Failed to read edition", e);
+ } finally {
+ Closer.close(fis);
+ }
+ return edition;
}
}
\ No newline at end of file
diff --git a/src/plugins/Library/SpiderIndexUploader.java b/src/plugins/Library/SpiderIndexUploader.java
index 9b853337..8dbca853 100644
--- a/src/plugins/Library/SpiderIndexUploader.java
+++ b/src/plugins/Library/SpiderIndexUploader.java
@@ -1,57 +1,19 @@
package plugins.Library;
-import java.io.BufferedReader;
-import java.io.EOFException;
import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
import java.io.FilenameFilter;
import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.net.MalformedURLException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.logging.Level;
-import plugins.Library.client.FreenetArchiver;
-import plugins.Library.index.ProtoIndex;
-import plugins.Library.index.ProtoIndexComponentSerialiser;
-import plugins.Library.index.ProtoIndexSerialiser;
-import plugins.Library.index.TermEntry;
-import plugins.Library.index.TermEntryReaderWriter;
-import plugins.Library.io.serial.LiveArchiver;
-import plugins.Library.io.serial.Serialiser.PullTask;
-import plugins.Library.io.serial.Serialiser.PushTask;
-import plugins.Library.util.SkeletonBTreeMap;
-import plugins.Library.util.SkeletonBTreeSet;
-import plugins.Library.util.TaskAbortExceptionConvertor;
-import plugins.Library.util.exec.SimpleProgress;
-import plugins.Library.util.exec.TaskAbortException;
-import plugins.Library.util.func.Closure;
-import freenet.client.InsertException;
import freenet.keys.FreenetURI;
-import freenet.node.RequestStarter;
import freenet.pluginmanager.PluginNotFoundException;
import freenet.pluginmanager.PluginReplySender;
import freenet.pluginmanager.PluginRespirator;
import freenet.support.Logger;
-import freenet.support.MutableBoolean;
import freenet.support.SimpleFieldSet;
-import freenet.support.TimeUtil;
import freenet.support.api.Bucket;
import freenet.support.io.BucketTools;
-import freenet.support.io.Closer;
import freenet.support.io.FileBucket;
-import freenet.support.io.FileUtil;
-import freenet.support.io.LineReadingInputStream;
+
public class SpiderIndexUploader {
@@ -66,828 +28,835 @@ public class SpiderIndexUploader {
}
private final PluginRespirator pr;
- private Object freenetMergeSync = new Object();
- private boolean freenetMergeRunning = false;
- private boolean diskMergeRunning = false;
-
- private final ArrayList toMergeToDisk = new ArrayList();
- static final int MAX_HANDLING_COUNT = 5;
- // When pushing is broken, allow max handling to reach this level before stalling forever to prevent running out of disk space.
- private int PUSH_BROKEN_MAX_HANDLING_COUNT = 10;
- // Don't use too much disk space, take into account fact that Spider slows down over time.
-
- private boolean pushBroken;
-
- /** The temporary on-disk index. We merge stuff into this until it exceeds a threshold size, then
- * we create a new diskIdx and merge the old one into the idxFreenet. */
- ProtoIndex idxDisk;
- /** idxDisk gets merged into idxFreenet this long after the last merge completed. */
- static final long MAX_TIME = 24*60*60*1000L;
- /** idxDisk gets merged into idxFreenet after this many incoming updates from Spider. */
- static final int MAX_UPDATES = 16;
- /** idxDisk gets merged into idxFreenet after it has grown to this many terms.
- * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must
- * fit into memory during the merge process. */
- static final int MAX_TERMS = 100*1000;
- /** idxDisk gets merged into idxFreenet after it has grown to this many terms.
- * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must
- * fit into memory during the merge process. */
- static final int MAX_TERMS_NOT_UPLOADED = 10*1000;
- /** Maximum size of a single entry, in TermPageEntry count, on disk. If we exceed this we force an
- * insert-to-freenet and move on to a new disk index. The problem is that the merge to Freenet has
- * to keep the whole of each entry in RAM. This is only true for the data being merged in - the
- * on-disk index - and not for the data on Freenet, which is pulled on demand. SCALABILITY */
- static final int MAX_DISK_ENTRY_SIZE = 10000;
- /** Like pushNumber, the number of the current disk dir, used to create idxDiskDir. */
- private int dirNumber;
- static final String DISK_DIR_PREFIX = "library-temp-index-";
- /** Directory the current idxDisk is saved in. */
- File idxDiskDir;
- private int mergedToDisk;
-
- ProtoIndexSerialiser srl = null;
- FreenetURI lastUploadURI = null;
- String lastDiskIndexName;
- /** The uploaded index on Freenet. This never changes, it just gets updated. */
- ProtoIndex idxFreenet;
-
+//<<<<<<< HEAD
+// private final Object freenetMergeSync = new Object();
+// private boolean freenetMergeRunning = false;
+// private boolean diskMergeRunning = false;
+//
+// private final ArrayList toMergeToDisk = new ArrayList();
+// static final int MAX_HANDLING_COUNT = 5;
+// // When pushing is broken, allow max handling to reach this level before stalling forever to prevent running out of disk space.
+// private int PUSH_BROKEN_MAX_HANDLING_COUNT = 10;
+// // Don't use too much disk space, take into account fact that Spider slows down over time.
+//
+// // Flag for the Spider plugin to hang
+// private boolean pushBroken;
+//
+// /** The temporary on-disk index. We merge stuff into this until it exceeds a threshold size, then
+// * we create a new diskIdx and merge the old one into the idxFreenet. */
+// ProtoIndex idxDisk;
+// /** idxDisk gets merged into idxFreenet this long after the last merge completed. */
+// static final long MAX_TIME = 24*60*60*1000L;
+// /** idxDisk gets merged into idxFreenet after this many incoming updates from Spider. */
+// static final int MAX_UPDATES = 16;
+// /** idxDisk gets merged into idxFreenet after it has grown to this many terms.
+// * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must
+// * fit into memory during the merge process. */
+// static final int MAX_TERMS = 100*1000;
+// /** idxDisk gets merged into idxFreenet after it has grown to this many terms.
+// * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must
+// * fit into memory during the merge process. */
+// static final int MAX_TERMS_NOT_UPLOADED = 10*1000;
+// /** Maximum size of a single entry, in TermPageEntry count, on disk. If we exceed this we force an
+// * insert-to-freenet and move on to a new disk index. The problem is that the merge to Freenet has
+// * to keep the whole of each entry in RAM. This is only true for the data being merged in - the
+// * on-disk index - and not for the data on Freenet, which is pulled on demand. SCALABILITY */
+// static final int MAX_DISK_ENTRY_SIZE = 10000;
+// /** Like pushNumber, the number of the current disk dir, used to create idxDiskDir. */
+// private int dirNumber;
+// static final String DISK_DIR_PREFIX = "library-temp-index-";
+// /** Directory the current idxDisk is saved in. */
+// File idxDiskDir;
+// private int mergedToDisk;
+//
+// ProtoIndexSerialiser srl = null;
+// FreenetURI lastUploadURI = null;
+// String lastDiskIndexName;
+// /** The uploaded index on Freenet. This never changes, it just gets updated. */
+// ProtoIndex idxFreenet;
+//
+//=======
+//>>>>>>> debbiedub/fcp-uploader
private final SpiderIndexURIs spiderIndexURIs;
- long pushNumber;
- static final String LAST_URL_FILENAME = "library.index.lastpushed.chk";
+ private long pushNumber;
static final String PRIV_URI_FILENAME = "library.index.privkey";
static final String PUB_URI_FILENAME = "library.index.pubkey";
- static final String EDITION_FILENAME = "library.index.next-edition";
-
- static final String LAST_DISK_FILENAME = "library.index.lastpushed.disk";
+ static final String EDITION_FILENAME = "library.index.last-edition";
static final String BASE_FILENAME_PUSH_DATA = "library.index.data.";
- /** Merge from the Bucket chain to the on-disk idxDisk. */
- protected void wrapMergeToDisk() {
- spiderIndexURIs.loadSSKURIs();
- boolean first = true;
- while(true) {
- final Bucket data;
- synchronized(freenetMergeSync) {
- if(pushBroken) {
- Logger.error(this, "Pushing broken");
- return;
- }
- if(first && diskMergeRunning) {
- Logger.error(this, "Already running a handler!");
- return;
- } else if((!first) && (!diskMergeRunning)) {
- Logger.error(this, "Already running yet runningHandler is false?!");
- return;
- }
- first = false;
- if(toMergeToDisk.size() == 0) {
- if(logMINOR) Logger.minor(this, "Nothing to handle");
- diskMergeRunning = false;
- freenetMergeSync.notifyAll();
- return;
- }
- data = toMergeToDisk.remove(0);
- freenetMergeSync.notifyAll();
- diskMergeRunning = true;
- }
- try {
- mergeToDisk(data);
- } catch (Throwable t) {
- // Failed.
- synchronized(freenetMergeSync) {
- diskMergeRunning = false;
- pushBroken = true;
- freenetMergeSync.notifyAll();
- }
- if(t instanceof RuntimeException)
- throw (RuntimeException)t;
- if(t instanceof Error)
- throw (Error)t;
- }
- }
- }
-
- // This is a member variable because it is huge, and having huge stuff in local variables seems to upset the default garbage collector.
- // It doesn't need to be synchronized because it's always used from mergeToDisk, which never runs in parallel.
-	private Map<String, SortedSet<TermEntry>> newtrees;
-	// Ditto
-	private SortedSet<String> terms;
-
- ProtoIndexSerialiser srlDisk = null;
- private ProtoIndexComponentSerialiser leafsrlDisk;
-
- private long lastMergedToFreenet = -1;
-
- /** Merge a bucket of TermEntry's into an on-disk index. */
- private void mergeToDisk(Bucket data) {
-
- boolean newIndex = false;
-
- if(idxDiskDir == null) {
- newIndex = true;
- if(!createDiskDir()) return;
- }
-
- if(!makeDiskDirSerialiser()) return;
-
- // Read data into newtrees and trees.
- long entriesAdded = readTermsFrom(data);
-
- if(terms.size() == 0) {
- Logger.debug(this, "Nothing to merge");
- synchronized(this) {
- newtrees = null;
- terms = null;
- }
- return;
- }
-
- // Merge the new data to the disk index.
-
- try {
- final MutableBoolean maxDiskEntrySizeExceeded = new MutableBoolean();
- maxDiskEntrySizeExceeded.value = false;
- long mergeStartTime = System.currentTimeMillis();
- if(newIndex) {
- if(createDiskIndex())
- maxDiskEntrySizeExceeded.value = true;
- } else {
- // async merge
- Closure>, TaskAbortException> clo =
- createMergeFromNewtreesClosure(maxDiskEntrySizeExceeded);
- assert(idxDisk.ttab.isBare());
- Logger.debug(this, "Merging "+terms.size()+" terms, tree.size = "+idxDisk.ttab.size()+" from "+data+"...");
- idxDisk.ttab.update(terms, null, clo, new TaskAbortExceptionConvertor());
-
- }
- // Synchronize anyway so garbage collector knows about it.
- synchronized(this) {
- newtrees = null;
- terms = null;
- }
- assert(idxDisk.ttab.isBare());
- PushTask task4 = new PushTask(idxDisk);
- srlDisk.push(task4);
-
- long mergeEndTime = System.currentTimeMillis();
- Logger.debug(this, entriesAdded + " entries merged to disk in " + (mergeEndTime-mergeStartTime) + " ms, root at " + task4.meta + ", ");
- // FileArchiver produces a String, which is a filename not including the prefix or suffix.
- String uri = (String)task4.meta;
- lastDiskIndexName = uri;
- Logger.normal(this, "Pushed new index to file "+uri);
- if(writeStringTo(new File(LAST_DISK_FILENAME), uri) &&
- writeStringTo(new File(idxDiskDir, LAST_DISK_FILENAME), uri)) {
- // Successfully uploaded and written new status. Can delete the incoming data.
- data.free();
- }
-
- maybeMergeToFreenet(maxDiskEntrySizeExceeded);
- } catch (TaskAbortException e) {
- Logger.error(this, "Failed to upload index for spider: ", e);
- e.printStackTrace();
- synchronized(freenetMergeSync) {
- pushBroken = true;
- }
- }
- }
-
- /** We have just written a Bucket of new data to an on-disk index. We may or may not want to
- * upload to an on-Freenet index, depending on how big the data is etc. If we do, we will need
- * to create a new on-disk index.
- * @param maxDiskEntrySizeExceeded A flag object which is set (off-thread) if any single term
- * in the index is very large.
- */
- private void maybeMergeToFreenet(MutableBoolean maxDiskEntrySizeExceeded) {
- // Maybe chain to mergeToFreenet ???
-
- boolean termTooBig = false;
- synchronized(maxDiskEntrySizeExceeded) {
- termTooBig = maxDiskEntrySizeExceeded.value;
- }
-
- mergedToDisk++;
- if((lastMergedToFreenet > 0 && idxDisk.ttab.size() > MAX_TERMS) ||
- (idxDisk.ttab.size() > MAX_TERMS_NOT_UPLOADED)
- || (mergedToDisk > MAX_UPDATES) || termTooBig ||
- (lastMergedToFreenet > 0 && (System.currentTimeMillis() - lastMergedToFreenet) > MAX_TIME)) {
-
- final ProtoIndex diskToMerge = idxDisk;
- final File dir = idxDiskDir;
- Logger.debug(this, "Exceeded threshold, starting new disk index and starting merge from disk to Freenet...");
- mergedToDisk = 0;
- lastMergedToFreenet = -1;
- idxDisk = null;
- srlDisk = null;
- leafsrlDisk = null;
- idxDiskDir = null;
- lastDiskIndexName = null;
-
- synchronized(freenetMergeSync) {
- while(freenetMergeRunning) {
- if(pushBroken) return;
- Logger.normal(this, "Need to merge to Freenet, but last merge not finished yet. Waiting...");
- try {
- freenetMergeSync.wait();
- } catch (InterruptedException e) {
- // Ignore
- }
- }
- if(pushBroken) return;
- freenetMergeRunning = true;
- }
-
- Runnable r = new Runnable() {
-
- public void run() {
- try {
- mergeToFreenet(diskToMerge, dir);
- } catch (Throwable t) {
- Logger.error(this, "Merge to Freenet failed: ", t);
- t.printStackTrace();
- synchronized(freenetMergeSync) {
- pushBroken = true;
- }
- } finally {
- synchronized(freenetMergeSync) {
- freenetMergeRunning = false;
- if(!pushBroken)
- lastMergedToFreenet = System.currentTimeMillis();
- freenetMergeSync.notifyAll();
- }
- }
- }
-
- };
- pr.getNode().executor.execute(r, "Library: Merge data from disk to Freenet");
- } else {
- Logger.debug(this, "Not merging to Freenet yet: "+idxDisk.ttab.size()+" terms in index, "+mergedToDisk+" merges, "+(lastMergedToFreenet <= 0 ? "never merged to Freenet" : ("last merged to Freenet "+TimeUtil.formatTime(System.currentTimeMillis() - lastMergedToFreenet))+"ago"));
- }
- }
-
- private boolean writeURITo(File filename, FreenetURI uri) {
- return writeStringTo(filename, uri.toString());
- }
-
- private boolean writeStringTo(File filename, String uri) {
- FileOutputStream fos = null;
- try {
- fos = new FileOutputStream(filename);
- OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8");
- osw.write(uri.toString());
- osw.close();
- fos = null;
- return true;
- } catch (IOException e) {
- Logger.error(this, "Failed to write to "+filename+" : "+uri, e);
- return false;
- } finally {
- Closer.close(fos);
- }
- }
-
- private String readStringFrom(File file) {
- String ret;
- FileInputStream fis = null;
- try {
- fis = new FileInputStream(file);
- BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
- ret = br.readLine();
- fis.close();
- fis = null;
- return ret;
- } catch (IOException e) {
- // Ignore
- return null;
- } finally {
- Closer.close(fis);
- }
- }
-
- private FreenetURI readURIFrom(File file) {
- String s = readStringFrom(file);
- if(s != null) {
- try {
- return new FreenetURI(s);
- } catch (MalformedURLException e) {
- // Ignore.
- }
- }
- return null;
- }
-
- /** Create a callback object which will do the merging of individual terms. This will be called
- * for each term as it is unpacked from the existing on-disk index. It then merges in new data
- * from newtrees and writes the subtree for the term back to disk. Most of the work is done in
- * update() below.
- * @param maxDiskEntrySizeExceeded Will be set if any single term is so large that we need to
- * upload to Freenet immediately. */
- private Closure>, TaskAbortException> createMergeFromNewtreesClosure(final MutableBoolean maxDiskEntrySizeExceeded) {
- return new
- Closure>, TaskAbortException>() {
- /*@Override**/ public void invoke(Map.Entry> entry) throws TaskAbortException {
- String key = entry.getKey();
- SkeletonBTreeSet tree = entry.getValue();
- if(logMINOR) Logger.minor(this, "Processing: "+key+" : "+tree);
- if(tree != null)
- Logger.debug(this, "Merging data (on disk) in term "+key);
- else
- Logger.debug(this, "Adding new term to disk index: "+key);
- //System.out.println("handling " + key + ((tree == null)? " (new)":" (old)"));
- if (tree == null) {
- entry.setValue(tree = makeEntryTree(leafsrlDisk));
- }
- assert(tree.isBare());
- SortedSet toMerge = newtrees.get(key);
- tree.update(toMerge, null);
- if(toMerge.size() > MAX_DISK_ENTRY_SIZE)
- synchronized(maxDiskEntrySizeExceeded) {
- maxDiskEntrySizeExceeded.value = true;
- }
- toMerge = null;
- newtrees.remove(key);
- assert(tree.isBare());
- if(logMINOR) Logger.minor(this, "Updated: "+key+" : "+tree);
- //System.out.println("handled " + key);
- }
- };
- }
-
- /** Create a new on-disk index from terms and newtrees.
- * @return True if the size of any one item in the index is so large that we must upload
- * immediately to Freenet.
- * @throws TaskAbortException If something broke catastrophically. */
- private boolean createDiskIndex() throws TaskAbortException {
- boolean tooBig = false;
- // created a new index, fill it with data.
- // DON'T MERGE, merge with a lot of data will deadlock.
- // FIXME throw in update() if it will deadlock.
- for(String key : terms) {
- SkeletonBTreeSet tree = makeEntryTree(leafsrlDisk);
- SortedSet toMerge = newtrees.get(key);
- tree.addAll(toMerge);
- if(toMerge.size() > MAX_DISK_ENTRY_SIZE)
- tooBig = true;
- toMerge = null;
- tree.deflate();
- assert(tree.isBare());
- idxDisk.ttab.put(key, tree);
- }
- idxDisk.ttab.deflate();
- return tooBig;
- }
-
- /** Read the TermEntry's from the Bucket into newtrees and terms, and set up the index
- * properties.
- * @param data The Bucket containing TermPageEntry's etc serialised with TermEntryReaderWriter.
- */
- private long readTermsFrom(Bucket data) {
- FileWriter w = null;
-		newtrees = new HashMap<String, SortedSet<TermEntry>>();
-		terms = new TreeSet<String>();
- int entriesAdded = 0;
- InputStream is = null;
- try {
- Logger.normal(this, "Bucket of buffer received, "+data.size()+" bytes");
- is = data.getInputStream();
- SimpleFieldSet fs = new SimpleFieldSet(new LineReadingInputStream(is), 1024, 512, true, true, true);
- idxDisk.setName(fs.get("index.title"));
- idxDisk.setOwnerEmail(fs.get("index.owner.email"));
- idxDisk.setOwner(fs.get("index.owner.name"));
- idxDisk.setTotalPages(fs.getLong("totalPages", -1));
- try{
-				while(true){ // Keep going until an EOFException is thrown
- TermEntry readObject = TermEntryReaderWriter.getInstance().readObject(is);
-				SortedSet<TermEntry> set = newtrees.get(readObject.subj);
-				if(set == null)
-					newtrees.put(readObject.subj, set = new TreeSet<TermEntry>());
- set.add(readObject);
- terms.add(readObject.subj);
- entriesAdded++;
- }
- }catch(EOFException e){
- // EOF, do nothing
- }
- } catch (IOException ex) {
- java.util.logging.Logger.getLogger(Main.class.getName()).log(Level.SEVERE, null, ex);
- } finally {
- Closer.close(is);
- }
- return entriesAdded;
- }
-
- /** Create a directory for an on-disk index.
- * @return False if something broke and we can't continue. */
- private boolean createDiskDir() {
- dirNumber++;
- idxDiskDir = new File(DISK_DIR_PREFIX + Integer.toString(dirNumber));
- Logger.normal(this, "Created new disk dir for merging: "+idxDiskDir);
- if(!(idxDiskDir.mkdir() || idxDiskDir.isDirectory())) {
- Logger.error(this, "Unable to create new disk dir: "+idxDiskDir);
- synchronized(this) {
- pushBroken = true;
- return false;
- }
- }
- return true;
- }
-
- /** Set up the serialisers for an on-disk index.
- * @return False if something broke and we can't continue. */
- private boolean makeDiskDirSerialiser() {
- if(srlDisk == null) {
- srlDisk = ProtoIndexSerialiser.forIndex(idxDiskDir);
- LiveArchiver