use of org.apache.lucene.store.IndexInput in project elasticsearch by elastic.
the class InputStreamIndexInputTests method testMarkRest.
public void testMarkRest() throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
    // write six bytes: 1, 1, 1, 2, 2, 2
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 1);
    }
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 2);
    }
    output.close();
    IndexInput input = dir.openInput("test", IOContext.DEFAULT);
    // expose only the first four bytes (1, 1, 1, 2) as an InputStream
    InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
    assertThat(is.markSupported(), equalTo(true));
    assertThat(is.read(), equalTo(1));
    assertThat(is.read(), equalTo(1));
    // mark after two reads, consume the remaining two bytes...
    is.mark(0);
    assertThat(is.read(), equalTo(1));
    assertThat(is.read(), equalTo(2));
    // ...then reset and read the same two bytes again
    is.reset();
    assertThat(is.read(), equalTo(1));
    assertThat(is.read(), equalTo(2));
}
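What the test exercises is essentially an InputStream view over an IndexInput that is capped at a byte limit and supports mark/reset by seeking. A rough sketch of that idea (a hypothetical class, not the actual Elasticsearch implementation; it assumes the IndexInput starts at file pointer 0):

import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.store.IndexInput;

// Hypothetical sketch: not the real InputStreamIndexInput.
class MarkableIndexInputStream extends InputStream {
    private final IndexInput input;
    private final long limit;        // maximum number of bytes to expose
    private long markedPointer = -1; // file pointer recorded by mark()

    MarkableIndexInputStream(IndexInput input, long limit) {
        this.input = input;
        this.limit = limit;
    }

    @Override
    public int read() throws IOException {
        if (input.getFilePointer() >= limit) {
            return -1; // cap reached: report end of stream
        }
        return input.readByte() & 0xFF;
    }

    @Override
    public boolean markSupported() {
        return true;
    }

    @Override
    public synchronized void mark(int readlimit) {
        // readlimit is irrelevant here: an IndexInput can always seek back
        markedPointer = input.getFilePointer();
    }

    @Override
    public synchronized void reset() throws IOException {
        if (markedPointer < 0) {
            throw new IOException("mark() was never called");
        }
        input.seek(markedPointer);
    }
}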
use of org.apache.lucene.store.IndexInput in project neo4j by neo4j.
the class PartitionedIndexStorage method cleanupLuceneDirectory.
/**
 * Removes the content of the Lucene directory denoted by the given {@link File file}. This might seem unnecessary
 * since we clean up the folder using the {@link FileSystemAbstraction file system}, but for testing we often use
 * in-memory directories whose content can't be removed via the file system.
 * <p>
 * Uses {@link FileUtils#windowsSafeIOOperation(FileUtils.FileOperation)} underneath.
 *
 * @param folder the path to the directory to clean up.
 * @param zip an optional zip output stream to archive files into.
 * @param buffer a byte buffer to use for copying bytes from the files into the archive.
 * @throws IOException if the removal operation fails.
 */
private void cleanupLuceneDirectory(File folder, ZipOutputStream zip, byte[] buffer) throws IOException {
    try (Directory dir = directoryFactory.open(folder)) {
        String folderName = folder.getName() + "/";
        if (zip != null) {
            zip.putNextEntry(new ZipEntry(folderName));
            zip.closeEntry();
        }
        String[] indexFiles = dir.listAll();
        for (String indexFile : indexFiles) {
            if (zip != null) {
                zip.putNextEntry(new ZipEntry(folderName + indexFile));
                try (IndexInput input = dir.openInput(indexFile, IOContext.READ)) {
                    for (long pos = 0, size = input.length(); pos < size; ) {
                        int read = Math.min(buffer.length, (int) (size - pos));
                        input.readBytes(buffer, 0, read);
                        pos += read;
                        zip.write(buffer, 0, read);
                    }
                }
                zip.closeEntry();
            }
            FileUtils.windowsSafeIOOperation(() -> dir.deleteFile(indexFile));
        }
    }
}
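The inner loop is a generally useful pattern: stream an index file through a fixed-size buffer instead of loading it whole. A standalone sketch of that pattern (hypothetical helper class; note that taking the min before the int cast also keeps the chunk size correct for files larger than 2 GB):

import java.io.IOException;
import java.io.OutputStream;
import org.apache.lucene.store.IndexInput;

final class IndexInputStreams {
    // Streams the full contents of an IndexInput into any OutputStream in
    // buffer-sized chunks, so large index files never sit in memory at once.
    static void copy(IndexInput input, OutputStream out, byte[] buffer) throws IOException {
        long size = input.length();
        for (long pos = 0; pos < size; ) {
            // min before the int cast keeps the chunk correct for files > 2 GB
            int chunk = (int) Math.min(buffer.length, size - pos);
            input.readBytes(buffer, 0, chunk);
            out.write(buffer, 0, chunk);
            pos += chunk;
        }
    }
}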
use of org.apache.lucene.store.IndexInput in project gerrit by GerritCodeReview.
the class DocIndexer method zip.
private byte[] zip(RAMDirectory dir) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    try (ZipOutputStream zip = new ZipOutputStream(buf)) {
        for (String name : dir.listAll()) {
            try (IndexInput in = dir.openInput(name, null)) {
                int len = (int) in.length();
                byte[] tmp = new byte[len];
                ZipEntry entry = new ZipEntry(name);
                entry.setSize(len);
                in.readBytes(tmp, 0, len);
                zip.putNextEntry(entry);
                zip.write(tmp, 0, len);
                zip.closeEntry();
            }
        }
    }
    return buf.toByteArray();
}
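Unlike the neo4j example, this reads each index file into memory in a single readBytes call, which is reasonable for the small documentation index Gerrit builds. A hypothetical inverse helper (illustrative, not part of Gerrit) restores such an archive into a fresh RAMDirectory, one IndexOutput per zip entry:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

final class IndexUnzipper {
    // Rebuilds a directory from the byte[] produced by zip(...) above.
    static RAMDirectory unzip(byte[] archive) throws IOException {
        RAMDirectory dir = new RAMDirectory();
        try (ZipInputStream zip = new ZipInputStream(new ByteArrayInputStream(archive))) {
            byte[] buffer = new byte[4096];
            for (ZipEntry entry; (entry = zip.getNextEntry()) != null; ) {
                try (IndexOutput out = dir.createOutput(entry.getName(), IOContext.DEFAULT)) {
                    for (int read; (read = zip.read(buffer)) != -1; ) {
                        out.writeBytes(buffer, 0, read); // copy the entry in chunks
                    }
                }
            }
        }
        return dir;
    }
}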
use of org.apache.lucene.store.IndexInput in project lucene-solr by apache.
the class FSTTester method doTest.
FST<T> doTest(int prune1, int prune2, boolean allowRandomSuffixSharing) throws IOException {
    if (LuceneTestCase.VERBOSE) {
        System.out.println("\nTEST: prune1=" + prune1 + " prune2=" + prune2);
    }
    final Builder<T> builder = new Builder<>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4,
        prune1, prune2,
        prune1 == 0 && prune2 == 0,
        allowRandomSuffixSharing ? random.nextBoolean() : true,
        allowRandomSuffixSharing ? TestUtil.nextInt(random, 1, 10) : Integer.MAX_VALUE,
        outputs, true, 15);
    for (InputOutput<T> pair : pairs) {
        if (pair.output instanceof List) {
            @SuppressWarnings("unchecked")
            List<Long> longValues = (List<Long>) pair.output;
            @SuppressWarnings("unchecked")
            final Builder<Object> builderObject = (Builder<Object>) builder;
            for (Long value : longValues) {
                builderObject.add(pair.input, value);
            }
        } else {
            builder.add(pair.input, pair.output);
        }
    }
    FST<T> fst = builder.finish();
    if (random.nextBoolean() && fst != null) {
        IOContext context = LuceneTestCase.newIOContext(random);
        IndexOutput out = dir.createOutput("fst.bin", context);
        fst.save(out);
        out.close();
        IndexInput in = dir.openInput("fst.bin", context);
        try {
            fst = new FST<>(in, outputs);
        } finally {
            in.close();
            dir.deleteFile("fst.bin");
        }
    }
    if (LuceneTestCase.VERBOSE && pairs.size() <= 20 && fst != null) {
        System.out.println("Printing FST as dot file to stdout:");
        final Writer w = new OutputStreamWriter(System.out, Charset.defaultCharset());
        Util.toDot(fst, w, false, false);
        w.flush();
        System.out.println("END dot file");
    }
    if (LuceneTestCase.VERBOSE) {
        if (fst == null) {
            System.out.println(" fst has 0 nodes (fully pruned)");
        } else {
            System.out.println(" fst has " + builder.getNodeCount() + " nodes and " + builder.getArcCount() + " arcs");
        }
    }
    if (prune1 == 0 && prune2 == 0) {
        verifyUnPruned(inputMode, fst);
    } else {
        verifyPruned(inputMode, fst, prune1, prune2);
    }
    nodeCount = builder.getNodeCount();
    arcCount = builder.getArcCount();
    return fst;
}
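The IndexInput-relevant part is the middle block: the FST is serialized to "fst.bin" through an IndexOutput and rebuilt from an IndexInput. A minimal standalone sketch of that round trip (written against the Lucene 6.x-era FST API this snippet uses; constructor and method names vary across versions):

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.fst.Builder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;

public class FstRoundTrip {
    public static void main(String[] args) throws Exception {
        // build a tiny FST mapping terms to longs (inputs must be added in sorted order)
        PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
        Builder<Long> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
        IntsRefBuilder scratch = new IntsRefBuilder();
        builder.add(Util.toIntsRef(new BytesRef("cat"), scratch), 5L);
        builder.add(Util.toIntsRef(new BytesRef("dog"), scratch), 7L);
        FST<Long> fst = builder.finish();

        try (Directory dir = new RAMDirectory()) {
            // save through an IndexOutput...
            try (IndexOutput out = dir.createOutput("fst.bin", IOContext.DEFAULT)) {
                fst.save(out);
            }
            // ...and rebuild from an IndexInput
            try (IndexInput in = dir.openInput("fst.bin", IOContext.DEFAULT)) {
                FST<Long> loaded = new FST<>(in, outputs);
                System.out.println(Util.get(loaded, new BytesRef("dog"))); // prints 7
            }
        }
    }
}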
use of org.apache.lucene.store.IndexInput in project lucene-solr by apache.
the class SolrCore method writeNewIndexProps.
/**
 * Write the index.properties file with the new index subdirectory name.
 * @param dir a data directory (containing an index.properties file)
 * @param tmpFileName the file name to write the new index.properties to
 * @param tmpIdxDirName the new index directory name
 */
private static void writeNewIndexProps(Directory dir, String tmpFileName, String tmpIdxDirName) {
    if (tmpFileName == null) {
        tmpFileName = IndexFetcher.INDEX_PROPERTIES;
    }
    final Properties p = new Properties();
    // Read existing properties
    try {
        final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
        final InputStream is = new PropertiesInputStream(input);
        try {
            p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
        } catch (Exception e) {
            log.error("Unable to load " + IndexFetcher.INDEX_PROPERTIES, e);
        } finally {
            IOUtils.closeQuietly(is);
        }
    } catch (IOException e) {
        // ignore; the file does not exist yet
    }
    p.put("index", tmpIdxDirName);
    // Write new properties
    Writer os = null;
    try {
        IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
        os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
        p.store(os, IndexFetcher.INDEX_PROPERTIES);
        dir.sync(Collections.singleton(tmpFileName));
    } catch (Exception e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
    } finally {
        IOUtils.closeQuietly(os);
    }
}
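PropertiesOutputStream and PropertiesInputStream are small Solr adapters that let java.util.Properties write to an IndexOutput and read from an IndexInput. A minimal sketch of the corresponding read path (hypothetical helper; the import location follows the Solr version of this snippet, and closing the stream is assumed to close the wrapped IndexInput, as the finally block above relies on):

import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.solr.util.PropertiesInputStream;

final class IndexPropsReader {
    /** Returns the "index" entry from index.properties, or null if it is absent. */
    static String readIndexDirName(Directory dir) throws Exception {
        Properties p = new Properties();
        IndexInput input = dir.openInput("index.properties", IOContext.DEFAULT);
        try (InputStream is = new PropertiesInputStream(input)) { // closing is also closes input
            p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
        }
        return p.getProperty("index");
    }
}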