Use of org.apache.lucene.store.IndexInput in project lucene-solr by apache.
The class TestDocIdsWriter, method test.
private void test(Directory dir, int[] ints) throws Exception {
  final long len;
  try (IndexOutput out = dir.createOutput("tmp", IOContext.DEFAULT)) {
    DocIdsWriter.writeDocIds(ints, 0, ints.length, out);
    len = out.getFilePointer();
    if (random().nextBoolean()) {
      // garbage
      out.writeLong(0);
    }
  }
  try (IndexInput in = dir.openInput("tmp", IOContext.READONCE)) {
    int[] read = new int[ints.length];
    DocIdsWriter.readInts(in, ints.length, read);
    assertArrayEquals(ints, read);
    assertEquals(len, in.getFilePointer());
  }
  try (IndexInput in = dir.openInput("tmp", IOContext.READONCE)) {
    int[] read = new int[ints.length];
    DocIdsWriter.readInts(in, ints.length, new IntersectVisitor() {
      int i = 0;

      @Override
      public void visit(int docID) throws IOException {
        read[i++] = docID;
      }

      @Override
      public void visit(int docID, byte[] packedValue) throws IOException {
        throw new UnsupportedOperationException();
      }

      @Override
      public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
        throw new UnsupportedOperationException();
      }
    });
    assertArrayEquals(ints, read);
    assertEquals(len, in.getFilePointer());
  }
  dir.deleteFile("tmp");
}
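DocIdsWriter is package-private to org.apache.lucene.util.bkd, but the shape of the test above is the generic IndexOutput/IndexInput round trip. A minimal standalone sketch of that pattern, assuming lucene-core on the classpath and substituting public vInt encoding for DocIdsWriter (the class and file names below are illustrative, not from the test):

import java.nio.file.Files;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

public class RoundTripSketch {
  public static void main(String[] args) throws Exception {
    int[] ints = { 3, 17, 42 };
    try (Directory dir = FSDirectory.open(Files.createTempDirectory("roundtrip"))) {
      final long len;
      // write the values, remember the file pointer, then append unrelated bytes
      try (IndexOutput out = dir.createOutput("tmp", IOContext.DEFAULT)) {
        for (int v : ints) {
          out.writeVInt(v);
        }
        len = out.getFilePointer();
        // trailing garbage, like the test's optional writeLong(0): it must not be read back
        out.writeLong(0);
      }
      // read the values back and check the reader stopped exactly at len
      try (IndexInput in = dir.openInput("tmp", IOContext.READONCE)) {
        for (int expected : ints) {
          if (in.readVInt() != expected) {
            throw new AssertionError("round trip mismatch");
          }
        }
        if (in.getFilePointer() != len) {
          throw new AssertionError("reader consumed the wrong number of bytes");
        }
      }
      dir.deleteFile("tmp");
    }
  }
}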
Use of org.apache.lucene.store.IndexInput in project lucene-solr by apache.
The class TestDirectMonotonic, method testEmpty.
public void testEmpty() throws IOException {
  Directory dir = newDirectory();
  final int blockShift = TestUtil.nextInt(random(), DirectMonotonicWriter.MIN_BLOCK_SHIFT, DirectMonotonicWriter.MAX_BLOCK_SHIFT);
  final long dataLength;
  try (IndexOutput metaOut = dir.createOutput("meta", IOContext.DEFAULT);
      IndexOutput dataOut = dir.createOutput("data", IOContext.DEFAULT)) {
    DirectMonotonicWriter w = DirectMonotonicWriter.getInstance(metaOut, dataOut, 0, blockShift);
    w.finish();
    dataLength = dataOut.getFilePointer();
  }
  try (IndexInput metaIn = dir.openInput("meta", IOContext.READONCE);
      IndexInput dataIn = dir.openInput("data", IOContext.DEFAULT)) {
    DirectMonotonicReader.Meta meta = DirectMonotonicReader.loadMeta(metaIn, 0, blockShift);
    DirectMonotonicReader.getInstance(meta, dataIn.randomAccessSlice(0, dataLength));
    // no exception
  }
  dir.close();
}
Use of org.apache.lucene.store.IndexInput in project lucene-solr by apache.
The class TestDirectMonotonic, method testSimple.
public void testSimple() throws IOException {
  Directory dir = newDirectory();
  final int blockShift = 2;
  List<Long> actualValues = Arrays.asList(1L, 2L, 5L, 7L, 8L, 100L);
  final int numValues = actualValues.size();
  final long dataLength;
  try (IndexOutput metaOut = dir.createOutput("meta", IOContext.DEFAULT);
      IndexOutput dataOut = dir.createOutput("data", IOContext.DEFAULT)) {
    DirectMonotonicWriter w = DirectMonotonicWriter.getInstance(metaOut, dataOut, numValues, blockShift);
    for (long v : actualValues) {
      w.add(v);
    }
    w.finish();
    dataLength = dataOut.getFilePointer();
  }
  try (IndexInput metaIn = dir.openInput("meta", IOContext.READONCE);
      IndexInput dataIn = dir.openInput("data", IOContext.DEFAULT)) {
    DirectMonotonicReader.Meta meta = DirectMonotonicReader.loadMeta(metaIn, numValues, blockShift);
    LongValues values = DirectMonotonicReader.getInstance(meta, dataIn.randomAccessSlice(0, dataLength));
    for (int i = 0; i < numValues; ++i) {
      final long v = values.get(i);
      assertEquals(actualValues.get(i).longValue(), v);
    }
  }
  dir.close();
}
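One detail worth spelling out in testSimple: DirectMonotonicWriter splits its input into blocks of 1 << blockShift values, so with blockShift = 2 each block covers four values and the six values deliberately span more than one block. The arithmetic, as a small illustrative aside (variable names are not from the test):

int blockShift = 2;
int blockSize = 1 << blockShift;                             // 4 values per block
int numValues = 6;                                           // 1, 2, 5, 7, 8, 100
int numBlocks = (numValues + blockSize - 1) >>> blockShift;  // 2 blocks: {1, 2, 5, 7} and {8, 100}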
Use of org.apache.lucene.store.IndexInput in project lucene-solr by apache.
The class IndexAndTaxonomyRevisionTest, method testOpen.
@Test
public void testOpen() throws Exception {
  Directory indexDir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  Directory taxoDir = newDirectory();
  SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
  try {
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
    for (Entry<String, List<RevisionFile>> e : rev.getSourceFiles().entrySet()) {
      String source = e.getKey();
      // silly, both directories are closed in the end
      @SuppressWarnings("resource") Directory dir = source.equals(IndexAndTaxonomyRevision.INDEX_SOURCE) ? indexDir : taxoDir;
      for (RevisionFile file : e.getValue()) {
        IndexInput src = dir.openInput(file.fileName, IOContext.READONCE);
        InputStream in = rev.open(source, file.fileName);
        assertEquals(src.length(), in.available());
        byte[] srcBytes = new byte[(int) src.length()];
        byte[] inBytes = new byte[(int) src.length()];
        int offset = 0;
        if (random().nextBoolean()) {
          int skip = random().nextInt(10);
          if (skip >= src.length()) {
            skip = 0;
          }
          in.skip(skip);
          src.seek(skip);
          offset = skip;
        }
        src.readBytes(srcBytes, offset, srcBytes.length - offset);
        in.read(inBytes, offset, inBytes.length - offset);
        assertArrayEquals(srcBytes, inBytes);
        IOUtils.close(src, in);
      }
    }
    indexWriter.close();
  } finally {
    IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
  }
}
Use of org.apache.lucene.store.IndexInput in project lucene-solr by apache.
The class IndexRevisionTest, method testOpen.
@Test
public void testOpen() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    writer.addDocument(new Document());
    writer.commit();
    Revision rev = new IndexRevision(writer);
    @SuppressWarnings("unchecked") Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
    String source = sourceFiles.keySet().iterator().next();
    for (RevisionFile file : sourceFiles.values().iterator().next()) {
      IndexInput src = dir.openInput(file.fileName, IOContext.READONCE);
      InputStream in = rev.open(source, file.fileName);
      assertEquals(src.length(), in.available());
      byte[] srcBytes = new byte[(int) src.length()];
      byte[] inBytes = new byte[(int) src.length()];
      int offset = 0;
      if (random().nextBoolean()) {
        int skip = random().nextInt(10);
        if (skip >= src.length()) {
          skip = 0;
        }
        in.skip(skip);
        src.seek(skip);
        offset = skip;
      }
      src.readBytes(srcBytes, offset, srcBytes.length - offset);
      in.read(inBytes, offset, inBytes.length - offset);
      assertArrayEquals(srcBytes, inBytes);
      IOUtils.close(src, in);
    }
    writer.close();
  } finally {
    IOUtils.close(dir);
  }
}
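Both testOpen methods compare a file exposed by the Revision against the same file read through Directory.openInput. That comparison could be factored into a small helper; the sketch below is illustrative (it is not part of the Lucene tests) and, unlike them, loops on InputStream.read to cope with partial reads:

// Hypothetical helper: assert that a revision's InputStream and the directory's IndexInput
// hold the same bytes. Assumes the file fits in an int-sized array, as the tests do.
static void assertSameBytes(IndexInput src, InputStream in) throws IOException {
  byte[] srcBytes = new byte[(int) src.length()];
  src.readBytes(srcBytes, 0, srcBytes.length);
  byte[] inBytes = new byte[srcBytes.length];
  int off = 0;
  while (off < inBytes.length) {
    int n = in.read(inBytes, off, inBytes.length - off);
    if (n < 0) {
      break; // premature end of stream; the length assert below fails loudly
    }
    off += n;
  }
  assertEquals(inBytes.length, off);
  assertArrayEquals(srcBytes, inBytes);
}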