Use of org.apache.lucene.store.Directory in project elasticsearch (by elastic).
Class StoreTests, method testCheckIntegrity.
public void testCheckIntegrity() throws IOException {
    Directory dir = newDirectory();
    long luceneFileLength = 0;
    // write random unicode strings followed by a Lucene codec footer (which stores the checksum)
    try (IndexOutput output = dir.createOutput("lucene_checksum.bin", IOContext.DEFAULT)) {
        int iters = scaledRandomIntBetween(10, 100);
        for (int i = 0; i < iters; i++) {
            BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
            output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
            luceneFileLength += bytesRef.length;
        }
        CodecUtil.writeFooter(output);
        luceneFileLength += CodecUtil.footerLength();
    }
    // read the file back: the length must match what was written and the footer checksum must be retrievable
    final long luceneChecksum;
    try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) {
        assertEquals(luceneFileLength, indexInput.length());
        luceneChecksum = CodecUtil.retrieveChecksum(indexInput);
    }
    dir.close();
}
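In the snippet above the footer checksum is only retrieved; to actually validate file integrity the whole file has to be re-read and compared against that footer. A minimal sketch of such a check, reusing the file written above (the helper name verifyFooterChecksum is hypothetical; CodecUtil.checksumEntireFile is the Lucene API that performs the comparison):

// Hedged sketch: re-verify the footer checksum of "lucene_checksum.bin".
// CodecUtil.checksumEntireFile reads the whole file, recomputes the checksum and
// throws CorruptIndexException if it does not match the value stored in the footer.
private static long verifyFooterChecksum(Directory dir, String name) throws IOException {
    try (IndexInput input = dir.openInput(name, IOContext.READONCE)) {
        return CodecUtil.checksumEntireFile(input);
    }
}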
Use of org.apache.lucene.store.Directory in project elasticsearch (by elastic).
Class StoreTests, method testVerifyingIndexInput.
public void testVerifyingIndexInput() throws IOException {
    Directory dir = newDirectory();
    IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    CodecUtil.writeFooter(output);
    output.close();
    // Check file
    IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
    long checksum = CodecUtil.retrieveChecksum(indexInput);
    indexInput.seek(0);
    IndexInput verifyingIndexInput = new Store.VerifyingIndexInput(dir.openInput("foo.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    Store.verify(verifyingIndexInput);
    assertThat(checksum, equalTo(((ChecksumIndexInput) verifyingIndexInput).getChecksum()));
    IOUtils.close(indexInput, verifyingIndexInput);
    // Corrupt file and check again
    corruptFile(dir, "foo.bar", "foo1.bar");
    verifyingIndexInput = new Store.VerifyingIndexInput(dir.openInput("foo1.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    try {
        Store.verify(verifyingIndexInput);
        fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        // ok
    }
    IOUtils.close(verifyingIndexInput);
    IOUtils.close(dir);
}
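The corruptFile helper used above is part of StoreTests and is not shown on this page. A hypothetical sketch of what such a helper could do (an assumption, not the actual implementation) is to copy the file and flip a single byte in the body so that footer verification fails on the copy:

// Hypothetical sketch of a corruptFile helper: copy fileIn to fileOut, flipping one
// byte before the footer so that checksum verification fails on the copy.
private void corruptFile(Directory dir, String fileIn, String fileOut) throws IOException {
    try (IndexInput input = dir.openInput(fileIn, IOContext.READONCE);
         IndexOutput output = dir.createOutput(fileOut, IOContext.DEFAULT)) {
        byte[] bytes = new byte[(int) input.length()];
        input.readBytes(bytes, 0, bytes.length);
        int pos = random().nextInt(bytes.length - CodecUtil.footerLength());
        bytes[pos] = (byte) ~bytes[pos]; // flip every bit of one byte in the file body
        output.writeBytes(bytes, 0, bytes.length);
    }
}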
Use of org.apache.lucene.store.Directory in project elasticsearch (by elastic).
Class StoreTests, method testCanOpenIndex.
public void testCanOpenIndex() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    IndexWriterConfig iwc = newIndexWriterConfig();
    Path tempDir = createTempDir();
    final BaseDirectoryWrapper dir = newFSDirectory(tempDir);
    // no index has been written yet, so the directory cannot be opened as an index
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();
    writer.close();
    // after a commit the index can be opened
    assertTrue(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) {
        @Override
        public Directory newDirectory() throws IOException {
            return dir;
        }
    };
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    // marking the store corrupted makes canOpenIndex return false again
    store.markStoreCorrupted(new CorruptIndexException("foo", "bar"));
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    store.close();
}
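Store.canOpenIndex is an Elasticsearch helper that also respects the shard lock and the corruption marker written by markStoreCorrupted. A rough plain-Lucene sketch of just the "is there a readable commit here?" part of the check (an assumption for illustration, not the actual implementation) could look like:

// Hedged sketch: check whether a directory contains a readable Lucene commit point.
static boolean hasReadableCommit(Path path) {
    try (Directory dir = FSDirectory.open(path)) {
        SegmentInfos.readLatestCommit(dir); // throws if there is no readable commit
        return true;
    } catch (Exception e) {
        return false;
    }
}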
Use of org.apache.lucene.store.Directory in project elasticsearch (by elastic).
Class IndexSearcherWrapperTests, method testReaderCloseListenerIsCalled.
public void testReaderCloseListenerIsCalled() throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig();
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(new TextField("field", "doc", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1));
    IndexSearcher searcher = new IndexSearcher(open);
    assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits);
    final AtomicInteger closeCalls = new AtomicInteger(0);
    IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {
        @Override
        public DirectoryReader wrap(DirectoryReader reader) throws IOException {
            return new FieldMaskingReader("field", reader, closeCalls);
        }

        @Override
        public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
            return searcher;
        }
    };
    final int sourceRefCount = open.getRefCount();
    final AtomicInteger count = new AtomicInteger();
    final AtomicInteger outerCount = new AtomicInteger();
    try (Engine.Searcher engineSearcher = new Engine.Searcher("foo", searcher)) {
        final Engine.Searcher wrap = wrapper.wrap(engineSearcher);
        assertEquals(1, wrap.reader().getRefCount());
        ElasticsearchDirectoryReader.addReaderCloseListener(wrap.getDirectoryReader(), reader -> {
            if (reader == open) {
                count.incrementAndGet();
            }
            outerCount.incrementAndGet();
        });
        assertEquals(0, wrap.searcher().search(new TermQuery(new Term("field", "doc")), 1).totalHits);
        wrap.close();
        assertFalse("wrapped reader is closed", wrap.reader().tryIncRef());
        assertEquals(sourceRefCount, open.getRefCount());
    }
    assertEquals(1, closeCalls.get());
    IOUtils.close(open, writer, dir);
    assertEquals(1, outerCount.get());
    assertEquals(1, count.get());
    assertEquals(0, open.getRefCount());
    assertEquals(1, closeCalls.get());
}
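Both wrapper tests rely on a FieldMaskingReader helper that hides the "field" field and counts close calls; its source is not shown on this page. A hedged sketch of how such a helper could be built from Lucene's FilterDirectoryReader and the test-framework FieldFilterLeafReader (an assumption matching the constructor used above, for a Lucene version that still exposes getCoreCacheKey, not necessarily the actual test class):

// Hypothetical sketch of a FieldMaskingReader: a FilterDirectoryReader that hides one
// field in every leaf and increments a counter when the reader is closed.
class FieldMaskingReader extends FilterDirectoryReader {
    private final String field;
    private final AtomicInteger closeCalls;

    FieldMaskingReader(String field, DirectoryReader in, AtomicInteger closeCalls) throws IOException {
        super(in, new SubReaderWrapper() {
            @Override
            public LeafReader wrap(LeafReader reader) {
                // negate = true: filter the named field out instead of keeping only it
                return new FieldFilterLeafReader(reader, Collections.singleton(field), true);
            }
        });
        this.field = field;
        this.closeCalls = closeCalls;
    }

    @Override
    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
        return new FieldMaskingReader(field, in, closeCalls);
    }

    @Override
    protected void doClose() throws IOException {
        super.doClose();
        closeCalls.incrementAndGet();
    }

    @Override
    public Object getCoreCacheKey() {
        // delegate so the cache key matches the unwrapped reader (see testIsCacheable below)
        return in.getCoreCacheKey();
    }
}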
Use of org.apache.lucene.store.Directory in project elasticsearch (by elastic).
Class IndexSearcherWrapperTests, method testIsCacheable.
public void testIsCacheable() throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig();
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(new TextField("field", "doc", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1));
    IndexSearcher searcher = new IndexSearcher(open);
    assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits);
    searcher.setSimilarity(iwc.getSimilarity());
    final AtomicInteger closeCalls = new AtomicInteger(0);
    IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {
        @Override
        public DirectoryReader wrap(DirectoryReader reader) throws IOException {
            return new FieldMaskingReader("field", reader, closeCalls);
        }

        @Override
        public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
            return searcher;
        }
    };
    final ConcurrentHashMap<Object, TopDocs> cache = new ConcurrentHashMap<>();
    try (Engine.Searcher engineSearcher = new Engine.Searcher("foo", searcher)) {
        try (Engine.Searcher wrap = wrapper.wrap(engineSearcher)) {
            ElasticsearchDirectoryReader.addReaderCloseListener(wrap.getDirectoryReader(), reader -> {
                cache.remove(reader.getCoreCacheKey());
            });
            TopDocs search = wrap.searcher().search(new TermQuery(new Term("field", "doc")), 1);
            cache.put(wrap.reader().getCoreCacheKey(), search);
        }
    }
    assertEquals(1, closeCalls.get());
    assertEquals(1, cache.size());
    IOUtils.close(open, writer, dir);
    assertEquals(0, cache.size());
    assertEquals(1, closeCalls.get());
}
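The pattern exercised above, keying a cache by the reader's core cache key and evicting the entry from a reader close listener, can be reused outside the test. A minimal sketch, assuming a DirectoryReader that wraps an ElasticsearchDirectoryReader obtained elsewhere (the cacheTopDocs helper and topDocsCache map are illustrative names, not Elasticsearch APIs):

// Hedged sketch: cache per-reader results and evict them once the reader core closes.
final ConcurrentHashMap<Object, TopDocs> topDocsCache = new ConcurrentHashMap<>();

void cacheTopDocs(DirectoryReader reader, TopDocs docs) throws IOException {
    // evict the entry as soon as the underlying reader is closed
    ElasticsearchDirectoryReader.addReaderCloseListener(reader, closed -> topDocsCache.remove(closed.getCoreCacheKey()));
    topDocsCache.put(reader.getCoreCacheKey(), docs);
}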