Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
Class MockEngineSupport, method newSearcher:
public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException {
    IndexReader reader = searcher.getIndexReader();
    IndexReader wrappedReader = reader;
    assert reader != null;
    if (reader instanceof DirectoryReader && mockContext.wrapReader) {
        wrappedReader = wrapReader((DirectoryReader) reader);
    }
    // this executes basic query checks and asserts that weights are normalized only once etc.
    final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader);
    assertingIndexSearcher.setSimilarity(searcher.getSimilarity(true));
    assertingIndexSearcher.setQueryCache(filterCache);
    assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy);
    return assertingIndexSearcher;
}
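The wrapReader call is only legal for top-level DirectoryReaders, which is why the instanceof check guards it. A minimal sketch of what such a wrapper can look like, assuming a Lucene 6-era FilterDirectoryReader subclass; the class name and pass-through behavior below are illustrative, not the actual MockEngineSupport wrapper:

// Illustrative wrapper only; the real MockEngineSupport wrapper adds test instrumentation.
final class PassThroughDirectoryReader extends FilterDirectoryReader {
    PassThroughDirectoryReader(DirectoryReader in) throws IOException {
        super(in, new SubReaderWrapper() {
            @Override
            public LeafReader wrap(LeafReader reader) {
                return reader; // a real wrapper would intercept per-segment reads here
            }
        });
    }

    @Override
    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
        // called on reopen so the wrapping survives DirectoryReader.openIfChanged
        return new PassThroughDirectoryReader(in);
    }
}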
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
Class LuceneTests, method testCleanIndex:
public void testCleanIndex() throws IOException {
    MockDirectoryWrapper dir = newMockDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig();
    iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setMaxBufferedDocs(2);
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();
    doc = new Document();
    doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();
    doc = new Document();
    doc.add(new TextField("id", "4", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.deleteDocuments(new Term("id", "2"));
    writer.commit();
    try (DirectoryReader open = DirectoryReader.open(writer)) {
        assertEquals(3, open.numDocs());
        assertEquals(1, open.numDeletedDocs());
        assertEquals(4, open.maxDoc());
    }
    writer.close();
    if (random().nextBoolean()) {
        // randomly corrupt the index by deleting one file of the second segment
        for (String file : dir.listAll()) {
            if (file.startsWith("_1")) {
                dir.deleteFile(file);
                break;
            }
        }
    }
    Lucene.cleanLuceneIndex(dir);
    // after cleaning, only the write lock (and the mock directory's "extra" files) may remain
    if (dir.listAll().length > 0) {
        for (String file : dir.listAll()) {
            if (file.startsWith("extra") == false) {
                assertEquals(file, "write.lock");
            }
        }
    }
    dir.close();
}
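Because NoDeletionPolicy retains every commit, the directory accumulates multiple commit points until cleanLuceneIndex wipes them all. A hedged sketch of inspecting those retained commits through DirectoryReader, assuming a Directory named dir like the one above (standalone illustration, not part of the test):

// Illustration: enumerate the commit points that NoDeletionPolicy has kept around.
for (IndexCommit commit : DirectoryReader.listCommits(dir)) {
    System.out.println(commit.getSegmentsFileName() + " generation=" + commit.getGeneration());
}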
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
Class ShardCoreKeyMapTests, method testBasics:
public void testBasics() throws IOException {
    Directory dir1 = newDirectory();
    RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1);
    w1.addDocument(new Document());
    Directory dir2 = newDirectory();
    RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2);
    w2.addDocument(new Document());
    Directory dir3 = newDirectory();
    RandomIndexWriter w3 = new RandomIndexWriter(random(), dir3);
    w3.addDocument(new Document());
    ShardId shardId1 = new ShardId("index1", "_na_", 1);
    ShardId shardId2 = new ShardId("index1", "_na_", 3);
    ShardId shardId3 = new ShardId("index2", "_na_", 2);
    ElasticsearchDirectoryReader reader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
    ElasticsearchDirectoryReader reader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
    ElasticsearchDirectoryReader reader3 = ElasticsearchDirectoryReader.wrap(w3.getReader(), shardId3);
    ShardCoreKeyMap map = new ShardCoreKeyMap();
    for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
        for (LeafReaderContext ctx : reader.leaves()) {
            map.add(ctx.reader());
        }
    }
    assertEquals(3, map.size());
    // Adding them back is a no-op
    for (LeafReaderContext ctx : reader1.leaves()) {
        map.add(ctx.reader());
    }
    assertEquals(3, map.size());
    for (LeafReaderContext ctx : reader2.leaves()) {
        assertEquals(shardId2, map.getShardId(ctx.reader().getCoreCacheKey()));
    }
    w1.addDocument(new Document());
    ElasticsearchDirectoryReader newReader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
    reader1.close();
    reader1 = newReader1;
    // same for reader2, but with a force merge to trigger evictions
    w2.addDocument(new Document());
    w2.forceMerge(1);
    ElasticsearchDirectoryReader newReader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
    reader2.close();
    reader2 = newReader2;
    for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
        for (LeafReaderContext ctx : reader.leaves()) {
            map.add(ctx.reader());
        }
    }
    final Set<Object> index1Keys = new HashSet<>();
    for (DirectoryReader reader : Arrays.asList(reader1, reader2)) {
        for (LeafReaderContext ctx : reader.leaves()) {
            index1Keys.add(ctx.reader().getCoreCacheKey());
        }
    }
    index1Keys.removeAll(map.getCoreKeysForIndex("index1"));
    assertEquals(Collections.emptySet(), index1Keys);
    // closing the readers fires the core close listeners, which evict the map entries
    reader1.close();
    w1.close();
    reader2.close();
    w2.close();
    reader3.close();
    w3.close();
    assertEquals(0, map.size());
    dir1.close();
    dir2.close();
    dir3.close();
}
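The map keys entries on the per-segment core cache key, which stays stable across reopens as long as the underlying segment is untouched; the force merge above rebuilds the segments and therefore invalidates the old keys. A minimal sketch of that stability property, assuming an IndexWriter named writer that already holds at least one flushed segment and that no merge or delete touches it in between (illustrative, Lucene 6-era API):

// Illustration of core-key stability across an NRT reopen:
DirectoryReader before = DirectoryReader.open(writer);
writer.addDocument(new Document());
DirectoryReader after = DirectoryReader.openIfChanged(before);
assert after != null; // a document was added, so the reader did change
// the pre-existing first segment is shared between both readers, so its core key is identical
assert before.leaves().get(0).reader().getCoreCacheKey()
        == after.leaves().get(0).reader().getCoreCacheKey();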
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
Class VersionsTests, method testNestedDocuments:
public void testNestedDocuments() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < 4; ++i) {
        // Nested documents: they share the root's uid but carry no version field
        Document doc = new Document();
        doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
        docs.add(doc);
    }
    // Root document: indexed last in the block, it holds the _version doc value
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    NumericDocValuesField version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
    doc.add(version);
    docs.add(doc);
    writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
    DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1));
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5L));
    version.setLongValue(6L);
    writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
    version.setLongValue(7L);
    writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7L));
    writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
    directoryReader.close();
    writer.close();
    dir.close();
}
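updateDocuments indexes the block atomically and keeps the root document last, which is what makes version lookups on nested blocks work: walking the uid postings and keeping the last matching doc lands on the root. A rough single-segment sketch of that idea, assuming Lucene 6-style random-access doc values; the real Versions code additionally walks segments newest-first and honors live docs:

// Simplified single-segment version lookup (illustration, not the actual Versions code):
long loadVersionSketch(LeafReader leaf, Term uid) throws IOException {
    PostingsEnum postings = leaf.postings(uid, PostingsEnum.NONE);
    if (postings == null) {
        return Versions.NOT_FOUND; // uid term absent from this segment
    }
    int rootDoc = DocIdSetIterator.NO_MORE_DOCS;
    for (int d = postings.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = postings.nextDoc()) {
        rootDoc = d; // keep advancing: in a nested block the root doc comes last
    }
    NumericDocValues versions = leaf.getNumericDocValues(VersionFieldMapper.NAME);
    return versions == null ? Versions.NOT_FOUND : versions.get(rootDoc);
}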
Use of org.apache.lucene.index.DirectoryReader in project elasticsearch by elastic.
Class SimpleLuceneTests, method testNRTSearchOnClosedWriter:
public void testNRTSearchOnClosedWriter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    DirectoryReader reader = DirectoryReader.open(indexWriter);
    for (int i = 0; i < 100; i++) {
        Document document = new Document();
        TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES);
        field.setBoost(i);
        document.add(field);
        indexWriter.addDocument(document);
    }
    reader = refreshReader(reader);
    indexWriter.close();
    // the refreshed NRT reader stays usable even though the writer is now closed
    TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator();
    termDocs.next();
}
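refreshReader is a helper defined elsewhere in the test class; an NRT refresh conventionally goes through DirectoryReader.openIfChanged, roughly as below (a hedged sketch of such a helper, not the test's actual implementation):

// Hedged sketch of an NRT refresh helper (assumption, not the actual refreshReader):
private static DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader != null) {
        reader.close(); // the old snapshot is no longer needed
        return newReader;
    }
    return reader; // nothing changed; keep the current reader
}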