Usage example of org.apache.lucene.index.LeafReaderContext from the elastic/elasticsearch project:
class ShardCoreKeyMapTests, method testMissingShard.
/**
 * A {@link ShardCoreKeyMap} can only track readers that are wrapped in an
 * {@code ElasticsearchDirectoryReader}, since that wrapper carries the shard id.
 * Adding a leaf of a plain, unwrapped reader must be rejected with an
 * {@link IllegalArgumentException}.
 */
public void testMissingShard() throws IOException {
    try (Directory dir = newDirectory();
            RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
        w.addDocument(new Document());
        try (IndexReader reader = w.getReader()) {
            ShardCoreKeyMap map = new ShardCoreKeyMap();
            for (LeafReaderContext ctx : reader.leaves()) {
                try {
                    map.add(ctx.reader());
                    // give a reason instead of a bare fail() so a regression is self-explanatory
                    fail("expected an IllegalArgumentException: reader is not associated with a shard");
                } catch (IllegalArgumentException expected) {
                    // expected: the reader has no shard information attached
                }
            }
        }
    }
}
Usage example of org.apache.lucene.index.LeafReaderContext from the elastic/elasticsearch project:
class ShardCoreKeyMapTests, method testBasics.
// Exercises the full ShardCoreKeyMap lifecycle: registering leaves from three
// shards, idempotent re-adds, shard-id lookup by core cache key, eviction of
// stale core keys after reopen/force-merge, and emptiness once all readers close.
// NOTE(review): readers/writers/dirs are closed manually in sequence, not via
// try-with-resources — an assertion failure mid-test leaks them; intentional
// here because the close ORDER is part of what is being verified.
public void testBasics() throws IOException {
// Three independent indexes, each standing in for one shard.
Directory dir1 = newDirectory();
RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1);
w1.addDocument(new Document());
Directory dir2 = newDirectory();
RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2);
w2.addDocument(new Document());
Directory dir3 = newDirectory();
RandomIndexWriter w3 = new RandomIndexWriter(random(), dir3);
w3.addDocument(new Document());
// Two shards of "index1" plus one shard of "index2", so the per-index view
// (getCoreKeysForIndex) can be distinguished from the per-shard view.
ShardId shardId1 = new ShardId("index1", "_na_", 1);
ShardId shardId2 = new ShardId("index1", "_na_", 3);
ShardId shardId3 = new ShardId("index2", "_na_", 2);
// Wrapping attaches the shard id that ShardCoreKeyMap.add requires.
ElasticsearchDirectoryReader reader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
ElasticsearchDirectoryReader reader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
ElasticsearchDirectoryReader reader3 = ElasticsearchDirectoryReader.wrap(w3.getReader(), shardId3);
ShardCoreKeyMap map = new ShardCoreKeyMap();
for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
for (LeafReaderContext ctx : reader.leaves()) {
map.add(ctx.reader());
}
}
// One document per index => one segment (core key) per shard.
assertEquals(3, map.size());
// Adding them back is a no-op
for (LeafReaderContext ctx : reader1.leaves()) {
map.add(ctx.reader());
}
assertEquals(3, map.size());
// Core cache keys resolve back to the shard they were registered under.
for (LeafReaderContext ctx : reader2.leaves()) {
assertEquals(shardId2, map.getShardId(ctx.reader().getCoreCacheKey()));
}
// Reopen reader1 after another write: old segments stay alive until closed.
w1.addDocument(new Document());
ElasticsearchDirectoryReader newReader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
reader1.close();
reader1 = newReader1;
// same for reader2, but with a force merge to trigger evictions
w2.addDocument(new Document());
w2.forceMerge(1);
ElasticsearchDirectoryReader newReader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
reader2.close();
reader2 = newReader2;
// Register the leaves of the reopened readers (reader3 is unchanged).
for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
for (LeafReaderContext ctx : reader.leaves()) {
map.add(ctx.reader());
}
}
// Every live core key of the two "index1" readers must be reported by
// getCoreKeysForIndex("index1"): removing the reported keys leaves nothing.
final Set<Object> index1Keys = new HashSet<>();
for (DirectoryReader reader : Arrays.asList(reader1, reader2)) {
for (LeafReaderContext ctx : reader.leaves()) {
index1Keys.add(ctx.reader().getCoreCacheKey());
}
}
index1Keys.removeAll(map.getCoreKeysForIndex("index1"));
assertEquals(Collections.emptySet(), index1Keys);
// Closing all readers evicts every entry: the map must end up empty.
reader1.close();
w1.close();
reader2.close();
w2.close();
reader3.close();
w3.close();
assertEquals(0, map.size());
dir1.close();
dir2.close();
dir3.close();
}
Usage example of org.apache.lucene.index.LeafReaderContext from the neo4j/neo4j project:
class PageOfRangesIteratorTest, method shouldReadPagesOfDocumentsFromSearcher.
@Test
// Verifies that PageOfRangesIterator pages through a searcher's results and
// decodes (range, label-bitmap) doc values into node ids, using a fully mocked
// searcher that "collects" three stub documents on demand.
public void shouldReadPagesOfDocumentsFromSearcher() throws Exception {
final int labelId = 7;
final int pageSize = 2;
// given
Query query = mock(Query.class);
IndexSearcher searcher = mock(IndexSearcher.class);
// Stub doc values: doc ids 11/16/37 map to ranges 1/2/3 ...
NumericDocValues rangeNDV = mock(NumericDocValues.class);
when(rangeNDV.get(11)).thenReturn(0x1L);
when(rangeNDV.get(16)).thenReturn(0x2L);
when(rangeNDV.get(37)).thenReturn(0x3L);
// ... and to label bitmaps 0b1 (bit 0), 0b11 (bits 0,1), 0b110000 (bits 4,5).
NumericDocValues labelNDV = mock(NumericDocValues.class);
when(labelNDV.get(11)).thenReturn(0x01L);
when(labelNDV.get(16)).thenReturn(0x03L);
when(labelNDV.get(37)).thenReturn(0x30L);
// "7" is the labelId rendered as the doc-values field name.
Map<String, NumericDocValues> docValues = MapUtil.genericMap("range", rangeNDV, "7", labelNDV);
IndexReaderStub reader = new IndexReaderStub(docValues);
reader.setElements(new String[] { "11", "16", "37" });
final LeafReaderContext context = reader.getContext();
// Make searcher.search(...) push the three stub docs into whatever
// DocValuesCollector the iterator passes in.
doAnswer(invocation -> {
DocValuesCollector collector = (DocValuesCollector) invocation.getArguments()[1];
collector.doSetNextReader(context);
collector.collect(11);
collector.collect(16);
collector.collect(37);
return null;
}).when(searcher).search(same(query), any(DocValuesCollector.class));
PrimitiveLongIterator iterator = concat(new PageOfRangesIterator(format, searcher, pageSize, query, Occur.MUST, labelId));
// when
List<Long> longs = PrimitiveLongCollections.asList(iterator);
// then
// Expected node id = (range << shift) + set-bit position of the label bitmap.
assertEquals(asList(/*doc1:*/
1L << format.bitmapFormat().shift, /*doc2:*/
2L << format.bitmapFormat().shift, (2L << format.bitmapFormat().shift) + 1, /*doc3:*/
(3L << format.bitmapFormat().shift) + 4, (3L << format.bitmapFormat().shift) + 5), longs);
// One search despite pageSize=2 — presumably paging is internal to the
// collector results; TODO(review) confirm against PageOfRangesIterator.
verify(searcher, times(1)).search(same(query), any(DocValuesCollector.class));
// Exact read counts pin the decoding loop: 2 range reads per doc, 1 label read per doc.
verify(rangeNDV, times(6)).get(anyInt());
verify(labelNDV, times(3)).get(anyInt());
verifyNoMoreInteractions(searcher);
verifyNoMoreInteractions(labelNDV);
verifyNoMoreInteractions(rangeNDV);
}
Usage example of org.apache.lucene.index.LeafReaderContext from the neo4j/neo4j project:
class SimpleUniquenessVerifier, method verify.
/**
 * Checks the index for uniqueness violations: scans every property field in every
 * leaf for terms occurring in more than one document, and re-checks each such
 * candidate term against the property store via a {@link DuplicateCheckingCollector}.
 *
 * @throws IndexEntryConflictException if two nodes share the same indexed value
 * @throws IOException on any other index access failure
 */
@Override
public void verify(PropertyAccessor accessor, int[] propKeyIds) throws IndexEntryConflictException, IOException {
    try {
        DuplicateCheckingCollector duplicateCollector = DuplicateCheckingCollector.forProperties(accessor, propKeyIds);
        IndexSearcher indexSearcher = indexSearcher();
        for (LeafReaderContext leafContext : indexSearcher.getIndexReader().leaves()) {
            Fields leafFields = leafContext.reader().fields();
            for (String fieldName : leafFields) {
                // the node-id field holds no property values, so it cannot conflict
                if (!LuceneDocumentStructure.NODE_ID_KEY.equals(fieldName)) {
                    TermsEnum termsEnum = LuceneDocumentStructure.originalTerms(leafFields.terms(fieldName), fieldName);
                    for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
                        // docFreq > 1 is only a candidate; the search + collector decides
                        if (termsEnum.docFreq() > 1) {
                            duplicateCollector.reset();
                            indexSearcher.search(new TermQuery(new Term(fieldName, term)), duplicateCollector);
                        }
                    }
                }
            }
        }
    } catch (IOException e) {
        // the collector smuggles conflicts out as the cause of an IOException;
        // unwrap so callers see the domain exception
        Throwable cause = e.getCause();
        if (cause instanceof IndexEntryConflictException) {
            throw (IndexEntryConflictException) cause;
        }
        throw e;
    }
}
Usage example of org.apache.lucene.index.LeafReaderContext from the crate/crate project:
class ScoreDocRowFunction, method apply.
/**
 * Converts a {@link ScoreDoc} (expected to be a {@link FieldDoc}) into the shared
 * {@code inputRow} by feeding the score, the field values, and the per-segment doc
 * id into the registered collector expressions. Returns {@code null} for a
 * {@code null} input.
 */
@Nullable
@Override
public Row apply(@Nullable ScoreDoc input) {
    if (input == null) {
        return null;
    }
    final FieldDoc fieldDoc = (FieldDoc) input;
    scorer.score(fieldDoc.score);
    for (OrderByCollectorExpression orderByExpression : orderByCollectorExpressions) {
        orderByExpression.setNextFieldDoc(fieldDoc);
    }
    // locate the segment (leaf) containing this global doc id and translate
    // the global id into a segment-local one
    final List<LeafReaderContext> leaves = indexReader.leaves();
    final LeafReaderContext leaf = leaves.get(ReaderUtil.subIndex(fieldDoc.doc, leaves));
    final int segmentDocId = fieldDoc.doc - leaf.docBase;
    try {
        for (LuceneCollectorExpression<?> expression : expressions) {
            expression.setNextReader(leaf);
            expression.setNextDocId(segmentDocId);
        }
    } catch (IOException e) {
        // expressions are the row's backing state, so a read failure is fatal here
        throw Throwables.propagate(e);
    }
    return inputRow;
}
End of LeafReaderContext usage examples (aggregated listing).