Search in sources :

Example 31 with LeafReaderContext

use of org.apache.lucene.index.LeafReaderContext in project elasticsearch by elastic.

From the class ShardCoreKeyMapTests, method testMissingShard.

public void testMissingShard() throws IOException {
    // A plain reader is never wrapped with shard information, so registering any of
    // its leaves in a ShardCoreKeyMap is expected to be rejected (hence "missing shard").
    try (Directory dir = newDirectory();
        RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
        w.addDocument(new Document());
        try (IndexReader reader = w.getReader()) {
            ShardCoreKeyMap map = new ShardCoreKeyMap();
            for (LeafReaderContext leaf : reader.leaves()) {
                boolean rejected = false;
                try {
                    map.add(leaf.reader());
                } catch (IllegalArgumentException e) {
                    // expected: no shard can be resolved for this leaf
                    rejected = true;
                }
                if (rejected == false) {
                    fail();
                }
            }
        }
    }
}
Also used : IndexReader(org.apache.lucene.index.IndexReader) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) Document(org.apache.lucene.document.Document) RandomIndexWriter(org.apache.lucene.index.RandomIndexWriter) Directory(org.apache.lucene.store.Directory)

Example 32 with LeafReaderContext

use of org.apache.lucene.index.LeafReaderContext in project elasticsearch by elastic.

From the class ShardCoreKeyMapTests, method testBasics.

public void testBasics() throws IOException {
    // Three independent writers/readers stand in for three shards: two shards of
    // "index1" and one shard of "index2". Each writer indexes a single document,
    // so each reader is expected to expose a single leaf/segment.
    Directory dir1 = newDirectory();
    RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1);
    w1.addDocument(new Document());
    Directory dir2 = newDirectory();
    RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2);
    w2.addDocument(new Document());
    Directory dir3 = newDirectory();
    RandomIndexWriter w3 = new RandomIndexWriter(random(), dir3);
    w3.addDocument(new Document());
    ShardId shardId1 = new ShardId("index1", "_na_", 1);
    ShardId shardId2 = new ShardId("index1", "_na_", 3);
    ShardId shardId3 = new ShardId("index2", "_na_", 2);
    // Wrapping attaches the shard id so ShardCoreKeyMap can resolve it per leaf.
    ElasticsearchDirectoryReader reader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
    ElasticsearchDirectoryReader reader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
    ElasticsearchDirectoryReader reader3 = ElasticsearchDirectoryReader.wrap(w3.getReader(), shardId3);
    ShardCoreKeyMap map = new ShardCoreKeyMap();
    for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
        for (LeafReaderContext ctx : reader.leaves()) {
            map.add(ctx.reader());
        }
    }
    // One core cache key per registered segment: three readers -> three entries.
    assertEquals(3, map.size());
    // Adding them back is a no-op
    for (LeafReaderContext ctx : reader1.leaves()) {
        map.add(ctx.reader());
    }
    assertEquals(3, map.size());
    // Every leaf of reader2 must map back to the shard id it was wrapped with.
    for (LeafReaderContext ctx : reader2.leaves()) {
        assertEquals(shardId2, map.getShardId(ctx.reader().getCoreCacheKey()));
    }
    // Reopen reader1 after another document; the old reader is closed but shared
    // segments keep their entries alive until the last reader referencing them closes.
    w1.addDocument(new Document());
    ElasticsearchDirectoryReader newReader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
    reader1.close();
    reader1 = newReader1;
    // same for reader2, but with a force merge to trigger evictions
    w2.addDocument(new Document());
    w2.forceMerge(1);
    ElasticsearchDirectoryReader newReader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
    reader2.close();
    reader2 = newReader2;
    for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
        for (LeafReaderContext ctx : reader.leaves()) {
            map.add(ctx.reader());
        }
    }
    // Every core key of the live "index1" readers must be tracked by the map:
    // removing the map's keys for "index1" should leave nothing behind.
    final Set<Object> index1Keys = new HashSet<>();
    for (DirectoryReader reader : Arrays.asList(reader1, reader2)) {
        for (LeafReaderContext ctx : reader.leaves()) {
            index1Keys.add(ctx.reader().getCoreCacheKey());
        }
    }
    index1Keys.removeAll(map.getCoreKeysForIndex("index1"));
    assertEquals(Collections.emptySet(), index1Keys);
    // Closing a reader evicts its segments' entries; after all readers are closed
    // the map must be empty again (no leaked keys).
    reader1.close();
    w1.close();
    reader2.close();
    w2.close();
    reader3.close();
    w3.close();
    assertEquals(0, map.size());
    dir1.close();
    dir2.close();
    dir3.close();
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) ElasticsearchDirectoryReader(org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader) DirectoryReader(org.apache.lucene.index.DirectoryReader) ElasticsearchDirectoryReader(org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) Document(org.apache.lucene.document.Document) RandomIndexWriter(org.apache.lucene.index.RandomIndexWriter) Directory(org.apache.lucene.store.Directory) HashSet(java.util.HashSet)

Example 33 with LeafReaderContext

use of org.apache.lucene.index.LeafReaderContext in project neo4j by neo4j.

From the class PageOfRangesIteratorTest, method shouldReadPagesOfDocumentsFromSearcher.

@Test
public void shouldReadPagesOfDocumentsFromSearcher() throws Exception {
    final int labelId = 7;
    final int pageSize = 2;
    // given
    // Mock a searcher whose "search" feeds three documents (ids 11, 16, 37) into
    // the DocValuesCollector, backed by stubbed doc values for the range and label fields.
    Query query = mock(Query.class);
    IndexSearcher searcher = mock(IndexSearcher.class);
    NumericDocValues rangeNDV = mock(NumericDocValues.class);
    when(rangeNDV.get(11)).thenReturn(0x1L);
    when(rangeNDV.get(16)).thenReturn(0x2L);
    when(rangeNDV.get(37)).thenReturn(0x3L);
    NumericDocValues labelNDV = mock(NumericDocValues.class);
    // Label bitmaps: doc 11 -> bit 0; doc 16 -> bits 0,1; doc 37 -> bits 4,5.
    when(labelNDV.get(11)).thenReturn(0x01L);
    when(labelNDV.get(16)).thenReturn(0x03L);
    when(labelNDV.get(37)).thenReturn(0x30L);
    // "7" matches labelId, so the label bitmap is found under that doc-values field.
    Map<String, NumericDocValues> docValues = MapUtil.genericMap("range", rangeNDV, "7", labelNDV);
    IndexReaderStub reader = new IndexReaderStub(docValues);
    reader.setElements(new String[] { "11", "16", "37" });
    final LeafReaderContext context = reader.getContext();
    doAnswer(invocation -> {
        // Simulate a real search: set the leaf context, then collect each stubbed doc.
        DocValuesCollector collector = (DocValuesCollector) invocation.getArguments()[1];
        collector.doSetNextReader(context);
        collector.collect(11);
        collector.collect(16);
        collector.collect(37);
        return null;
    }).when(searcher).search(same(query), any(DocValuesCollector.class));
    PrimitiveLongIterator iterator = concat(new PageOfRangesIterator(format, searcher, pageSize, query, Occur.MUST, labelId));
    // when
    List<Long> longs = PrimitiveLongCollections.asList(iterator);
    // then
    // Expected node ids: (range << shift) + set-bit position of each label bit.
    assertEquals(asList(/*doc1:*/
    1L << format.bitmapFormat().shift, /*doc2:*/
    2L << format.bitmapFormat().shift, (2L << format.bitmapFormat().shift) + 1, /*doc3:*/
    (3L << format.bitmapFormat().shift) + 4, (3L << format.bitmapFormat().shift) + 5), longs);
    // Exactly one search; 6 range reads and 3 label reads — assumes one range read
    // per (doc, page-boundary check) pass — TODO confirm against PageOfRangesIterator.
    verify(searcher, times(1)).search(same(query), any(DocValuesCollector.class));
    verify(rangeNDV, times(6)).get(anyInt());
    verify(labelNDV, times(3)).get(anyInt());
    verifyNoMoreInteractions(searcher);
    verifyNoMoreInteractions(labelNDV);
    verifyNoMoreInteractions(rangeNDV);
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) NumericDocValues(org.apache.lucene.index.NumericDocValues) Query(org.apache.lucene.search.Query) IndexReaderStub(org.neo4j.kernel.api.impl.index.IndexReaderStub) DocValuesCollector(org.neo4j.kernel.api.impl.index.collector.DocValuesCollector) PrimitiveLongIterator(org.neo4j.collection.primitive.PrimitiveLongIterator) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) Test(org.junit.Test)

Example 34 with LeafReaderContext

use of org.apache.lucene.index.LeafReaderContext in project neo4j by neo4j.

From the class SimpleUniquenessVerifier, method verify.

@Override
public void verify(PropertyAccessor accessor, int[] propKeyIds) throws IndexEntryConflictException, IOException {
    try {
        DuplicateCheckingCollector collector = DuplicateCheckingCollector.forProperties(accessor, propKeyIds);
        IndexSearcher searcher = indexSearcher();
        // Walk every term of every property field in every segment; only terms that
        // appear in more than one document can possibly be uniqueness violations.
        for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
            Fields leafFields = leaf.reader().fields();
            for (String field : leafFields) {
                // The node-id field is bookkeeping, not a user property — skip it.
                if (LuceneDocumentStructure.NODE_ID_KEY.equals(field)) {
                    continue;
                }
                TermsEnum termsEnum = LuceneDocumentStructure.originalTerms(leafFields.terms(field), field);
                for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
                    if (termsEnum.docFreq() > 1) {
                        // Re-run a term query through the duplicate checker for this candidate.
                        collector.reset();
                        searcher.search(new TermQuery(new Term(field, term)), collector);
                    }
                }
            }
        }
    } catch (IOException e) {
        // A detected conflict surfaces as the cause of an IOException; unwrap and
        // rethrow it as the checked IndexEntryConflictException callers expect.
        Throwable cause = e.getCause();
        if (cause instanceof IndexEntryConflictException) {
            throw (IndexEntryConflictException) cause;
        }
        throw e;
    }
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) TermQuery(org.apache.lucene.search.TermQuery) Fields(org.apache.lucene.index.Fields) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) Term(org.apache.lucene.index.Term) IOException(java.io.IOException) IndexEntryConflictException(org.neo4j.kernel.api.exceptions.index.IndexEntryConflictException) BytesRef(org.apache.lucene.util.BytesRef) TermsEnum(org.apache.lucene.index.TermsEnum)

Example 35 with LeafReaderContext

use of org.apache.lucene.index.LeafReaderContext in project crate by crate.

From the class ScoreDocRowFunction, method apply.

@Nullable
@Override
public Row apply(@Nullable ScoreDoc input) {
    // Converts a Lucene ScoreDoc into a Row by positioning the collector
    // expressions on the matching segment and segment-local doc id.
    if (input == null) {
        return null;
    }
    // Assumes the hit comes from a sorted search and is therefore a FieldDoc —
    // TODO confirm callers only pass FieldDocs, otherwise this cast throws CCE.
    FieldDoc fieldDoc = (FieldDoc) input;
    scorer.score(fieldDoc.score);
    for (OrderByCollectorExpression orderByCollectorExpression : orderByCollectorExpressions) {
        orderByCollectorExpression.setNextFieldDoc(fieldDoc);
    }
    // Locate the leaf containing this global doc id, then translate the id to be
    // relative to that leaf (global id = docBase + segment-local id).
    List<LeafReaderContext> leaves = indexReader.leaves();
    int readerIndex = ReaderUtil.subIndex(fieldDoc.doc, leaves);
    LeafReaderContext subReaderContext = leaves.get(readerIndex);
    int subDoc = fieldDoc.doc - subReaderContext.docBase;
    for (LuceneCollectorExpression<?> expression : expressions) {
        try {
            expression.setNextReader(subReaderContext);
        } catch (IOException e) {
            // Rethrow as unchecked: apply(..) cannot declare IOException.
            throw Throwables.propagate(e);
        }
        expression.setNextDocId(subDoc);
    }
    // inputRow reads lazily through the expressions positioned above.
    return inputRow;
}
Also used : FieldDoc(org.apache.lucene.search.FieldDoc) OrderByCollectorExpression(io.crate.operation.reference.doc.lucene.OrderByCollectorExpression) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) IOException(java.io.IOException) Nullable(javax.annotation.Nullable)

Aggregations

LeafReaderContext (org.apache.lucene.index.LeafReaderContext)335 LeafReader (org.apache.lucene.index.LeafReader)73 Document (org.apache.lucene.document.Document)71 IOException (java.io.IOException)69 BytesRef (org.apache.lucene.util.BytesRef)67 Directory (org.apache.lucene.store.Directory)61 Term (org.apache.lucene.index.Term)52 IndexSearcher (org.apache.lucene.search.IndexSearcher)49 IndexReader (org.apache.lucene.index.IndexReader)48 RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter)45 DirectoryReader (org.apache.lucene.index.DirectoryReader)44 Bits (org.apache.lucene.util.Bits)44 NumericDocValues (org.apache.lucene.index.NumericDocValues)43 ArrayList (java.util.ArrayList)41 Weight (org.apache.lucene.search.Weight)37 Terms (org.apache.lucene.index.Terms)36 DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator)36 Scorer (org.apache.lucene.search.Scorer)36 IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)34 Query (org.apache.lucene.search.Query)34