
Example 6 with TwoPhaseIterator

Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.

From the class TestDrillSideways, method testRandom.

public void testRandom() throws Exception {
    // aChance, bChance and cChance are double fields of the enclosing test class
    // (defaulting to 0.0), so these loops draw non-zero random values for them:
    while (aChance == 0.0) {
        aChance = random().nextDouble();
    }
    while (bChance == 0.0) {
        bChance = random().nextDouble();
    }
    while (cChance == 0.0) {
        cChance = random().nextDouble();
    }
    //aChance = .01;
    //bChance = 0.5;
    //cChance = 1.0;
    double sum = aChance + bChance + cChance;
    aChance /= sum;
    bChance /= sum;
    cChance /= sum;
    int numDims = TestUtil.nextInt(random(), 2, 5);
    //int numDims = 3;
    int numDocs = atLeast(3000);
    //int numDocs = 20;
    if (VERBOSE) {
        System.out.println("numDims=" + numDims + " numDocs=" + numDocs + " aChance=" + aChance + " bChance=" + bChance + " cChance=" + cChance);
    }
    String[][] dimValues = new String[numDims][];
    int valueCount = 2;
    for (int dim = 0; dim < numDims; dim++) {
        Set<String> values = new HashSet<>();
        while (values.size() < valueCount) {
            String s = TestUtil.randomRealisticUnicodeString(random());
            //String s = _TestUtil.randomString(random());
            if (s.length() > 0) {
                values.add(s);
            }
        }
        dimValues[dim] = values.toArray(new String[values.size()]);
        valueCount *= 2;
    }
    List<Doc> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Doc doc = new Doc();
        doc.id = "" + i;
        doc.contentToken = randomContentToken(false);
        doc.dims = new int[numDims];
        doc.dims2 = new int[numDims];
        for (int dim = 0; dim < numDims; dim++) {
            if (random().nextInt(5) == 3) {
                // This doc is missing this dim:
                doc.dims[dim] = -1;
            } else if (dimValues[dim].length <= 4) {
                int dimUpto = 0;
                doc.dims[dim] = dimValues[dim].length - 1;
                while (dimUpto < dimValues[dim].length) {
                    if (random().nextBoolean()) {
                        doc.dims[dim] = dimUpto;
                        break;
                    }
                    dimUpto++;
                }
            } else {
                doc.dims[dim] = random().nextInt(dimValues[dim].length);
            }
            if (random().nextInt(5) == 3) {
                // 2nd value:
                doc.dims2[dim] = random().nextInt(dimValues[dim].length);
            } else {
                doc.dims2[dim] = -1;
            }
        }
        docs.add(doc);
    }
    Directory d = newDirectory();
    Directory td = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    iwc.setInfoStream(InfoStream.NO_OUTPUT);
    RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
    DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(td, IndexWriterConfig.OpenMode.CREATE);
    FacetsConfig config = new FacetsConfig();
    for (int i = 0; i < numDims; i++) {
        config.setMultiValued("dim" + i, true);
    }
    boolean doUseDV = random().nextBoolean();
    for (Doc rawDoc : docs) {
        Document doc = new Document();
        doc.add(newStringField("id", rawDoc.id, Field.Store.YES));
        doc.add(new SortedDocValuesField("id", new BytesRef(rawDoc.id)));
        doc.add(newStringField("content", rawDoc.contentToken, Field.Store.NO));
        if (VERBOSE) {
            System.out.println("  doc id=" + rawDoc.id + " token=" + rawDoc.contentToken);
        }
        for (int dim = 0; dim < numDims; dim++) {
            int dimValue = rawDoc.dims[dim];
            if (dimValue != -1) {
                if (doUseDV) {
                    doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue]));
                } else {
                    doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue]));
                }
                doc.add(new StringField("dim" + dim, dimValues[dim][dimValue], Field.Store.YES));
                if (VERBOSE) {
                    System.out.println("    dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue]));
                }
            }
            int dimValue2 = rawDoc.dims2[dim];
            if (dimValue2 != -1) {
                if (doUseDV) {
                    doc.add(new SortedSetDocValuesFacetField("dim" + dim, dimValues[dim][dimValue2]));
                } else {
                    doc.add(new FacetField("dim" + dim, dimValues[dim][dimValue2]));
                }
                doc.add(new StringField("dim" + dim, dimValues[dim][dimValue2], Field.Store.YES));
                if (VERBOSE) {
                    System.out.println("      dim" + dim + "=" + new BytesRef(dimValues[dim][dimValue2]));
                }
            }
        }
        w.addDocument(config.build(tw, doc));
    }
    if (random().nextBoolean()) {
        // Randomly delete a few docs:
        int numDel = TestUtil.nextInt(random(), 1, (int) (numDocs * 0.05));
        if (VERBOSE) {
            System.out.println("delete " + numDel);
        }
        int delCount = 0;
        while (delCount < numDel) {
            Doc doc = docs.get(random().nextInt(docs.size()));
            if (!doc.deleted) {
                if (VERBOSE) {
                    System.out.println("  delete id=" + doc.id);
                }
                doc.deleted = true;
                w.deleteDocuments(new Term("id", doc.id));
                delCount++;
            }
        }
    }
    if (random().nextBoolean()) {
        if (VERBOSE) {
            System.out.println("TEST: forceMerge(1)...");
        }
        w.forceMerge(1);
    }
    IndexReader r = w.getReader();
    final SortedSetDocValuesReaderState sortedSetDVState;
    IndexSearcher s = newSearcher(r);
    if (doUseDV) {
        sortedSetDVState = new DefaultSortedSetDocValuesReaderState(s.getIndexReader());
    } else {
        sortedSetDVState = null;
    }
    if (VERBOSE) {
        System.out.println("r.numDocs() = " + r.numDocs());
    }
    // NRT open
    TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
    int numIters = atLeast(10);
    for (int iter = 0; iter < numIters; iter++) {
        String contentToken = random().nextInt(30) == 17 ? null : randomContentToken(true);
        int numDrillDown = TestUtil.nextInt(random(), 1, Math.min(4, numDims));
        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " baseQuery=" + contentToken + " numDrillDown=" + numDrillDown + " useSortedSetDV=" + doUseDV);
        }
        String[][] drillDowns = new String[numDims][];
        int count = 0;
        boolean anyMultiValuedDrillDowns = false;
        while (count < numDrillDown) {
            int dim = random().nextInt(numDims);
            if (drillDowns[dim] == null) {
                if (random().nextBoolean()) {
                    // Drill down on one value:
                    drillDowns[dim] = new String[] { dimValues[dim][random().nextInt(dimValues[dim].length)] };
                } else {
                    int orCount = TestUtil.nextInt(random(), 1, Math.min(5, dimValues[dim].length));
                    drillDowns[dim] = new String[orCount];
                    anyMultiValuedDrillDowns |= orCount > 1;
                    for (int i = 0; i < orCount; i++) {
                        while (true) {
                            String value = dimValues[dim][random().nextInt(dimValues[dim].length)];
                            for (int j = 0; j < i; j++) {
                                if (value.equals(drillDowns[dim][j])) {
                                    value = null;
                                    break;
                                }
                            }
                            if (value != null) {
                                drillDowns[dim][i] = value;
                                break;
                            }
                        }
                    }
                }
                if (VERBOSE) {
                    BytesRef[] values = new BytesRef[drillDowns[dim].length];
                    for (int i = 0; i < values.length; i++) {
                        values[i] = new BytesRef(drillDowns[dim][i]);
                    }
                    System.out.println("  dim" + dim + "=" + Arrays.toString(values));
                }
                count++;
            }
        }
        Query baseQuery;
        if (contentToken == null) {
            baseQuery = new MatchAllDocsQuery();
        } else {
            baseQuery = new TermQuery(new Term("content", contentToken));
        }
        DrillDownQuery ddq = new DrillDownQuery(config, baseQuery);
        for (int dim = 0; dim < numDims; dim++) {
            if (drillDowns[dim] != null) {
                for (String value : drillDowns[dim]) {
                    ddq.add("dim" + dim, value);
                }
            }
        }
        Query filter;
        if (random().nextInt(7) == 6) {
            if (VERBOSE) {
                System.out.println("  only-even filter");
            }
            filter = new Query() {

                @Override
                public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
                    return new ConstantScoreWeight(this, boost) {

                        @Override
                        public Scorer scorer(LeafReaderContext context) throws IOException {
                            DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
                            return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(approximation) {

                                @Override
                                public boolean matches() throws IOException {
                                    int docID = approximation.docID();
                                    return (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0;
                                }

                                @Override
                                public float matchCost() {
                                    return 1000f;
                                }
                            });
                        }
                    };
                }

                @Override
                public String toString(String field) {
                    return "drillSidewaysTestFilter";
                }

                @Override
                public boolean equals(Object o) {
                    return o == this;
                }

                @Override
                public int hashCode() {
                    return System.identityHashCode(this);
                }
            };
        } else {
            filter = null;
        }
        // Verify docs are always collected in order.  If we
        // had an AssertingScorer it could catch it when
        // Weight.scoresDocsOutOfOrder lies!:
        getNewDrillSideways(s, config, tr).search(ddq, new SimpleCollector() {

            int lastDocID;

            @Override
            public void collect(int doc) {
                assert doc > lastDocID;
                lastDocID = doc;
            }

            @Override
            protected void doSetNextReader(LeafReaderContext context) throws IOException {
                lastDocID = -1;
            }

            @Override
            public boolean needsScores() {
                return false;
            }
        });
        // Also verify that DrillSideways respects scoreSubDocsAtOnce, which
        // requires that all subScorers be positioned on the same docID:
        if (!anyMultiValuedDrillDowns) {
            // Can only do this test when there are no OR'd
            // drill-down values, because in that case it's
            // easily possible for one of the DD terms to be on
            // a future docID:
            getNewDrillSidewaysScoreSubdocsAtOnce(s, config, tr).search(ddq, new AssertingSubDocsAtOnceCollector());
        }
        TestFacetResult expected = slowDrillSidewaysSearch(s, docs, contentToken, drillDowns, dimValues, filter);
        Sort sort = new Sort(new SortField("id", SortField.Type.STRING));
        DrillSideways ds;
        if (doUseDV) {
            ds = getNewDrillSideways(s, config, sortedSetDVState);
        } else {
            ds = getNewDrillSidewaysBuildFacetsResult(s, config, tr);
        }
        // Retrieve all facets:
        DrillSidewaysResult actual = ds.search(ddq, filter, null, numDocs, sort, true, true);
        TopDocs hits = s.search(baseQuery, numDocs);
        Map<String, Float> scores = new HashMap<>();
        for (ScoreDoc sd : hits.scoreDocs) {
            scores.put(s.doc(sd.doc).get("id"), sd.score);
        }
        if (VERBOSE) {
            System.out.println("  verify all facets");
        }
        verifyEquals(dimValues, s, expected, actual, scores, doUseDV);
        // Make sure drill down doesn't change score:
        Query q = ddq;
        if (filter != null) {
            q = new BooleanQuery.Builder().add(q, Occur.MUST).add(filter, Occur.FILTER).build();
        }
        TopDocs ddqHits = s.search(q, numDocs);
        assertEquals(expected.hits.size(), ddqHits.totalHits);
        for (int i = 0; i < expected.hits.size(); i++) {
            // Score should be IDENTICAL:
            assertEquals(scores.get(expected.hits.get(i).id), ddqHits.scoreDocs[i].score, 0.0f);
        }
    }
    w.close();
    IOUtils.close(r, tr, tw, d, td);
}
Also used: IOException (java.io.IOException), ArrayList (java.util.ArrayList), HashMap (java.util.HashMap), HashSet (java.util.HashSet), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), Document (org.apache.lucene.document.Document), SortedDocValuesField (org.apache.lucene.document.SortedDocValuesField), StringField (org.apache.lucene.document.StringField), DrillSidewaysResult (org.apache.lucene.facet.DrillSideways.DrillSidewaysResult), DefaultSortedSetDocValuesReaderState (org.apache.lucene.facet.sortedset.DefaultSortedSetDocValuesReaderState), SortedSetDocValuesFacetField (org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField), SortedSetDocValuesReaderState (org.apache.lucene.facet.sortedset.SortedSetDocValuesReaderState), TaxonomyReader (org.apache.lucene.facet.taxonomy.TaxonomyReader), DirectoryTaxonomyReader (org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader), DirectoryTaxonomyWriter (org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter), IndexReader (org.apache.lucene.index.IndexReader), IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter), Term (org.apache.lucene.index.Term), BooleanQuery (org.apache.lucene.search.BooleanQuery), ConstantScoreScorer (org.apache.lucene.search.ConstantScoreScorer), ConstantScoreWeight (org.apache.lucene.search.ConstantScoreWeight), DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator), IndexSearcher (org.apache.lucene.search.IndexSearcher), MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery), Query (org.apache.lucene.search.Query), ScoreDoc (org.apache.lucene.search.ScoreDoc), Scorer (org.apache.lucene.search.Scorer), SimpleCollector (org.apache.lucene.search.SimpleCollector), Sort (org.apache.lucene.search.Sort), SortField (org.apache.lucene.search.SortField), TermQuery (org.apache.lucene.search.TermQuery), TopDocs (org.apache.lucene.search.TopDocs), TwoPhaseIterator (org.apache.lucene.search.TwoPhaseIterator), Weight (org.apache.lucene.search.Weight), Directory (org.apache.lucene.store.Directory), BytesRef (org.apache.lucene.util.BytesRef)
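
The anonymous only-even filter in this example leans on the TwoPhaseIterator contract: consumers first advance the cheap approximation, and only then call matches() to run the (potentially expensive) verification for the current document. A minimal sketch of how a consumer drives such an iterator; the method name consume is illustrative, not part of the Lucene API:

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TwoPhaseIterator;

// Advance the cheap approximation, then confirm each candidate with matches().
static void consume(TwoPhaseIterator tpi) throws IOException {
    DocIdSetIterator approximation = tpi.approximation();
    for (int doc = approximation.nextDoc();
         doc != DocIdSetIterator.NO_MORE_DOCS;
         doc = approximation.nextDoc()) {
        // matches() may only be called while the approximation is positioned on a doc
        if (tpi.matches()) {
            System.out.println("verified match: doc=" + doc);
        }
    }
}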

Example 7 with TwoPhaseIterator

Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.

From the class PhraseHelper, method getTermToSpans.

// code extracted & refactored from WSTE.extractWeightedSpanTerms()
private void getTermToSpans(SpanQuery spanQuery, LeafReaderContext readerContext, int doc, Map<BytesRef, Spans> result) throws IOException {
    // note: in WSTE there was some field specific looping that seemed pointless so that isn't here.
    final IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    searcher.setQueryCache(null);
    if (willRewrite) {
        // searcher.rewrite loops till done
        spanQuery = (SpanQuery) searcher.rewrite(spanQuery);
    }
    // Get the underlying query terms
    // sorted so we can loop over results in order shortly...
    TreeSet<Term> termSet = new FieldFilteringTermSet();
    //needsScores==false
    searcher.createWeight(spanQuery, false, 1.0f).extractTerms(termSet);
    // Get Spans by running the query against the reader
    // TODO it might make sense to re-use/cache the Spans instance, to advance forward between docs
    SpanWeight spanWeight = (SpanWeight) searcher.createNormalizedWeight(spanQuery, false);
    Spans spans = spanWeight.getSpans(readerContext, SpanWeight.Postings.POSITIONS);
    if (spans == null) {
        return;
    }
    TwoPhaseIterator twoPhaseIterator = spans.asTwoPhaseIterator();
    if (twoPhaseIterator != null) {
        if (twoPhaseIterator.approximation().advance(doc) != doc || !twoPhaseIterator.matches()) {
            return;
        }
    } else if (spans.advance(doc) != doc) {
        // pre-position the spans to this doc; return, doing nothing, if we find none
        return;
    }
    // Consume the Spans into a cache.  This instance is used as a source for multiple cloned copies.
    // It's important we do this and not re-use the same original Spans instance since these will be iterated
    // independently later on; sometimes in ways that prevents sharing the original Spans.
    // consumes spans for this doc only and caches
    CachedSpans cachedSpansSource = new CachedSpans(spans);
    // we don't use it below
    spans = null;
    // Map terms to a Spans instance (aggregate if necessary)
    for (final Term queryTerm : termSet) {
        // note: the spanQuery list was already filtered by these conditions.
        if (positionInsensitiveTerms.contains(queryTerm)) {
            continue;
        }
        // copy-constructor refers to same data (shallow) but has iteration state from the beginning
        CachedSpans cachedSpans = new CachedSpans(cachedSpansSource);
        // Add the span to whatever span may or may not exist
        Spans existingSpans = result.get(queryTerm.bytes());
        if (existingSpans != null) {
            if (existingSpans instanceof MultiSpans) {
                ((MultiSpans) existingSpans).addSpans(cachedSpans);
            } else {
                // upgrade to MultiSpans
                MultiSpans multiSpans = new MultiSpans();
                multiSpans.addSpans(existingSpans);
                multiSpans.addSpans(cachedSpans);
                result.put(queryTerm.bytes(), multiSpans);
            }
        } else {
            result.put(queryTerm.bytes(), cachedSpans);
        }
    }
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TwoPhaseIterator (org.apache.lucene.search.TwoPhaseIterator), SpanWeight (org.apache.lucene.search.spans.SpanWeight), Term (org.apache.lucene.index.Term), WeightedSpanTerm (org.apache.lucene.search.highlight.WeightedSpanTerm), Spans (org.apache.lucene.search.spans.Spans)
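
Note how this method handles both cases: a Spans that exposes a two-phase view and one that does not. When calling code wants a plain iterator rather than managing the two-phase dance itself, TwoPhaseIterator.asDocIdSetIterator folds the matches() check back into nextDoc()/advance(). A small sketch; the helper name verifiedIterator is illustrative:

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.spans.Spans;

// Return an iterator whose nextDoc()/advance() stop only on verified matches.
// Spans itself extends DocIdSetIterator, so it is a valid fallback.
static DocIdSetIterator verifiedIterator(Spans spans) {
    TwoPhaseIterator tpi = spans.asTwoPhaseIterator();
    return tpi != null ? TwoPhaseIterator.asDocIdSetIterator(tpi) : spans;
}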

Example 8 with TwoPhaseIterator

Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.

From the class IntersectsRPTVerifyQuery, method createWeight.

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
    final Map valueSourceContext = ValueSource.newContext(searcher);
    return new ConstantScoreWeight(this, boost) {

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            // Compute approx & exact
            final IntersectsDifferentiatingQuery.IntersectsDifferentiatingVisitor result = intersectsDiffQuery.compute(context);
            if (result.approxDocIdSet == null) {
                return null;
            }
            final DocIdSetIterator approxDISI = result.approxDocIdSet.iterator();
            if (approxDISI == null) {
                return null;
            }
            final DocIdSetIterator exactIterator;
            if (result.exactDocIdSet != null) {
                // If both sets are the same, there's nothing to verify; we needn't return a TwoPhaseIterator
                if (result.approxDocIdSet == result.exactDocIdSet) {
                    return new ConstantScoreScorer(this, score(), approxDISI);
                }
                exactIterator = result.exactDocIdSet.iterator();
                assert exactIterator != null;
            } else {
                exactIterator = null;
            }
            final FunctionValues predFuncValues = predicateValueSource.getValues(valueSourceContext, context);
            final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator(approxDISI) {

                @Override
                public boolean matches() throws IOException {
                    final int doc = approxDISI.docID();
                    if (exactIterator != null) {
                        if (exactIterator.docID() < doc) {
                            exactIterator.advance(doc);
                        }
                        if (exactIterator.docID() == doc) {
                            return true;
                        }
                    }
                    return predFuncValues.boolVal(doc);
                }

                @Override
                public float matchCost() {
                    // TODO: use cost of exactIterator.advance() and predFuncValues.boolVal()
                    return 100;
                }
            };
            return new ConstantScoreScorer(this, score(), twoPhaseIterator);
        }
    };
}
Also used: TwoPhaseIterator (org.apache.lucene.search.TwoPhaseIterator), ConstantScoreScorer (org.apache.lucene.search.ConstantScoreScorer), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), FunctionValues (org.apache.lucene.queries.function.FunctionValues), Map (java.util.Map), DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator), ConstantScoreWeight (org.apache.lucene.search.ConstantScoreWeight)
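
The matches() implementation above is a reusable shape: documents confirmed by a pre-computed exact set pass immediately, and only the remainder pay for the per-document predicate. A generic sketch of that shape under the same API assumptions; the helper name withExactShortcut and the IntPredicate parameter are illustrative:

import java.io.IOException;
import java.util.function.IntPredicate;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TwoPhaseIterator;

// Docs in 'exact' (which may be null) are accepted without running 'verify';
// all other candidates from the approximation fall through to the predicate.
static TwoPhaseIterator withExactShortcut(DocIdSetIterator approx,
                                          DocIdSetIterator exact,
                                          IntPredicate verify) {
    return new TwoPhaseIterator(approx) {

        @Override
        public boolean matches() throws IOException {
            int doc = approx.docID();
            if (exact != null) {
                if (exact.docID() < doc) {
                    exact.advance(doc);
                }
                if (exact.docID() == doc) {
                    return true;
                }
            }
            return verify.test(doc);
        }

        @Override
        public float matchCost() {
            return 100; // rough cost of one predicate evaluation, as in the example
        }
    };
}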

Example 9 with TwoPhaseIterator

Use of org.apache.lucene.search.TwoPhaseIterator in project lucene-solr by apache.

From the class SortedNumericDocValuesRangeQuery, method createWeight.

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
    return new ConstantScoreWeight(this, boost) {

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            SortedNumericDocValues values = getValues(context.reader(), field);
            if (values == null) {
                return null;
            }
            final NumericDocValues singleton = DocValues.unwrapSingleton(values);
            final TwoPhaseIterator iterator;
            if (singleton != null) {
                iterator = new TwoPhaseIterator(singleton) {

                    @Override
                    public boolean matches() throws IOException {
                        final long value = singleton.longValue();
                        return value >= lowerValue && value <= upperValue;
                    }

                    @Override
                    public float matchCost() {
                        // 2 comparisons
                        return 2;
                    }
                };
            } else {
                iterator = new TwoPhaseIterator(values) {

                    @Override
                    public boolean matches() throws IOException {
                        for (int i = 0, count = values.docValueCount(); i < count; ++i) {
                            final long value = values.nextValue();
                            if (value < lowerValue) {
                                continue;
                            }
                            // Values are sorted, so the first value that is >= lowerValue is our best candidate
                            return value <= upperValue;
                        }
                        // all values were < lowerValue
                        return false;
                    }

                    @Override
                    public float matchCost() {
                        // 2 comparisons
                        return 2;
                    }
                };
            }
            return new ConstantScoreScorer(this, score(), iterator);
        }
    };
}
Also used: NumericDocValues (org.apache.lucene.index.NumericDocValues), SortedNumericDocValues (org.apache.lucene.index.SortedNumericDocValues), TwoPhaseIterator (org.apache.lucene.search.TwoPhaseIterator), ConstantScoreScorer (org.apache.lucene.search.ConstantScoreScorer), LeafReaderContext (org.apache.lucene.index.LeafReaderContext), IOException (java.io.IOException), ConstantScoreWeight (org.apache.lucene.search.ConstantScoreWeight)
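
getValues is a private helper of the query that is not shown in this snippet. A hypothetical reconstruction (a guess at its shape, not the actual lucene-solr source): it fetches the segment's sorted-numeric doc values for the field, or returns null when the segment has none, which is why scorer() can return null early.

import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedNumericDocValues;

// Hypothetical: return the field's sorted-numeric doc values, or null if absent.
static SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException {
    return reader.getSortedNumericDocValues(field);
}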

Example 10 with TwoPhaseIterator

Use of org.apache.lucene.search.TwoPhaseIterator in project pyramid by cheng-li.

From the class CustomConjunctionSpans, method asTwoPhaseIterator.

/**
 * Return a {@link TwoPhaseIterator} view of this ConjunctionSpans.
 */
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
    float totalMatchCost = 0;
    // Compute the matchCost as the total matchCost/positionsCost of the sub spans.
    for (Spans spans : subSpans) {
        TwoPhaseIterator tpi = spans.asTwoPhaseIterator();
        if (tpi != null) {
            totalMatchCost += tpi.matchCost();
        } else {
            totalMatchCost += spans.positionsCost();
        }
    }
    final float matchCost = totalMatchCost;
    return new TwoPhaseIterator(conjunction) {

        @Override
        public boolean matches() throws IOException {
            return twoPhaseCurrentDocMatches();
        }

        @Override
        public float matchCost() {
            return matchCost;
        }
    };
}
Also used: TwoPhaseIterator (org.apache.lucene.search.TwoPhaseIterator), Spans (org.apache.lucene.search.spans.Spans)

Aggregations

TwoPhaseIterator (org.apache.lucene.search.TwoPhaseIterator) — 20 uses
LeafReaderContext (org.apache.lucene.index.LeafReaderContext) — 13 uses
ConstantScoreScorer (org.apache.lucene.search.ConstantScoreScorer) — 11 uses
ConstantScoreWeight (org.apache.lucene.search.ConstantScoreWeight) — 9 uses
DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator) — 9 uses
Scorer (org.apache.lucene.search.Scorer) — 7 uses
Weight (org.apache.lucene.search.Weight) — 7 uses
IOException (java.io.IOException) — 6 uses
IndexSearcher (org.apache.lucene.search.IndexSearcher) — 4 uses
Set (java.util.Set) — 3 uses
SortedNumericDocValues (org.apache.lucene.index.SortedNumericDocValues) — 3 uses
Query (org.apache.lucene.search.Query) — 3 uses
Bits (org.apache.lucene.util.Bits) — 3 uses
ArrayList (java.util.ArrayList) — 2 uses
Map (java.util.Map) — 2 uses
Document (org.apache.lucene.document.Document) — 2 uses
IndexReader (org.apache.lucene.index.IndexReader) — 2 uses
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig) — 2 uses
Term (org.apache.lucene.index.Term) — 2 uses
FunctionValues (org.apache.lucene.queries.function.FunctionValues) — 2 uses
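
Across these examples one pattern recurs: createWeight returns a ConstantScoreWeight whose scorer() wraps a cheap approximation in a TwoPhaseIterator, with matches() performing the exact per-document check, and hands the pair to a ConstantScoreScorer. A minimal self-contained sketch of that pattern against the same Lucene 6/7-era API used above; the class name EvenDocIdQuery and its even-docID predicate are illustrative only:

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;

public class EvenDocIdQuery extends Query {

    @Override
    public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
        return new ConstantScoreWeight(this, boost) {

            @Override
            public Scorer scorer(LeafReaderContext context) throws IOException {
                // Phase 1: a cheap approximation; here it matches every doc in the segment.
                DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
                TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {

                    @Override
                    public boolean matches() {
                        // Phase 2: the exact check, run only for candidates the
                        // approximation produced (trivially cheap in this sketch).
                        return (approximation.docID() & 1) == 0;
                    }

                    @Override
                    public float matchCost() {
                        return 1; // relative cost of one matches() call
                    }
                };
                return new ConstantScoreScorer(this, score(), twoPhase);
            }
        };
    }

    @Override
    public String toString(String field) {
        return "EvenDocIdQuery";
    }

    @Override
    public boolean equals(Object o) {
        return sameClassAs(o);
    }

    @Override
    public int hashCode() {
        return classHash();
    }
}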