Use of org.apache.lucene.search.BulkScorer in the project elasticsearch by elastic.
From the class CompletionSuggester, method suggest.
/**
 * Runs the given completion query over every leaf of the searcher's reader,
 * feeding hits into {@code collector}.
 *
 * @param searcher  searcher whose index is scanned
 * @param query     completion query; rewritten against the reader before scoring
 * @param collector receives the suggestion hits per leaf
 * @throws IOException on low-level index access errors
 */
private static void suggest(IndexSearcher searcher, CompletionQuery query, TopSuggestDocsCollector collector) throws IOException {
    // Rewrite against the reader first, then build the weight once for all leaves.
    query = (CompletionQuery) query.rewrite(searcher.getIndexReader());
    final Weight weight = query.createWeight(searcher, collector.needsScores());
    for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
        final BulkScorer bulkScorer = weight.bulkScorer(leaf);
        if (bulkScorer == null) {
            continue; // nothing matches in this segment
        }
        try {
            bulkScorer.score(collector.getLeafCollector(leaf), leaf.reader().getLiveDocs());
        } catch (CollectionTerminatedException ignored) {
            // The collector signalled it has enough hits for this leaf;
            // proceed with the remaining leaves rather than aborting.
        }
    }
}
Use of org.apache.lucene.search.BulkScorer in the project lucene-solr by apache.
From the class DrillSidewaysQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
final Weight baseWeight = baseQuery.createWeight(searcher, needsScores, boost);
final Weight[] drillDowns = new Weight[drillDownQueries.length];
for (int dim = 0; dim < drillDownQueries.length; dim++) {
drillDowns[dim] = searcher.createNormalizedWeight(drillDownQueries[dim], false);
}
return new Weight(DrillSidewaysQuery.this) {
@Override
public void extractTerms(Set<Term> terms) {
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return baseWeight.explain(context, doc);
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
// We can only run as a top scorer:
throw new UnsupportedOperationException();
}
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
Scorer baseScorer = baseWeight.scorer(context);
DrillSidewaysScorer.DocsAndCost[] dims = new DrillSidewaysScorer.DocsAndCost[drillDowns.length];
int nullCount = 0;
for (int dim = 0; dim < dims.length; dim++) {
Scorer scorer = drillDowns[dim].scorer(context);
if (scorer == null) {
nullCount++;
scorer = new ConstantScoreScorer(drillDowns[dim], 0f, DocIdSetIterator.empty());
}
dims[dim] = new DrillSidewaysScorer.DocsAndCost(scorer, drillSidewaysCollectors[dim]);
}
//if (nullCount > 1 || (nullCount == 1 && dims.length == 1)) {
if (nullCount > 1) {
return null;
}
// Sort drill-downs by most restrictive first:
Arrays.sort(dims, new Comparator<DrillSidewaysScorer.DocsAndCost>() {
@Override
public int compare(DocsAndCost o1, DocsAndCost o2) {
return Long.compare(o1.approximation.cost(), o2.approximation.cost());
}
});
if (baseScorer == null) {
return null;
}
return new DrillSidewaysScorer(context, baseScorer, drillDownCollector, dims, scoreSubDocsAtOnce);
}
};
}
Use of org.apache.lucene.search.BulkScorer in the project lucene-solr by apache.
From the class SuggestIndexSearcher, method suggest.
/**
 * Lower-level suggest API.
 * Collects completion hits through <code>collector</code> for <code>query</code>.
 *
 * <p>{@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}
 * is called for every matching completion hit.
 */
public void suggest(CompletionQuery query, TopSuggestDocsCollector collector) throws IOException {
  // TODO use IndexSearcher.rewrite instead
  // have to implement equals() and hashCode() in CompletionQuerys and co
  query = (CompletionQuery) query.rewrite(getIndexReader());
  // One weight for all segments; boost fixed at 1.
  final Weight weight = query.createWeight(this, collector.needsScores(), 1f);
  for (LeafReaderContext leaf : getIndexReader().leaves()) {
    final BulkScorer bulkScorer = weight.bulkScorer(leaf);
    if (bulkScorer == null) {
      continue; // this segment has no matching completions
    }
    try {
      bulkScorer.score(collector.getLeafCollector(leaf), leaf.reader().getLiveDocs());
    } catch (CollectionTerminatedException ignored) {
      // The collector terminated this leaf early; carry on with the rest.
    }
  }
}
Aggregations