Use of org.apache.lucene.index.TermContext in the elasticsearch project (by elastic):
class BlendedTermQuery, method adjustDF.
/**
 * Builds a copy of {@code ctx} whose statistics are replaced by an artificial
 * doc freq, keeping the total term freq consistent with it.
 *
 * @param readerContext top-level reader context {@code ctx} was built for
 * @param ctx           the original per-reader term context
 * @param newDocFreq    the artificial document frequency to install
 * @return a fresh TermContext carrying the adjusted statistics
 */
private static TermContext adjustDF(IndexReaderContext readerContext, TermContext ctx, int newDocFreq) {
    assert ctx.wasBuiltFor(readerContext);
    // Keep ttf consistent with the new doc freq (ttf >= df); a negative
    // ttf means "statistic unavailable" and is preserved as -1.
    final long adjustedTTF = ctx.totalTermFreq() < 0 ? -1 : Math.max(ctx.totalTermFreq(), newDocFreq);
    final List<LeafReaderContext> leaves = readerContext.leaves();
    final int leafCount = leaves == null ? 1 : leaves.size();
    final TermContext result = new TermContext(readerContext);
    int remainingDF = newDocFreq;
    long remainingTTF = adjustedTTF;
    for (int leafOrd = 0; leafOrd < leafCount; ++leafOrd) {
        final TermState state = ctx.get(leafOrd);
        if (state != null) {
            // Attribute the full artificial stats to the first leaf holding
            // the term; later leaves contribute zero so the totals match.
            result.register(state, leafOrd, remainingDF, remainingTTF);
            remainingDF = 0;
            remainingTTF = 0;
        }
    }
    return result;
}
Use of org.apache.lucene.index.TermContext in the elasticsearch project (by elastic):
class BlendedTermQuery, method rewrite.
@Override
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
IndexReaderContext context = reader.getContext();
TermContext[] ctx = new TermContext[terms.length];
int[] docFreqs = new int[ctx.length];
for (int i = 0; i < terms.length; i++) {
ctx[i] = TermContext.build(context, terms[i]);
docFreqs[i] = ctx[i].docFreq();
}
final int maxDoc = reader.maxDoc();
blend(ctx, maxDoc, reader);
return topLevelQuery(terms, ctx, docFreqs, maxDoc);
}
Use of org.apache.lucene.index.TermContext in the lucene-solr project (by apache):
class BlendedTermQuery, method rewrite.
@Override
public final Query rewrite(IndexReader reader) throws IOException {
final TermContext[] contexts = Arrays.copyOf(this.contexts, this.contexts.length);
for (int i = 0; i < contexts.length; ++i) {
if (contexts[i] == null || contexts[i].wasBuiltFor(reader.getContext()) == false) {
contexts[i] = TermContext.build(reader.getContext(), terms[i]);
}
}
// Compute aggregated doc freq and total term freq
// df will be the max of all doc freqs
// ttf will be the sum of all total term freqs
int df = 0;
long ttf = 0;
for (TermContext ctx : contexts) {
df = Math.max(df, ctx.docFreq());
if (ctx.totalTermFreq() == -1L) {
ttf = -1L;
} else if (ttf != -1L) {
ttf += ctx.totalTermFreq();
}
}
for (int i = 0; i < contexts.length; ++i) {
contexts[i] = adjustFrequencies(reader.getContext(), contexts[i], df, ttf);
}
Query[] termQueries = new Query[terms.length];
for (int i = 0; i < terms.length; ++i) {
termQueries[i] = new TermQuery(terms[i], contexts[i]);
if (boosts[i] != 1f) {
termQueries[i] = new BoostQuery(termQueries[i], boosts[i]);
}
}
return rewriteMethod.rewrite(termQueries);
}
Use of org.apache.lucene.index.TermContext in the lucene-solr project (by apache):
class BlendedTermQuery, method adjustFrequencies.
/**
 * Copies the per-leaf term states of {@code ctx} into a fresh context and
 * installs the given artificial statistics on it.
 *
 * @param readerContext top-level reader context to build the new context for
 * @param ctx           the source per-reader term context
 * @param artificialDf  the blended document frequency to install
 * @param artificialTtf the blended total term frequency to install
 * @return a fresh TermContext carrying the artificial statistics
 */
private static TermContext adjustFrequencies(IndexReaderContext readerContext, TermContext ctx, int artificialDf, long artificialTtf) {
    final List<LeafReaderContext> leaves = readerContext.leaves();
    final int leafCount = leaves == null ? 1 : leaves.size();
    final TermContext adjusted = new TermContext(readerContext);
    // Re-register each existing per-leaf state into the new context.
    for (int ord = 0; ord < leafCount; ++ord) {
        final TermState state = ctx.get(ord);
        if (state != null) {
            adjusted.register(state, ord);
        }
    }
    // Replace the accumulated statistics with the blended values.
    adjusted.accumulateStatistics(artificialDf, artificialTtf);
    return adjusted;
}
Use of org.apache.lucene.index.TermContext in the lucene-solr project (by apache):
class TermQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
final IndexReaderContext context = searcher.getTopReaderContext();
final TermContext termState;
if (perReaderTermState == null || perReaderTermState.wasBuiltFor(context) == false) {
if (needsScores) {
// make TermQuery single-pass if we don't have a PRTS or if the context
// differs!
termState = TermContext.build(context, term);
} else {
// do not compute the term state, this will help save seeks in the terms
// dict on segments that have a cache entry for this query
termState = null;
}
} else {
// PRTS was pre-build for this IS
termState = this.perReaderTermState;
}
return new TermWeight(searcher, needsScores, boost, termState);
}
Aggregations