use of org.apache.lucene.search.Explanation in project elasticsearch by elastic.
the class SimpleNestedIT method testExplain.
public void testExplain() throws Exception {
    assertAcked(prepareCreate("test").addMapping("type1", jsonBuilder().startObject().startObject("type1")
        .startObject("properties").startObject("nested1").field("type", "nested")
        .endObject().endObject().endObject().endObject()));
    ensureGreen();
    // index one parent document containing two matching nested documents
    client().prepareIndex("test", "type1", "1")
        .setSource(jsonBuilder().startObject().field("field1", "value1").startArray("nested1")
            .startObject().field("n_field1", "n_value1").endObject()
            .startObject().field("n_field1", "n_value1").endObject()
            .endArray().endObject())
        .setRefreshPolicy(IMMEDIATE).execute().actionGet();
    SearchResponse searchResponse = client().prepareSearch("test")
        .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total))
        .setExplain(true).execute().actionGet();
    assertNoFailures(searchResponse);
    assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
    // the explanation's value must match the hit's reported score
    Explanation explanation = searchResponse.getHits().getHits()[0].getExplanation();
    assertThat(explanation.getValue(), equalTo(searchResponse.getHits().getHits()[0].getScore()));
    assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 to 1"));
}
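The Explanation returned here is a tree: each node carries a value, a description, and child details. For reference only, a minimal sketch of walking that tree; the dumpExplanation helper is illustrative and not part of the test.

    // Illustrative helper, not part of SimpleNestedIT: recursively print an Explanation tree.
    static void dumpExplanation(Explanation expl, int depth) {
        String indent = "";
        for (int i = 0; i < depth; i++) {
            indent += "  ";
        }
        System.out.println(indent + expl.getValue() + " = " + expl.getDescription());
        for (Explanation detail : expl.getDetails()) {
            dumpExplanation(detail, depth + 1);
        }
    }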
use of org.apache.lucene.search.Explanation in project lucene-solr by apache.
the class TestSimilarity2 method testNoFieldSkew.
/** make sure scores are not skewed by docs not containing the field */
public void testNoFieldSkew() throws Exception {
    Directory dir = newDirectory();
    // an evil merge policy could reorder our docs for no reason
    IndexWriterConfig iwConfig = newIndexWriterConfig().setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConfig);
    Document doc = new Document();
    doc.add(newTextField("foo", "bar baz somethingelse", Field.Store.NO));
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    IndexSearcher is = newSearcher(ir);
    BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
    queryBuilder.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    queryBuilder.add(new TermQuery(new Term("foo", "baz")), BooleanClause.Occur.SHOULD);
    Query query = queryBuilder.build();
    // collect scores
    List<Explanation> scores = new ArrayList<>();
    for (Similarity sim : sims) {
        is.setSimilarity(sim);
        scores.add(is.explain(query, 0));
    }
    ir.close();
    // add some additional docs without the field
    int numExtraDocs = TestUtil.nextInt(random(), 1, 1000);
    for (int i = 0; i < numExtraDocs; i++) {
        iw.addDocument(new Document());
    }
    // check scores are the same
    ir = iw.getReader();
    is = newSearcher(ir);
    for (int i = 0; i < sims.size(); i++) {
        is.setSimilarity(sims.get(i));
        Explanation expected = scores.get(i);
        Explanation actual = is.explain(query, 0);
        assertEquals(sims.get(i).toString() + ": actual=" + actual + ",expected=" + expected,
            expected.getValue(), actual.getValue(), 0F);
    }
    iw.close();
    ir.close();
    dir.close();
}
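The sims list is populated in the test class's setup code, which is not shown here. A hedged sketch of what such a setup could look like; the concrete Similarity implementations below are illustrative, and the real test registers a larger set.

    // Illustrative only: the actual test class builds a much larger list of similarities in setUp().
    List<Similarity> sims = new ArrayList<>();
    sims.add(new ClassicSimilarity());
    sims.add(new BM25Similarity());
    sims.add(new LMDirichletSimilarity());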
use of org.apache.lucene.search.Explanation in project lucene-solr by apache.
the class SweetSpotSimilarityTest method computeNorm.
private static float computeNorm(Similarity sim, String field, int length) throws IOException {
    String value = IntStream.range(0, length).mapToObj(i -> "a").collect(Collectors.joining(" "));
    Directory dir = new RAMDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim));
    w.addDocument(Collections.singleton(newTextField(field, value, Store.NO)));
    DirectoryReader reader = DirectoryReader.open(w);
    w.close();
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setSimilarity(sim);
    Explanation expl = searcher.explain(new TermQuery(new Term(field, "a")), 0);
    reader.close();
    dir.close();
    Explanation norm = findExplanation(expl, "fieldNorm");
    assertNotNull(norm);
    return norm.getValue();
}
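findExplanation is a helper defined elsewhere in SweetSpotSimilarityTest. A minimal sketch of what it plausibly does, assuming a depth-first search of the Explanation tree by description prefix; consult the test class for the exact code.

    // Sketch of the findExplanation helper referenced above (assumed behavior):
    // depth-first search for the sub-explanation whose description starts with the given key.
    private static Explanation findExplanation(Explanation expl, String key) {
        if (expl.getDescription().startsWith(key)) {
            return expl;
        }
        for (Explanation sub : expl.getDetails()) {
            Explanation found = findExplanation(sub, key);
            if (found != null) {
                return found;
            }
        }
        return null;
    }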
use of org.apache.lucene.search.Explanation in project lucene-solr by apache.
the class ValueSource method asDoubleValuesSource.
/**
* Expose this ValueSource as a DoubleValuesSource
*/
public DoubleValuesSource asDoubleValuesSource() {
    return new DoubleValuesSource() {

        @Override
        public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
            Map context = new HashMap<>();
            FakeScorer scorer = new FakeScorer();
            context.put("scorer", scorer);
            FunctionValues fv = ValueSource.this.getValues(context, ctx);
            return new DoubleValues() {

                @Override
                public double doubleValue() throws IOException {
                    return fv.doubleVal(scorer.current);
                }

                @Override
                public boolean advanceExact(int doc) throws IOException {
                    scorer.current = doc;
                    if (scores != null && scores.advanceExact(doc)) {
                        scorer.score = (float) scores.doubleValue();
                    } else {
                        scorer.score = 0;
                    }
                    return fv.exists(doc);
                }
            };
        }

        @Override
        public boolean needsScores() {
            // be on the safe side
            return true;
        }

        @Override
        public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
            Map context = new HashMap<>();
            FakeScorer scorer = new FakeScorer();
            scorer.score = scoreExplanation.getValue();
            context.put("scorer", scorer);
            FunctionValues fv = ValueSource.this.getValues(context, ctx);
            return fv.explain(docId);
        }
    };
}
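As a usage sketch (not taken from the Lucene sources), the adapter above can be driven per segment as shown below. DoubleFieldSource is an existing ValueSource implementation; the reader variable and the "price" field name are assumptions for illustration.

    // Hedged usage sketch: read a ValueSource's values through the DoubleValuesSource adapter.
    ValueSource vs = new DoubleFieldSource("price");        // "price" is an assumed numeric field
    DoubleValuesSource dvs = vs.asDoubleValuesSource();
    for (LeafReaderContext leaf : reader.leaves()) {         // reader is an assumed open IndexReader
        DoubleValues values = dvs.getValues(leaf, null);     // null: no score feed needed for this example
        if (values.advanceExact(0)) {                        // position on the segment's first doc
            double v = values.doubleValue();
        }
    }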
use of org.apache.lucene.search.Explanation in project lucene-solr by apache.
the class LinearModel method explain.
@Override
public Explanation explain(LeafReaderContext context, int doc, float finalScore, List<Explanation> featureExplanations) {
    final List<Explanation> details = new ArrayList<>();
    int index = 0;
    for (final Explanation featureExplain : featureExplanations) {
        final List<Explanation> featureDetails = new ArrayList<>();
        featureDetails.add(Explanation.match(featureToWeight[index], "weight on feature"));
        featureDetails.add(featureExplain);
        details.add(Explanation.match(featureExplain.getValue() * featureToWeight[index], "prod of:", featureDetails));
        index++;
    }
    return Explanation.match(finalScore, toString() + " model applied to features, sum of:", details);
}
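The explanation mirrors the model's scoring function, a weighted sum over the normalized feature values. A sketch of that computation, written to match the weights used above; the actual score method in the Solr LTR LinearModel has this shape, but consult the source for the exact code.

    // Sketch of the weighted sum that the explanation above describes.
    public float score(float[] modelFeatureValuesNormalized) {
        float score = 0;
        for (int i = 0; i < modelFeatureValuesNormalized.length; i++) {
            score += modelFeatureValuesNormalized[i] * featureToWeight[i];
        }
        return score;
    }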