Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
Class TestGrouping, method assertEquals.
private void assertEquals(int[] docIDtoID, TopGroups<BytesRef> expected, TopGroups<BytesRef> actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores, boolean idvBasedImplsUsed) {
  if (expected == null) {
    assertNull(actual);
    return;
  }
  assertNotNull(actual);
  assertEquals("expected.groups.length != actual.groups.length", expected.groups.length, actual.groups.length);
  assertEquals("expected.totalHitCount != actual.totalHitCount", expected.totalHitCount, actual.totalHitCount);
  assertEquals("expected.totalGroupedHitCount != actual.totalGroupedHitCount", expected.totalGroupedHitCount, actual.totalGroupedHitCount);
  if (expected.totalGroupCount != null && verifyTotalGroupCount) {
    assertEquals("expected.totalGroupCount != actual.totalGroupCount", expected.totalGroupCount, actual.totalGroupCount);
  }
  for (int groupIDX = 0; groupIDX < expected.groups.length; groupIDX++) {
    if (VERBOSE) {
      System.out.println(" check groupIDX=" + groupIDX);
    }
    final GroupDocs<BytesRef> expectedGroup = expected.groups[groupIDX];
    final GroupDocs<BytesRef> actualGroup = actual.groups[groupIDX];
    if (verifyGroupValues) {
      if (idvBasedImplsUsed) {
        if (actualGroup.groupValue.length == 0) {
          assertNull(expectedGroup.groupValue);
        } else {
          assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
        }
      } else {
        assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
      }
    }
    if (verifySortValues) {
      assertArrayEquals(expectedGroup.groupSortValues, actualGroup.groupSortValues);
    }
    // TODO
    // assertEquals(expectedGroup.maxScore, actualGroup.maxScore);
    assertEquals(expectedGroup.totalHits, actualGroup.totalHits);
    final ScoreDoc[] expectedFDs = expectedGroup.scoreDocs;
    final ScoreDoc[] actualFDs = actualGroup.scoreDocs;
    assertEquals(expectedFDs.length, actualFDs.length);
    for (int docIDX = 0; docIDX < expectedFDs.length; docIDX++) {
      final FieldDoc expectedFD = (FieldDoc) expectedFDs[docIDX];
      final FieldDoc actualFD = (FieldDoc) actualFDs[docIDX];
      //System.out.println(" actual doc=" + docIDtoID[actualFD.doc] + " score=" + actualFD.score);
      assertEquals(expectedFD.doc, docIDtoID[actualFD.doc]);
      if (testScores) {
        assertEquals(expectedFD.score, actualFD.score, 0.1);
      } else {
        // TODO: too anal for now
        //assertEquals(Float.NaN, actualFD.score);
      }
      if (verifySortValues) {
        assertArrayEquals(expectedFD.fields, actualFD.fields);
      }
    }
  }
}
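The FieldDoc casts near the end of this method rely on a ScoreDoc subtlety: when a search runs with an explicit Sort, each entry in TopDocs.scoreDocs is actually a FieldDoc, and its fields array holds that hit's sort values. Below is a minimal, self-contained sketch of that pattern, written against the same Lucene 6.x API as the snippets here; the index contents and the "price" sort field are hypothetical.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;

public class FieldDocSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
    for (int i = 0; i < 3; i++) {
      Document doc = new Document();
      doc.add(new StringField("field", "a", Field.Store.NO));
      doc.add(new NumericDocValuesField("price", 100 - i)); // hypothetical sort key
      w.addDocument(doc);
    }
    w.close();
    IndexReader r = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(r);
    Sort sort = new Sort(new SortField("price", SortField.Type.LONG));
    TopDocs hits = searcher.search(new TermQuery(new Term("field", "a")), 10, sort);
    for (ScoreDoc sd : hits.scoreDocs) {
      // Safe cast: a sorted search fills scoreDocs with FieldDoc instances.
      FieldDoc fd = (FieldDoc) sd;
      System.out.println("doc=" + fd.doc + " sortValue=" + fd.fields[0]);
    }
    r.close();
    dir.close();
  }
}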
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
Class TestPayloadTermQuery, method test.
public void test() throws IOException {
  SpanQuery query = new PayloadScoreQuery(new SpanTermQuery(new Term("field", "seventy")), new MaxPayloadFunction());
  TopDocs hits = searcher.search(query, 100);
  assertTrue("hits is null and it shouldn't be", hits != null);
  assertTrue("hits Size: " + hits.totalHits + " is not: " + 100, hits.totalHits == 100);
  //they should all have the exact same score, because they all contain seventy once, and we set
  //all the other similarity factors to be 1
  assertTrue(hits.getMaxScore() + " does not equal: " + 1, hits.getMaxScore() == 1);
  for (int i = 0; i < hits.scoreDocs.length; i++) {
    ScoreDoc doc = hits.scoreDocs[i];
    assertTrue(doc.score + " does not equal: " + 1, doc.score == 1);
  }
  CheckHits.checkExplanations(query, PayloadHelper.FIELD, searcher, true);
  Spans spans = query.createWeight(searcher, false, 1f).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.POSITIONS);
  assertTrue("spans is null and it shouldn't be", spans != null);
  /*float score = hits.score(0);
  for (int i = 1; i < hits.length(); i++) {
    assertTrue("scores are not equal and they should be", score == hits.score(i));
  }*/
}
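The commented-out block at the end of this method dates from the long-removed Hits API. The same equal-scores check written against the current TopDocs/ScoreDoc API takes only a few lines; this sketch reuses the hits variable from the method above:

float score = hits.scoreDocs[0].score;
for (int i = 1; i < hits.scoreDocs.length; i++) {
  // every document contains "seventy" exactly once, so all scores must match
  assertTrue("scores are not equal and they should be", score == hits.scoreDocs[i].score);
}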
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
Class TestPayloadTermQuery, method testMultipleMatchesPerDoc.
public void testMultipleMatchesPerDoc() throws Exception {
  SpanQuery query = new PayloadScoreQuery(new SpanTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy")), new MaxPayloadFunction());
  TopDocs hits = searcher.search(query, 100);
  assertTrue("hits is null and it shouldn't be", hits != null);
  assertTrue("hits Size: " + hits.totalHits + " is not: " + 100, hits.totalHits == 100);
  //they should all have the exact same score, because they all contain seventy once, and we set
  //all the other similarity factors to be 1
  //System.out.println("Hash: " + seventyHash + " Twice Hash: " + 2*seventyHash);
  assertTrue(hits.getMaxScore() + " does not equal: " + 4.0, hits.getMaxScore() == 4.0);
  //there should be exactly 10 items that score a 4, all the rest should score a 2
  //The 10 items are: 70 + i*100 where i in [0-9]
  int numTens = 0;
  for (int i = 0; i < hits.scoreDocs.length; i++) {
    ScoreDoc doc = hits.scoreDocs[i];
    if (doc.doc % 10 == 0) {
      numTens++;
      assertTrue(doc.score + " does not equal: " + 4.0, doc.score == 4.0);
    } else {
      assertTrue(doc.score + " does not equal: " + 2, doc.score == 2);
    }
  }
  assertTrue(numTens + " does not equal: " + 10, numTens == 10);
  CheckHits.checkExplanations(query, "field", searcher, true);
  Spans spans = query.createWeight(searcher, false, 1f).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.POSITIONS);
  assertTrue("spans is null and it shouldn't be", spans != null);
  //should be two matches per document
  int count = 0;
  //100 hits times 2 matches per hit, we should have 200 in count
  while (spans.nextDoc() != Spans.NO_MORE_DOCS) {
    while (spans.nextStartPosition() != Spans.NO_MORE_POSITIONS) {
      count++;
    }
  }
  assertTrue(count + " does not equal: " + 200, count == 200);
}
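The 4.0 versus 2.0 split asserted above comes from MaxPayloadFunction, which keeps only the largest payload seen in each document. Swapping in another PayloadFunction changes every ScoreDoc.score; here is a sketch, assuming the same searcher and PayloadHelper index as the test (AveragePayloadFunction lives in org.apache.lucene.queries.payloads alongside MaxPayloadFunction):

// AveragePayloadFunction averages the payloads per document instead of taking
// the max, so the per-hit ScoreDoc.score values would differ from the 4.0/2.0
// values asserted above.
SpanQuery avgQuery = new PayloadScoreQuery(new SpanTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy")), new AveragePayloadFunction());
TopDocs avgHits = searcher.search(avgQuery, 100);
for (ScoreDoc sd : avgHits.scoreDocs) {
  System.out.println("doc=" + sd.doc + " score=" + sd.score);
}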
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
Class TestCoreParser, method dumpResults.
protected void dumpResults(String qType, Query q, int numDocs) throws IOException {
  if (VERBOSE) {
    System.out.println("TEST: qType=" + qType + " numDocs=" + numDocs + " " + q.getClass().getCanonicalName() + " query=" + q);
  }
  final IndexSearcher searcher = searcher();
  TopDocs hits = searcher.search(q, numDocs);
  final boolean producedResults = (hits.totalHits > 0);
  if (!producedResults) {
    System.out.println("TEST: qType=" + qType + " numDocs=" + numDocs + " " + q.getClass().getCanonicalName() + " query=" + q);
  }
  if (VERBOSE) {
    ScoreDoc[] scoreDocs = hits.scoreDocs;
    for (int i = 0; i < Math.min(numDocs, hits.totalHits); i++) {
      Document ldoc = searcher.doc(scoreDocs[i].doc);
      System.out.println("[" + ldoc.get("date") + "]" + ldoc.get("contents"));
    }
    System.out.println();
  }
  assertTrue(qType + " produced no results", producedResults);
}
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
Class TestQueryWrapperFilter, method testRandom.
public void testRandom() throws Exception {
  final Directory d = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), d);
  w.w.getConfig().setMaxBufferedDocs(17);
  final int numDocs = atLeast(100);
  final Set<String> aDocs = new HashSet<>();
  for (int i = 0; i < numDocs; i++) {
    final Document doc = new Document();
    final String v;
    if (random().nextInt(5) == 4) {
      v = "a";
      aDocs.add("" + i);
    } else {
      v = "b";
    }
    final Field f = newStringField("field", v, Field.Store.NO);
    doc.add(f);
    doc.add(newStringField("id", "" + i, Field.Store.YES));
    w.addDocument(doc);
  }
  final int numDelDocs = atLeast(10);
  for (int i = 0; i < numDelDocs; i++) {
    final String delID = "" + random().nextInt(numDocs);
    w.deleteDocuments(new Term("id", delID));
    aDocs.remove(delID);
  }
  final IndexReader r = w.getReader();
  w.close();
  final TopDocs hits = newSearcher(r).search(new QueryWrapperFilter(new TermQuery(new Term("field", "a"))), numDocs);
  assertEquals(aDocs.size(), hits.totalHits);
  for (ScoreDoc sd : hits.scoreDocs) {
    assertTrue(aDocs.contains(r.document(sd.doc).get("id")));
  }
  r.close();
  d.close();
}
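Beyond carrying a hit's doc and score, a ScoreDoc also works as a paging cursor: IndexSearcher.searchAfter takes the last ScoreDoc of one page and resumes the search below it. A sketch of that pattern over the same index, reusing the query from the test; it would have to run before r.close(), and the page size of 10 is arbitrary:

IndexSearcher searcher = newSearcher(r);
Query query = new TermQuery(new Term("field", "a"));
ScoreDoc after = null; // null means start from the top of the results
while (true) {
  TopDocs page = searcher.searchAfter(after, query, 10);
  if (page.scoreDocs.length == 0) {
    break; // no more hits
  }
  for (ScoreDoc sd : page.scoreDocs) {
    System.out.println("doc=" + sd.doc + " score=" + sd.score);
  }
  after = page.scoreDocs[page.scoreDocs.length - 1]; // cursor for the next page
}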