Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by Apache.
From the class TestNumericTerms32, method testSorting.
private void testSorting(int precisionStep) throws Exception {
  String field = "field" + precisionStep;
  // Run 10-20 random range queries. The index stores values in ascending
  // order, so a reverse sort field must return documents in descending order.
  int iterations = TestUtil.nextInt(random(), 10, 20);
  for (int iter = 0; iter < iterations; iter++) {
    int bound1 = (int) (random().nextDouble() * noDocs * distance) + startOffset;
    int bound2 = (int) (random().nextDouble() * noDocs * distance) + startOffset;
    int lower = Math.min(bound1, bound2);
    int upper = Math.max(bound1, bound2);
    Query query = LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
    Sort reverseSort = new Sort(new SortField(field, SortField.Type.INT, true));
    TopDocs topDocs = searcher.search(query, noDocs, reverseSort);
    if (topDocs.totalHits == 0) {
      continue; // empty range, nothing to verify
    }
    ScoreDoc[] hits = topDocs.scoreDocs;
    assertNotNull(hits);
    // Walk the hits and check each stored value is strictly smaller than the previous one.
    int previous = searcher.doc(hits[0].doc).getField(field).numericValue().intValue();
    for (int j = 1; j < hits.length; j++) {
      int current = searcher.doc(hits[j].doc).getField(field).numericValue().intValue();
      assertTrue("Docs should be sorted backwards", previous > current);
      previous = current;
    }
  }
}
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by Apache.
From the class TestNumericTerms64, method testSorting.
private void testSorting(int precisionStep) throws Exception {
  String field = "field" + precisionStep;
  // Run 10-20 random range queries. The index stores values in ascending
  // order, so a reverse sort field must return documents in descending order.
  int iterations = TestUtil.nextInt(random(), 10, 20);
  for (int iter = 0; iter < iterations; iter++) {
    long bound1 = (long) (random().nextDouble() * noDocs * distance) + startOffset;
    long bound2 = (long) (random().nextDouble() * noDocs * distance) + startOffset;
    long lower = Math.min(bound1, bound2);
    long upper = Math.max(bound1, bound2);
    Query query = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
    Sort reverseSort = new Sort(new SortField(field, SortField.Type.LONG, true));
    TopDocs topDocs = searcher.search(query, noDocs, reverseSort);
    if (topDocs.totalHits == 0) {
      continue; // empty range, nothing to verify
    }
    ScoreDoc[] hits = topDocs.scoreDocs;
    assertNotNull(hits);
    // Walk the hits and check each stored value is strictly smaller than the previous one.
    long previous = searcher.doc(hits[0].doc).getField(field).numericValue().longValue();
    for (int j = 1; j < hits.length; j++) {
      long current = searcher.doc(hits[j].doc).getField(field).numericValue().longValue();
      assertTrue("Docs should be sorted backwards", previous > current);
      previous = current;
    }
  }
}
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by Apache.
From the class ShingleAnalyzerWrapperTest, method testShingleAnalyzerWrapperPhraseQuery.
/*
 * Demonstrates how to construct a phrase query whose terms are shingles.
 */
public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
  PhraseQuery.Builder builder = new PhraseQuery.Builder();
  try (TokenStream stream = analyzer.tokenStream("content", "this sentence")) {
    PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
    CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    // Accumulate position increments so overlapping shingles land on the right positions.
    int position = -1;
    while (stream.incrementToken()) {
      position += posIncrAtt.getPositionIncrement();
      builder.add(new Term("content", termAtt.toString()), position);
    }
    stream.end();
  }
  ScoreDoc[] hits = searcher.search(builder.build(), 1000).scoreDocs;
  compareRanks(hits, new int[] { 0 });
}
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by Apache.
From the class QualityBenchmark, method analyzeQueryResults.
/* Analyze/judge results for a single quality query; optionally log them. */
private QualityStats analyzeQueryResults(QualityQuery qq, Query q, TopDocs td, Judge judge, PrintWriter logger, long searchTime) throws IOException {
  QualityStats stats = new QualityStats(judge.maxRecall(qq), searchTime);
  ScoreDoc[] hits = td.scoreDocs;
  // The first measured interval deliberately includes construction of the
  // doc-name extractor, just in case that construction is expensive.
  long intervalStart = System.currentTimeMillis();
  DocNameExtractor extractor = new DocNameExtractor(docNameField);
  for (int i = 0; i < hits.length; i++) {
    String docName = extractor.docName(searcher, hits[i].doc);
    long extractTime = System.currentTimeMillis() - intervalStart;
    intervalStart = System.currentTimeMillis();
    boolean relevant = judge.isRelevant(docName, qq);
    // Ranks passed to addResult are 1-based.
    stats.addResult(i + 1, relevant, extractTime);
  }
  if (logger != null) {
    logger.println(qq.getQueryID() + " - " + q);
    stats.log(qq.getQueryID() + " Stats:", 1, logger, "  ");
  }
  return stats;
}
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by Apache.
From the class TestIndexSorting, method testRandom3.
// pits index time sorting against query time sorting
public void testRandom3() throws Exception {
// Scale the document count up for nightly runs.
int numDocs;
if (TEST_NIGHTLY) {
numDocs = atLeast(100000);
} else {
numDocs = atLeast(1000);
}
List<RandomDoc> docs = new ArrayList<>();
Sort sort = randomSort();
if (VERBOSE) {
System.out.println("TEST: numDocs=" + numDocs + " use sort=" + sort);
}
// no index sorting, all search-time sorting:
Directory dir1 = newFSDirectory(createTempDir());
IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w1 = new IndexWriter(dir1, iwc1);
// use index sorting:
Directory dir2 = newFSDirectory(createTempDir());
IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
iwc2.setIndexSort(sort);
IndexWriter w2 = new IndexWriter(dir2, iwc2);
Set<Integer> toDelete = new HashSet<>();
// Random per-run delete probability; individual ids are deleted below with this chance.
double deleteChance = random().nextDouble();
// Index the exact same random documents into both writers.
for (int id = 0; id < numDocs; id++) {
RandomDoc docValues = new RandomDoc(id);
docs.add(docValues);
if (VERBOSE) {
System.out.println("TEST: doc id=" + id);
System.out.println("  int=" + docValues.intValue);
System.out.println("  long=" + docValues.longValue);
System.out.println("  float=" + docValues.floatValue);
System.out.println("  double=" + docValues.doubleValue);
System.out.println("  bytes=" + new BytesRef(docValues.bytesValue));
}
Document doc = new Document();
doc.add(new StringField("id", Integer.toString(id), Field.Store.YES));
doc.add(new NumericDocValuesField("id", id));
doc.add(new NumericDocValuesField("int", docValues.intValue));
doc.add(new NumericDocValuesField("long", docValues.longValue));
doc.add(new DoubleDocValuesField("double", docValues.doubleValue));
doc.add(new FloatDocValuesField("float", docValues.floatValue));
doc.add(new SortedDocValuesField("bytes", new BytesRef(docValues.bytesValue)));
for (int value : docValues.intValues) {
doc.add(new SortedNumericDocValuesField("multi_valued_int", value));
}
for (long value : docValues.longValues) {
doc.add(new SortedNumericDocValuesField("multi_valued_long", value));
}
// Floats/doubles must be converted to their sortable integer form for SortedNumericDocValues.
for (float value : docValues.floatValues) {
doc.add(new SortedNumericDocValuesField("multi_valued_float", NumericUtils.floatToSortableInt(value)));
}
for (double value : docValues.doubleValues) {
doc.add(new SortedNumericDocValuesField("multi_valued_double", NumericUtils.doubleToSortableLong(value)));
}
for (byte[] value : docValues.bytesValues) {
doc.add(new SortedSetDocValuesField("multi_valued_bytes", new BytesRef(value)));
}
w1.addDocument(doc);
w2.addDocument(doc);
if (random().nextDouble() < deleteChance) {
toDelete.add(id);
}
}
// Apply the same deletions to both indices so they stay equivalent.
for (int id : toDelete) {
w1.deleteDocuments(new Term("id", Integer.toString(id)));
w2.deleteDocuments(new Term("id", Integer.toString(id)));
}
DirectoryReader r1 = DirectoryReader.open(w1);
IndexSearcher s1 = newSearcher(r1);
// Sometimes force-merge the index-sorted side to exercise sorted merging.
if (random().nextBoolean()) {
int maxSegmentCount = TestUtil.nextInt(random(), 1, 5);
if (VERBOSE) {
System.out.println("TEST: now forceMerge(" + maxSegmentCount + ")");
}
w2.forceMerge(maxSegmentCount);
}
DirectoryReader r2 = DirectoryReader.open(w2);
IndexSearcher s2 = newSearcher(r2);
// Compare query-time sorting (s1) against index-time sorting with early termination (s2).
for (int iter = 0; iter < 100; iter++) {
int numHits = TestUtil.nextInt(random(), 1, numDocs);
if (VERBOSE) {
System.out.println("TEST: iter=" + iter + " numHits=" + numHits);
}
TopFieldCollector c1 = TopFieldCollector.create(sort, numHits, true, true, true);
s1.search(new MatchAllDocsQuery(), c1);
TopDocs hits1 = c1.topDocs();
TopFieldCollector c2 = TopFieldCollector.create(sort, numHits, true, true, true);
// Early termination may stop collecting once numHits sorted docs are seen per segment.
EarlyTerminatingSortingCollector c3 = new EarlyTerminatingSortingCollector(c2, sort, numHits);
s2.search(new MatchAllDocsQuery(), c3);
TopDocs hits2 = c2.topDocs();
if (VERBOSE) {
System.out.println("  topDocs query-time sort: totalHits=" + hits1.totalHits);
for (ScoreDoc scoreDoc : hits1.scoreDocs) {
System.out.println("    " + scoreDoc.doc);
}
System.out.println("  topDocs index-time sort: totalHits=" + hits2.totalHits);
for (ScoreDoc scoreDoc : hits2.scoreDocs) {
System.out.println("    " + scoreDoc.doc);
}
}
// Early termination can under-count totalHits but must return the same top docs.
assertTrue(hits2.totalHits <= hits1.totalHits);
assertEquals(hits2.scoreDocs.length, hits1.scoreDocs.length);
for (int i = 0; i < hits2.scoreDocs.length; i++) {
ScoreDoc hit1 = hits1.scoreDocs[i];
ScoreDoc hit2 = hits2.scoreDocs[i];
// Doc ids differ between indices; compare by stored "id" and by sort-field values.
assertEquals(r1.document(hit1.doc).get("id"), r2.document(hit2.doc).get("id"));
assertEquals(((FieldDoc) hit1).fields, ((FieldDoc) hit2).fields);
}
}
IOUtils.close(r1, r2, w1, w2, dir1, dir2);
}
Aggregations