Use of org.apache.lucene.search.FieldDoc in the project lucene-solr by apache.
From the class TestLatLonPointDistanceSort, method testDistanceSort.
/** Indexes three points and verifies they come back in ascending distance order. */
public void testDistanceSort() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // Index three Manhattan locations as doc values.
  double[][] points = {
      {40.759011, -73.9844722},
      {40.718266, -74.007819},
      {40.7051157, -74.0088305},
  };
  for (double[] point : points) {
    Document document = new Document();
    document.add(new LatLonDocValuesField("location", point[0], point[1]));
    writer.addDocument(document);
  }

  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  writer.close();

  // Sort everything by distance from a fixed reference point and check the
  // computed distance (in meters) reported for each hit.
  Sort sort = new Sort(LatLonDocValuesField.newDistanceSort("location", 40.7143528, -74.0059731));
  TopDocs hits = searcher.search(new MatchAllDocsQuery(), 3, sort);

  double[] expectedDistances = {462.1028401330431, 1054.9842850974826, 5285.881528419706};
  for (int i = 0; i < expectedDistances.length; i++) {
    FieldDoc fieldDoc = (FieldDoc) hits.scoreDocs[i];
    assertEquals(expectedDistances[i], (Double) fieldDoc.fields[0], 0.0D);
  }

  reader.close();
  dir.close();
}
Use of org.apache.lucene.search.FieldDoc in the project lucene-solr by apache.
From the class TestLatLonPointDistanceSort, method testMissingLast.
/** Indexes two points plus one doc with no location; the missing doc must sort last. */
public void testMissingLast() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // First document deliberately has no "location" field.
  writer.addDocument(new Document());

  Document document = new Document();
  document.add(new LatLonDocValuesField("location", 40.718266, -74.007819));
  writer.addDocument(document);

  document = new Document();
  document.add(new LatLonDocValuesField("location", 40.7051157, -74.0088305));
  writer.addDocument(document);

  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  writer.close();

  Sort sort = new Sort(LatLonDocValuesField.newDistanceSort("location", 40.7143528, -74.0059731));
  TopDocs hits = searcher.search(new MatchAllDocsQuery(), 3, sort);

  // Docs with a value sort by true distance; the doc without one is assigned
  // POSITIVE_INFINITY and therefore comes last.
  double[] expectedDistances = {462.1028401330431D, 1054.9842850974826D, Double.POSITIVE_INFINITY};
  for (int i = 0; i < expectedDistances.length; i++) {
    FieldDoc fieldDoc = (FieldDoc) hits.scoreDocs[i];
    assertEquals(expectedDistances[i], (Double) fieldDoc.fields[0], 0.0D);
  }

  reader.close();
  dir.close();
}
Use of org.apache.lucene.search.FieldDoc in the project lucene-solr by apache.
From the class TestNumericDocValuesUpdates, method testBiasedMixOfRandomUpdates.
/**
 * Randomized soak test that mixes three operations -- addDocument, updateDocument,
 * and updateNumericDocValue -- with frequencies that are re-biased on every run,
 * mirrors every operation into an in-memory map of expected values, and finally
 * verifies each doc's "val" doc value against that map.
 *
 * NOTE(review): the restyle-sensitive part is the order of random() calls -- the
 * sequence of draws determines the whole test run, so statement order matters.
 */
public void testBiasedMixOfRandomUpdates() throws Exception {
// 3 types of operations: add, updated, updateDV.
// rather then randomizing equally, we'll pick (random) cutoffs so each test run is biased,
// in terms of some ops happen more often then others
// op in [1, ADD_CUTOFF] => add; (ADD_CUTOFF, UPD_CUTOFF] => updateDocument; else updateDV
final int ADD_CUTOFF = TestUtil.nextInt(random(), 1, 98);
final int UPD_CUTOFF = TestUtil.nextInt(random(), ADD_CUTOFF + 1, 99);
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
IndexWriter writer = new IndexWriter(dir, conf);
final int numOperations = atLeast(1000);
// doc id -> last value written for that doc; the oracle checked at the end
final Map<Integer, Long> expected = new HashMap<>(numOperations / 3);
// start with at least one doc before any chance of updates
final int numSeedDocs = atLeast(1);
for (int i = 0; i < numSeedDocs; i++) {
final long val = random().nextLong();
expected.put(i, val);
writer.addDocument(doc(i, val));
}
int numDocUpdates = 0;
int numValueUpdates = 0;
for (int i = 0; i < numOperations; i++) {
final int op = TestUtil.nextInt(random(), 1, 100);
final long val = random().nextLong();
if (op <= ADD_CUTOFF) {
// add a brand-new doc; ids are dense, so expected.size() is the next free id
final int id = expected.size();
//System.out.println("TEST i=" + i + ": addDocument id=" + id + " val=" + val);
expected.put(id, val);
writer.addDocument(doc(id, val));
} else {
// pick an existing doc and update it, either wholesale or doc-value-only
final int id = TestUtil.nextInt(random(), 0, expected.size() - 1);
expected.put(id, val);
if (op <= UPD_CUTOFF) {
numDocUpdates++;
//System.out.println("TEST i=" + i + ": updateDocument id=" + id + " val=" + val);
writer.updateDocument(new Term("id", "doc-" + id), doc(id, val));
} else {
numValueUpdates++;
//System.out.println("TEST i=" + i + ": updateDV id=" + id + " val=" + val);
writer.updateNumericDocValue(new Term("id", "doc-" + id), "val", val);
}
}
}
writer.commit();
final DirectoryReader reader = DirectoryReader.open(dir);
final IndexSearcher searcher = new IndexSearcher(reader);
// TODO: make more efficient if max numOperations is going to be increased much
// Verify: each doc exists exactly once and carries the last value written to it.
for (Map.Entry<Integer, Long> expect : expected.entrySet()) {
String id = "doc-" + expect.getKey();
TopFieldDocs td = searcher.search(new TermQuery(new Term("id", id)), 1, new Sort(new SortField("val", SortField.Type.LONG)));
assertEquals(id + " missing?", 1, td.totalHits);
assertEquals(id + " value", expect.getValue(), ((FieldDoc) td.scoreDocs[0]).fields[0]);
}
IOUtils.close(reader, writer, dir);
}
Use of org.apache.lucene.search.FieldDoc in the project lucene-solr by apache.
From the class TestNumericDocValuesUpdates, method testMultipleUpdatesSameDoc.
/** Applies several updates to the same documents and checks the last write wins. */
public void testMultipleUpdatesSameDoc() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // small number of docs, so use a tiny maxBufferedDocs to force flushes between updates
  conf.setMaxBufferedDocs(3);
  IndexWriter writer = new IndexWriter(dir, conf);

  // doc-1: initial add, a doc-value-only update, then (below) a second DV update.
  writer.updateDocument(new Term("id", "doc-1"), doc(1, 1000000000L));
  writer.updateNumericDocValue(new Term("id", "doc-1"), "val", 1000001111L);
  // doc-2: initial add followed by a full re-add that replaces it.
  writer.updateDocument(new Term("id", "doc-2"), doc(2, 2000000000L));
  writer.updateDocument(new Term("id", "doc-2"), doc(2, 2222222222L));
  writer.updateNumericDocValue(new Term("id", "doc-1"), "val", 1111111111L);
  writer.commit();

  DirectoryReader reader = DirectoryReader.open(dir);
  IndexSearcher searcher = new IndexSearcher(reader);
  Sort byVal = new Sort(new SortField("val", SortField.Type.LONG));

  TopFieldDocs hits = searcher.search(new TermQuery(new Term("id", "doc-1")), 1, byVal);
  assertEquals("doc-1 missing?", 1, hits.scoreDocs.length);
  assertEquals("doc-1 value", 1111111111L, ((FieldDoc) hits.scoreDocs[0]).fields[0]);

  hits = searcher.search(new TermQuery(new Term("id", "doc-2")), 1, byVal);
  assertEquals("doc-2 missing?", 1, hits.scoreDocs.length);
  assertEquals("doc-2 value", 2222222222L, ((FieldDoc) hits.scoreDocs[0]).fields[0]);

  IOUtils.close(reader, writer, dir);
}
Use of org.apache.lucene.search.FieldDoc in the project lucene-solr by apache.
From the class TopGroupsResultTransformer, method serializeTopGroups.
/**
 * Serializes grouped search results into the {@link NamedList} wire format used in
 * shard responses: top-level hit counts, then one entry per group containing its
 * hit count, optional max score, and the per-document id/score/sortValues lists.
 *
 * @param data       the grouped hits to serialize
 * @param groupField the field the results were grouped on; used to render group keys
 * @return the serialized structure (key = readable group value, value = group data)
 * @throws IOException if retrieving a stored document fails
 */
protected NamedList serializeTopGroups(TopGroups<BytesRef> data, SchemaField groupField) throws IOException {
NamedList<Object> result = new NamedList<>();
result.add("totalGroupedHitCount", data.totalGroupedHitCount);
result.add("totalHitCount", data.totalHitCount);
if (data.totalGroupCount != null) {
result.add("totalGroupCount", data.totalGroupCount);
}
final IndexSchema schema = rb.req.getSearcher().getSchema();
SchemaField uniqueField = schema.getUniqueKeyField();
// Loop-invariant: the within-group sort is identical for every group and document,
// so fetch it once instead of per sort value (previously re-fetched in the inner loop).
final Sort withinGroupSort = rb.getGroupingSpec().getSortWithinGroup();
for (GroupDocs<BytesRef> searchGroup : data.groups) {
NamedList<Object> groupResult = new NamedList<>();
groupResult.add("totalHits", searchGroup.totalHits);
// NaN means scores were not tracked for this group; omit the entry in that case.
if (!Float.isNaN(searchGroup.maxScore)) {
groupResult.add("maxScore", searchGroup.maxScore);
}
List<NamedList<Object>> documents = new ArrayList<>();
for (int i = 0; i < searchGroup.scoreDocs.length; i++) {
NamedList<Object> document = new NamedList<>();
documents.add(document);
Document doc = retrieveDocument(uniqueField, searchGroup.scoreDocs[i].doc);
document.add(ID, uniqueField.getType().toExternal(doc.getField(uniqueField.getName())));
if (!Float.isNaN(searchGroup.scoreDocs[i].score)) {
document.add("score", searchGroup.scoreDocs[i].score);
}
if (!(searchGroup.scoreDocs[i] instanceof FieldDoc)) {
// no sort fields on this hit, thus don't add sortValues below
continue;
}
FieldDoc fieldDoc = (FieldDoc) searchGroup.scoreDocs[i];
// Marshal each sort value through its schema field type so it survives transport.
Object[] convertedSortValues = new Object[fieldDoc.fields.length];
for (int j = 0; j < fieldDoc.fields.length; j++) {
Object sortValue = fieldDoc.fields[j];
SchemaField field = withinGroupSort.getSort()[j].getField() != null ? schema.getFieldOrNull(withinGroupSort.getSort()[j].getField()) : null;
if (field != null) {
FieldType fieldType = field.getType();
if (sortValue != null) {
sortValue = fieldType.marshalSortValue(sortValue);
}
}
convertedSortValues[j] = sortValue;
}
document.add("sortValues", convertedSortValues);
}
groupResult.add("documents", documents);
// A null group value means the "ungrouped" bucket; keep it as a literal null key.
String groupValue = searchGroup.groupValue != null ? groupField.getType().indexedToReadable(searchGroup.groupValue, new CharsRefBuilder()).toString() : null;
result.add(groupValue, groupResult);
}
return result;
}
Aggregations