Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class TopGroupsResultTransformer, method transformToNative.
/**
* {@inheritDoc}
*/
@Override
public Map<String, ?> transformToNative(NamedList<NamedList> shardResponse, Sort groupSort, Sort withinGroupSort, String shard) {
  Map<String, Object> result = new HashMap<>();
  final IndexSchema schema = rb.req.getSearcher().getSchema();
  for (Map.Entry<String, NamedList> entry : shardResponse) {
    String key = entry.getKey();
    NamedList commandResult = entry.getValue();
    Integer totalGroupedHitCount = (Integer) commandResult.get("totalGroupedHitCount");
    Integer totalHits = (Integer) commandResult.get("totalHits");
    if (totalHits != null) {
      // A non-null totalHits marks a plain query command rather than a grouped one.
      Integer matches = (Integer) commandResult.get("matches");
      Float maxScore = (Float) commandResult.get("maxScore");
      if (maxScore == null) {
        maxScore = Float.NaN;
      }
      @SuppressWarnings("unchecked")
      List<NamedList<Object>> documents = (List<NamedList<Object>>) commandResult.get("documents");
      ScoreDoc[] scoreDocs = transformToNativeShardDoc(documents, groupSort, shard, schema);
      final TopDocs topDocs;
      if (withinGroupSort.equals(Sort.RELEVANCE)) {
        topDocs = new TopDocs(totalHits, scoreDocs, maxScore);
      } else {
        topDocs = new TopFieldDocs(totalHits, scoreDocs, withinGroupSort.getSort(), maxScore);
      }
      result.put(key, new QueryCommandResult(topDocs, matches));
      continue;
    }
    Integer totalHitCount = (Integer) commandResult.get("totalHitCount");
    List<GroupDocs<BytesRef>> groupDocs = new ArrayList<>();
    // Entries 0 and 1 hold the counts read above; every later entry is one group, keyed by its group value.
    for (int i = 2; i < commandResult.size(); i++) {
      String groupValue = commandResult.getName(i);
      @SuppressWarnings("unchecked")
      NamedList<Object> groupResult = (NamedList<Object>) commandResult.getVal(i);
      Integer totalGroupHits = (Integer) groupResult.get("totalHits");
      Float maxScore = (Float) groupResult.get("maxScore");
      if (maxScore == null) {
        maxScore = Float.NaN;
      }
      @SuppressWarnings("unchecked")
      List<NamedList<Object>> documents = (List<NamedList<Object>>) groupResult.get("documents");
      ScoreDoc[] scoreDocs = transformToNativeShardDoc(documents, withinGroupSort, shard, schema);
      BytesRef groupValueRef = groupValue != null ? new BytesRef(groupValue) : null;
      groupDocs.add(new GroupDocs<>(Float.NaN, maxScore, totalGroupHits, scoreDocs, groupValueRef, null));
    }
    @SuppressWarnings("unchecked")
    GroupDocs<BytesRef>[] groupDocsArr = groupDocs.toArray(new GroupDocs[groupDocs.size()]);
    TopGroups<BytesRef> topGroups = new TopGroups<>(groupSort.getSort(), withinGroupSort.getSort(), totalHitCount, totalGroupedHitCount, groupDocsArr, Float.NaN);
    result.put(key, topGroups);
  }
  return result;
}
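For orientation, here is a minimal sketch (not taken from the Solr sources) of the per-command NamedList shape the method above consumes for a grouped command. The string keys mirror the ones read in the code; the group name "groupValueA" and all values are purely illustrative.

  NamedList<Object> groupResult = new NamedList<>();
  groupResult.add("totalHits", 7);                                  // per-group hit count
  groupResult.add("maxScore", 1.5f);                                // may be absent => Float.NaN
  groupResult.add("documents", new ArrayList<NamedList<Object>>()); // serialized shard docs

  NamedList<Object> commandResult = new NamedList<>();
  commandResult.add("totalGroupedHitCount", 7);  // entry 0
  commandResult.add("totalHitCount", 42);        // entry 1
  commandResult.add("groupValueA", groupResult); // entries >= 2: one per group value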
Use of org.apache.lucene.search.ScoreDoc in project zm-mailbox by Zimbra.
The class RemoteMailQueue, method search0.
private void search0(SearchResult result, IndexReader indexReader, Query query, int offset, int limit) throws IOException {
  if (ZimbraLog.rmgmt.isDebugEnabled()) {
    ZimbraLog.rmgmt.debug("searching query=" + query + " offset=" + offset + " limit=" + limit + " " + this);
  }
  Searcher searcher = null;
  try {
    searcher = new IndexSearcher(indexReader);
    TopDocs topDocs = searcher.search(query, (Filter) null, limit);
    ScoreDoc[] hits = topDocs.scoreDocs;
    if (offset < hits.length) {
      // Page through the hits: a non-positive limit means "everything after offset".
      int n;
      if (limit <= 0) {
        n = hits.length;
      } else {
        n = Math.min(offset + limit, hits.length);
      }
      for (int i = offset; i < n; i++) {
        Document doc = searcher.doc(hits[i].doc);
        Map<QueueAttr, String> qitem = docToQueueItem(doc);
        result.qitems.add(qitem);
      }
    }
    result.hits = hits.length;
  } finally {
    if (searcher != null) {
      searcher.close();
    }
  }
}
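The offset/limit windowing above is easy to misread, so here is a small worked example; the numbers are purely illustrative. With offset=10, limit=5, and 12 matching hits, n = Math.min(10 + 5, 12) = 12, so only hits[10] and hits[11] become queue items while result.hits still reports all 12.

  // Illustrative check of the paging arithmetic in search0:
  int offset = 10, limit = 5;
  int hitsLength = 12;
  int n = (limit <= 0) ? hitsLength : Math.min(offset + limit, hitsLength);
  // rows offset..n-1 (here, 10 and 11) are converted; n == 12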
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class TestBufferedIndexInput, method testSetBufferSize.
public void testSetBufferSize() throws IOException {
  Path indexDir = createTempDir("testSetBufferSize");
  MockFSDirectory dir = new MockFSDirectory(indexDir, random());
  IndexWriter writer = new IndexWriter(dir,
      new IndexWriterConfig(new MockAnalyzer(random()))
          .setOpenMode(OpenMode.CREATE)
          .setMergePolicy(newLogMergePolicy(false)));
  for (int i = 0; i < 37; i++) {
    Document doc = new Document();
    doc.add(newTextField("content", "aaa bbb ccc ddd" + i, Field.Store.YES));
    doc.add(newTextField("id", "" + i, Field.Store.YES));
    writer.addDocument(doc);
  }

  dir.allIndexInputs.clear();

  IndexReader reader = DirectoryReader.open(writer);
  Term aaa = new Term("content", "aaa");
  Term bbb = new Term("content", "bbb");
  reader.close();

  dir.tweakBufferSizes();
  writer.deleteDocuments(new Term("id", "0"));
  reader = DirectoryReader.open(writer);
  IndexSearcher searcher = newSearcher(reader);
  ScoreDoc[] hits = searcher.search(new TermQuery(bbb), 1000).scoreDocs;
  dir.tweakBufferSizes();
  // 37 documents indexed, one deleted => 36 hits.
  assertEquals(36, hits.length);
  reader.close();

  dir.tweakBufferSizes();
  writer.deleteDocuments(new Term("id", "4"));
  reader = DirectoryReader.open(writer);
  searcher = newSearcher(reader);

  hits = searcher.search(new TermQuery(bbb), 1000).scoreDocs;
  dir.tweakBufferSizes();
  assertEquals(35, hits.length);
  dir.tweakBufferSizes();
  hits = searcher.search(new TermQuery(new Term("id", "33")), 1000).scoreDocs;
  dir.tweakBufferSizes();
  assertEquals(1, hits.length);
  hits = searcher.search(new TermQuery(aaa), 1000).scoreDocs;
  dir.tweakBufferSizes();
  assertEquals(35, hits.length);
  writer.close();
  reader.close();
}
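MockFSDirectory and tweakBufferSizes() are local to this test and not shown, so the sketch below is only a guess at the kind of adjustment such a helper might perform, using the public BufferedIndexInput.setBufferSize API that the test exercises. The allIndexInputs field name comes from the test above; everything else is hypothetical.

  // Hypothetical sketch of a buffer-size tweak over the directory's tracked inputs:
  for (IndexInput input : dir.allIndexInputs) {
    if (input instanceof BufferedIndexInput) {
      // pick a small, randomized buffer size to shake out read-boundary bugs
      ((BufferedIndexInput) input).setBufferSize(1024 + random().nextInt(4096));
    }
  }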
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class SimplePrimaryNode, method verifyAtLeastMarkerCount.
private void verifyAtLeastMarkerCount(int expectedAtLeastCount, DataOutput out) throws IOException {
  IndexSearcher searcher = mgr.acquire();
  try {
    long version = ((DirectoryReader) searcher.getIndexReader()).getVersion();
    int hitCount = searcher.count(new TermQuery(new Term("marker", "marker")));
    if (hitCount < expectedAtLeastCount) {
      message("marker search: expectedAtLeastCount=" + expectedAtLeastCount + " but hitCount=" + hitCount);
      TopDocs hits = searcher.search(new TermQuery(new Term("marker", "marker")), expectedAtLeastCount);
      List<Integer> seen = new ArrayList<>();
      for (ScoreDoc hit : hits.scoreDocs) {
        Document doc = searcher.doc(hit.doc);
        seen.add(Integer.parseInt(doc.get("docid").substring(1)));
      }
      Collections.sort(seen);
      message("saw markers:");
      for (int marker : seen) {
        message("saw m" + marker);
      }
      throw new IllegalStateException("at flush: marker count " + hitCount + " but expected at least " + expectedAtLeastCount + " version=" + version);
    }
    if (out != null) {
      out.writeVLong(version);
      out.writeVInt(hitCount);
    }
  } finally {
    mgr.release(searcher);
  }
}
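As a companion sketch (assumed, not taken from SimplePrimaryNode itself), this is how a marker document that the check above counts could be indexed. The field names "marker" and "docid" mirror the queries above, and the "m" prefix matches the substring(1) parse.

  // Hedged sketch: index one marker document for integer id `id`.
  Document doc = new Document();
  doc.add(new StringField("marker", "marker", Field.Store.NO));
  doc.add(new StringField("docid", "m" + id, Field.Store.YES)); // stored, so the check can read it back
  writer.addDocument(doc);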
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class LatLonPoint, method nearest.
/**
* Finds the {@code n} nearest indexed points to the provided point, according to Haversine distance.
* <p>
* This is functionally equivalent to running {@link MatchAllDocsQuery} with a {@link LatLonDocValuesField#newDistanceSort},
* but is far more efficient since it takes advantage of properties of the indexed BKD tree. Currently this
* only works with {@link Lucene60PointsFormat} (used by the default codec). Multi-valued fields are
* currently not de-duplicated, so if a document has multiple instances of the specified field that
* make it into the top n, that document will appear more than once.
* <p>
* Documents are ordered by ascending distance from the location. The value returned in {@link FieldDoc} for
* the hits contains a Double instance with the distance in meters.
*
* @param searcher IndexSearcher to find nearest points from.
* @param field field name. must not be null.
* @param latitude latitude at the center: must be within standard +/-90 coordinate bounds.
* @param longitude longitude at the center: must be within standard +/-180 coordinate bounds.
* @param n the number of nearest neighbors to retrieve.
* @return TopFieldDocs containing documents ordered by distance, where the field value for each {@link FieldDoc} is the distance in meters
* @throws IllegalArgumentException if the underlying PointValues is not a {@code Lucene60PointsReader} (this is a current limitation), or
* if {@code field} or {@code searcher} is null, or if {@code latitude}, {@code longitude} or {@code n} are out-of-bounds
* @throws IOException if an IOException occurs while finding the points.
*/
// TODO: what about multi-valued documents? what happens?
public static TopFieldDocs nearest(IndexSearcher searcher, String field, double latitude, double longitude, int n) throws IOException {
  GeoUtils.checkLatitude(latitude);
  GeoUtils.checkLongitude(longitude);
  if (n < 1) {
    throw new IllegalArgumentException("n must be at least 1; got " + n);
  }
  if (field == null) {
    throw new IllegalArgumentException("field must not be null");
  }
  if (searcher == null) {
    throw new IllegalArgumentException("searcher must not be null");
  }
  List<BKDReader> readers = new ArrayList<>();
  List<Integer> docBases = new ArrayList<>();
  List<Bits> liveDocs = new ArrayList<>();
  int totalHits = 0;
  for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
    PointValues points = leaf.reader().getPointValues(field);
    if (points != null) {
      if (points instanceof BKDReader == false) {
        throw new IllegalArgumentException("can only run on Lucene60PointsReader points implementation, but got " + points);
      }
      totalHits += points.getDocCount();
      BKDReader reader = (BKDReader) points;
      if (reader != null) {
        readers.add(reader);
        docBases.add(leaf.docBase);
        liveDocs.add(leaf.reader().getLiveDocs());
      }
    }
  }
  NearestNeighbor.NearestHit[] hits = NearestNeighbor.nearest(latitude, longitude, readers, liveDocs, docBases, n);
  // Convert to TopFieldDocs:
  ScoreDoc[] scoreDocs = new ScoreDoc[hits.length];
  for (int i = 0; i < hits.length; i++) {
    NearestNeighbor.NearestHit hit = hits[i];
    scoreDocs[i] = new FieldDoc(hit.docID, 0.0f, new Object[] { Double.valueOf(hit.distanceMeters) });
  }
  return new TopFieldDocs(totalHits, scoreDocs, null, 0.0f);
}
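A short usage sketch for nearest (the field name "location" and the coordinates are illustrative): per the javadoc above, the distance in meters comes back as the single sort value of each FieldDoc.

  // Find the 5 indexed points closest to Amsterdam and print their distances.
  TopFieldDocs near = LatLonPoint.nearest(searcher, "location", 52.37, 4.90, 5);
  for (ScoreDoc sd : near.scoreDocs) {
    FieldDoc fd = (FieldDoc) sd;
    double meters = (Double) fd.fields[0];
    System.out.println("doc=" + fd.doc + " distance=" + meters + " meters");
  }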