Use of org.apache.lucene.search.TopDocs in project neo4j by neo4j.
From the class LuceneSchemaIndexPopulatorTest, method assertIndexedValues:
private void assertIndexedValues(Hit... expectedHits) throws IOException {
    switchToVerification();
    for (Hit hit : expectedHits) {
        TopDocs hits = searcher.search(LuceneDocumentStructure.newSeekQuery(hit.value), 10);
        assertEquals("Unexpected number of index results from " + hit.value, hit.nodeIds.length, hits.totalHits);
        Set<Long> foundNodeIds = new HashSet<>();
        for (int i = 0; i < hits.totalHits; i++) {
            Document document = searcher.doc(hits.scoreDocs[i].doc);
            foundNodeIds.add(parseLong(document.get("id")));
        }
        assertEquals(asSet(hit.nodeIds), foundNodeIds);
    }
}
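The Hit type is not part of this excerpt; judging from how it is used above, it simply pairs an indexed property value with the node ids expected to match it. A minimal sketch of such a holder (hypothetical, not the actual Neo4j test class) could look like this:

// Hypothetical value holder matching the usage of hit.value and hit.nodeIds above.
class Hit {
    final Object value;    // property value to seek in the index
    final long[] nodeIds;  // node ids expected to be returned for that value

    Hit(Object value, long... nodeIds) {
        this.value = value;
        this.nodeIds = nodeIds;
    }
}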
Use of org.apache.lucene.search.TopDocs in project graphdb by neo4j-attic.
From the class LuceneBatchInserterIndex, method removeFromCache:
private void removeFromCache(long entityId) throws IOException, CorruptIndexException {
    IndexSearcher searcher = searcher();
    Query query = type.idTermQuery(entityId);
    TopDocs docs = searcher.search(query, 1);
    if (docs.totalHits > 0) {
        Document document = searcher.doc(docs.scoreDocs[0].doc);
        for (Fieldable field : document.getFields()) {
            String key = field.name();
            Object value = field.stringValue();
            removeFromCache(entityId, key, value);
        }
    }
}
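This snippet comes from the older graphdb codebase and uses the Lucene 3.x Fieldable type. As a rough sketch of the same id-term lookup written against a newer Lucene API (8+), where Fieldable was replaced by IndexableField and TopDocs.totalHits became a TotalHits object, one could write something like the following (illustrative only, not project code; the helper name is hypothetical):

// Illustrative sketch against a newer Lucene API; collects the stored fields
// of the single document matching the entity's id query.
private Map<String, String> storedFieldsForEntity(IndexSearcher searcher, Query idQuery) throws IOException {
    Map<String, String> fields = new HashMap<>();
    TopDocs docs = searcher.search(idQuery, 1);
    if (docs.totalHits.value > 0) {
        Document document = searcher.doc(docs.scoreDocs[0].doc);
        for (IndexableField field : document.getFields()) {
            // stringValue() may be null for non-string fields; fine for a sketch
            fields.put(field.name(), field.stringValue());
        }
    }
    return fields;
}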
Use of org.apache.lucene.search.TopDocs in project neo4j by neo4j.
From the class DocValuesCollector, method getSortedValuesIterator:
/**
 * @param field the field that contains the values
 * @param sort how the results should be sorted
 * @return an iterator over all NumericDocValues from the given field with respect to the given sort
 * @throws IOException
 */
public ValuesIterator getSortedValuesIterator(String field, Sort sort) throws IOException {
    if (sort == null || sort == Sort.INDEXORDER) {
        return getValuesIterator(field);
    }
    int size = getTotalHits();
    if (size == 0) {
        return ValuesIterator.EMPTY;
    }
    TopDocs topDocs = getTopDocs(sort, size);
    LeafReaderContext[] contexts = getLeafReaderContexts(getMatchingDocs());
    return new TopDocsValuesIterator(topDocs, contexts, field);
}
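DocValuesCollector is Neo4j's own collector. For comparison, plain Lucene exposes sorted results directly through IndexSearcher.search(Query, int, Sort), which returns a TopFieldDocs (a TopDocs subclass). A minimal sketch, assuming a hypothetical "weight" field indexed as numeric doc values:

// Sketch of a sorted search with plain Lucene; the "weight" field is hypothetical
// and must be indexed as a NumericDocValuesField for the sort to work.
Sort sort = new Sort(new SortField("weight", SortField.Type.LONG));
TopFieldDocs top = searcher.search(new MatchAllDocsQuery(), 20, sort);
for (ScoreDoc sd : top.scoreDocs) {
    Document doc = searcher.doc(sd.doc);
    // consume documents in ascending "weight" order
}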
Use of org.apache.lucene.search.TopDocs in project gerrit by GerritCodeReview.
From the class QueryDocumentationExecutor, method doQuery:
public List<DocResult> doQuery(String q) throws DocQueryException {
    if (!isAvailable()) {
        throw new DocQueryException("Documentation search not available");
    }
    try {
        Query query = parser.parse(q);
        // TODO(fishywang): Currently as we don't have much documentation, we just use MAX_VALUE here
        // and skipped paging. Maybe add paging later.
        TopDocs results = searcher.search(query, Integer.MAX_VALUE);
        ScoreDoc[] hits = results.scoreDocs;
        int totalHits = results.totalHits;
        List<DocResult> out = Lists.newArrayListWithCapacity(totalHits);
        for (int i = 0; i < totalHits; i++) {
            DocResult result = new DocResult();
            Document doc = searcher.doc(hits[i].doc);
            result.url = doc.get(Constants.URL_FIELD);
            result.title = doc.get(Constants.TITLE_FIELD);
            out.add(result);
        }
        return out;
    } catch (IOException | ParseException e) {
        throw new DocQueryException(e);
    }
}
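The TODO above mentions paging. One way it could be approached with the standard Lucene API is IndexSearcher.searchAfter, which continues a search from the last ScoreDoc of the previous page. A rough sketch, not Gerrit code; the page size and the processing step are illustrative:

// Paging sketch using searchAfter; fetches results page by page instead of
// requesting Integer.MAX_VALUE hits at once.
ScoreDoc last = null;
int pageSize = 25;
while (true) {
    TopDocs page = (last == null)
            ? searcher.search(query, pageSize)
            : searcher.searchAfter(last, query, pageSize);
    if (page.scoreDocs.length == 0) {
        break;
    }
    for (ScoreDoc sd : page.scoreDocs) {
        Document doc = searcher.doc(sd.doc);
        // build a DocResult from doc, as in doQuery above
    }
    last = page.scoreDocs[page.scoreDocs.length - 1];
}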
Use of org.apache.lucene.search.TopDocs in project lucene-solr by apache.
From the class JtsPolygonTest, method testBadPrefixTreePrune:
/**
 * A PrefixTree pruning optimization gone bad.
 * See <a href="https://issues.apache.org/jira/browse/LUCENE-4770">LUCENE-4770</a>.
 */
@Test
public void testBadPrefixTreePrune() throws Exception {
    Shape area = ctx.readShapeFromWkt("POLYGON((-122.83 48.57, -122.77 48.56, -122.79 48.53, -122.83 48.57))");
    SpatialPrefixTree trie = new QuadPrefixTree(ctx, 12);
    TermQueryPrefixTreeStrategy strategy = new TermQueryPrefixTreeStrategy(trie, "geo");
    Document doc = new Document();
    doc.add(new TextField("id", "1", Store.YES));
    Field[] fields = strategy.createIndexableFields(area, 0.025);
    for (Field field : fields) {
        doc.add(field);
    }
    addDocument(doc);
    Point upperleft = ctx.makePoint(-122.88, 48.54);
    Point lowerright = ctx.makePoint(-122.82, 48.62);
    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(upperleft, lowerright)));
    commit();
    TopDocs search = indexSearcher.search(query, 10);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    for (ScoreDoc scoreDoc : scoreDocs) {
        System.out.println(indexSearcher.doc(scoreDoc.doc));
    }
    assertEquals(1, search.totalHits);
}
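A small version note: the final assertion relies on TopDocs.totalHits being a plain numeric field. In Lucene 8 and later, totalHits is a TotalHits object, so the equivalent check would read roughly:

// Equivalent assertion on Lucene 8+, where totalHits carries a value and a relation.
assertEquals(1, search.totalHits.value);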