Use of org.apache.lucene.search.ScoreDoc in project jena by apache.
The class TextIndexLucene, method query$.
private List<TextHit> query$(IndexReader indexReader, Node property, String qs, String graphURI, String lang, int limit) throws ParseException, IOException {
    String textField = docDef.getField(property);
    String textClause;
    String langClause = null;
    String graphClause = null;
    // For the language-based search extension: restrict (or widen) the
    // query by the language tag stored alongside each indexed literal.
    if (getDocDef().getLangField() != null) {
        String langField = getDocDef().getLangField();
        if (lang != null) {
            if (this.isMultilingual && !lang.equals("none")) {
                // Multilingual indexes keep one field per language, e.g. "label_en".
                textField = textField + "_" + lang;
            }
            langClause = !"none".equals(lang) ? langField + ":" + lang : "-" + langField + ":*";
        }
    }
    if (textField != null)
        textClause = textField + ":" + qs;
    else
        textClause = qs;
    if (graphURI != null) {
        String escaped = QueryParserBase.escape(graphURI);
        graphClause = getDocDef().getGraphField() + ":" + escaped;
    }
    // Compose the final query string: (text) AND lang AND graph.
    String queryString = textClause;
    if (langClause != null)
        queryString = "(" + queryString + ") AND " + langClause;
    if (graphClause != null)
        queryString = "(" + queryString + ") AND " + graphClause;
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);
    Query query = parseQuery(queryString, queryAnalyzer);
    if (limit <= 0)
        limit = MAX_N;
    ScoreDoc[] sDocs = indexSearcher.search(query, limit).scoreDocs;
    List<TextHit> results = new ArrayList<>();
    for (ScoreDoc sd : sDocs) {
        Document doc = indexSearcher.doc(sd.doc);
        String[] values = doc.getValues(docDef.getEntityField());
        Node literal = null;
        String field = (property != null) ? docDef.getField(property) : docDef.getPrimaryField();
        String[] lexicals = doc.getValues(field);
        if (lexicals.length > 0) {
            String lexical = lexicals[0];
            String[] langs = doc.getValues(docDef.getLangField());
            if (langs.length > 0) {
                String doclang = langs[0];
                if (doclang.startsWith(DATATYPE_PREFIX)) {
                    // The "lang" slot doubles as a datatype marker for typed literals.
                    String datatype = doclang.substring(DATATYPE_PREFIX.length());
                    TypeMapper tmap = TypeMapper.getInstance();
                    literal = NodeFactory.createLiteral(lexical, tmap.getSafeTypeByName(datatype));
                } else {
                    literal = NodeFactory.createLiteral(lexical, doclang);
                }
            } else {
                literal = NodeFactory.createLiteral(lexical);
            }
        }
        // One TextHit per stored entity URI, all sharing the matched literal.
        for (String v : values) {
            Node n = TextQueryFuncs.stringToNode(v);
            TextHit hit = new TextHit(n, sd.score, literal);
            results.add(hit);
        }
    }
    return results;
}
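Stripped of the jena-specific plumbing, the core pattern above is: compose a Lucene query string, search with a hit limit, then walk the ScoreDoc array and load each hit's stored document. Below is a minimal self-contained sketch of that pattern, assuming a Lucene 8.x classpath; the field names, analyzer, and directory choice are illustrative, not jena's actual configuration.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class ScoreDocLoopSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        StandardAnalyzer analyzer = new StandardAnalyzer();
        try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
            Document d = new Document();
            d.add(new TextField("label_en", "hello world", Store.YES));
            d.add(new TextField("lang", "en", Store.YES));
            w.addDocument(d);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Same "(text) AND lang" shape that query$ composes above.
            Query q = new QueryParser("label_en", analyzer).parse("(label_en:hello) AND lang:en");
            ScoreDoc[] hits = searcher.search(q, 10).scoreDocs;
            for (ScoreDoc sd : hits) {
                Document doc = searcher.doc(sd.doc); // load stored fields by doc id
                System.out.println(sd.score + " -> " + doc.get("label_en"));
            }
        }
    }
}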
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class TestBackwardsCompatibility, method changeIndexWithAdds.
public void changeIndexWithAdds(Random random, Directory dir, Version nameVersion) throws IOException {
    SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
    assertEquals(nameVersion, infos.getCommitLuceneVersion());
    assertEquals(nameVersion, infos.getMinSegmentLuceneVersion());
    // open writer
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
    // add 10 docs
    for (int i = 0; i < 10; i++) {
        addDoc(writer, 35 + i);
    }
    // make sure writer sees right total -- writer seems not to know about deletes in .del?
    final int expected = 45;
    assertEquals("wrong doc count", expected, writer.numDocs());
    writer.close();
    // make sure searching sees right # hits
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = newSearcher(reader);
    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs;
    Document d = searcher.getIndexReader().document(hits[0].doc);
    assertEquals("wrong first document", "0", d.get("id"));
    doTestHits(hits, 44, searcher.getIndexReader());
    reader.close();
    // fully merge
    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
    writer.forceMerge(1);
    writer.close();
    reader = DirectoryReader.open(dir);
    searcher = newSearcher(reader);
    hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs;
    assertEquals("wrong number of hits", 44, hits.length);
    d = searcher.doc(hits[0].doc);
    doTestHits(hits, 44, searcher.getIndexReader());
    assertEquals("wrong first document", "0", d.get("id"));
    reader.close();
}
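The key moves in this test are reopening an existing index with OpenMode.APPEND and checking that the writer's live doc count agrees with what a searcher sees. A minimal sketch of that append-then-verify cycle, outside the test harness, assuming Lucene 8.x (where IndexWriter.numDocs() is still available); field names and counts here are illustrative:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class AppendModeSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        // First pass: create the index with a handful of docs.
        try (IndexWriter w = new IndexWriter(dir,
                new IndexWriterConfig(new StandardAnalyzer()).setOpenMode(OpenMode.CREATE))) {
            for (int i = 0; i < 5; i++) {
                Document d = new Document();
                d.add(new TextField("content", "aaa", Store.NO));
                w.addDocument(d);
            }
        }
        // Second pass: reopen in APPEND mode, as the test does, and add one more.
        try (IndexWriter w = new IndexWriter(dir,
                new IndexWriterConfig(new StandardAnalyzer()).setOpenMode(OpenMode.APPEND))) {
            Document d = new Document();
            d.add(new TextField("content", "aaa", Store.NO));
            w.addDocument(d);
            System.out.println("writer sees " + w.numDocs() + " docs"); // 6
        }
        // The searcher's hit count should agree with the writer's doc count.
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            ScoreDoc[] hits = new IndexSearcher(reader)
                    .search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs;
            System.out.println("searcher sees " + hits.length + " hits"); // 6
        }
    }
}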
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class TestIndexWriterCommit, method testCommitOnClose.
/*
 * Simple test for "commit on close": open writer then
 * add a bunch of docs, making sure reader does not see
 * these docs until writer is closed.
 */
public void testCommitOnClose() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
    for (int i = 0; i < 14; i++) {
        TestIndexWriter.addDoc(writer);
    }
    writer.close();
    Term searchTerm = new Term("content", "aaa");
    DirectoryReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = newSearcher(reader);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), 1000).scoreDocs;
    assertEquals("first number of hits", 14, hits.length);
    reader.close();
    reader = DirectoryReader.open(dir);
    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 11; j++) {
            TestIndexWriter.addDoc(writer);
        }
        IndexReader r = DirectoryReader.open(dir);
        searcher = newSearcher(r);
        hits = searcher.search(new TermQuery(searchTerm), 1000).scoreDocs;
        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
        r.close();
        assertTrue("reader should have still been current", reader.isCurrent());
    }
    // Now, close the writer:
    writer.close();
    assertFalse("reader should not be current now", reader.isCurrent());
    IndexReader r = DirectoryReader.open(dir);
    searcher = newSearcher(r);
    hits = searcher.search(new TermQuery(searchTerm), 1000).scoreDocs;
    assertEquals("reader did not see changes after writer was closed", 47, hits.length);
    r.close();
    reader.close();
    dir.close();
}
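The behavior under test is Lucene's point-in-time reader semantics: an already-open DirectoryReader never sees a writer's uncommitted changes, and isCurrent() only flips to false once a new commit exists. A minimal sketch of that lifecycle outside the test harness (class and field names are illustrative; commit() is used here where the test relies on close()):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class ReaderCurrencySketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            w.commit(); // initial (empty) commit so a reader can open the index
            DirectoryReader reader = DirectoryReader.open(dir);
            Document d = new Document();
            d.add(new TextField("content", "aaa", Store.NO));
            w.addDocument(d);
            System.out.println(reader.isCurrent()); // true: the add is not committed yet
            w.commit();
            System.out.println(reader.isCurrent()); // false: a newer commit exists
            // Reopen cheaply; openIfChanged returns null if nothing changed.
            DirectoryReader fresh = DirectoryReader.openIfChanged(reader);
            if (fresh != null) {
                reader.close();
                reader = fresh;
            }
            System.out.println(reader.numDocs()); // 1
            reader.close();
        }
    }
}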
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class TestDemoParallelLeafReader, method testNumericDVSort.
private static void testNumericDVSort(IndexSearcher s) throws IOException {
    // Confirm we can sort by the new DV field:
    TopDocs hits = s.search(new MatchAllDocsQuery(), 100, new Sort(new SortField("number", SortField.Type.LONG)));
    long last = Long.MIN_VALUE;
    for (ScoreDoc scoreDoc : hits.scoreDocs) {
        long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]);
        assertTrue(value >= last);
        assertEquals(value, ((Long) ((FieldDoc) scoreDoc).fields[0]).longValue());
        last = value;
    }
}
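When a search carries a Sort, each returned ScoreDoc is in fact a FieldDoc whose fields array holds the per-hit sort keys, which is what the cast above relies on. A minimal sketch that indexes a numeric doc-values field and reads the sort key back (field names and values are illustrative):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class NumericSortSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            for (long v : new long[] { 42, 7, 19 }) {
                Document d = new Document();
                d.add(new NumericDocValuesField("number", v)); // sortable, not stored
                w.addDocument(d);
            }
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher s = new IndexSearcher(reader);
            TopDocs hits = s.search(new MatchAllDocsQuery(), 10,
                    new Sort(new SortField("number", SortField.Type.LONG)));
            for (ScoreDoc sd : hits.scoreDocs) {
                // With a Sort, each hit is a FieldDoc carrying the sort keys.
                long key = (Long) ((FieldDoc) sd).fields[0];
                System.out.println(key); // prints 7, 19, 42
            }
        }
    }
}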
Use of org.apache.lucene.search.ScoreDoc in project lucene-solr by apache.
The class TestParallelCompositeReader, method queryTest.
private void queryTest(Query query) throws IOException {
    ScoreDoc[] parallelHits = parallel.search(query, 1000).scoreDocs;
    ScoreDoc[] singleHits = single.search(query, 1000).scoreDocs;
    assertEquals(parallelHits.length, singleHits.length);
    for (int i = 0; i < parallelHits.length; i++) {
        assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
        Document docParallel = parallel.doc(parallelHits[i].doc);
        Document docSingle = single.doc(singleHits[i].doc);
        assertEquals(docParallel.get("f1"), docSingle.get("f1"));
        assertEquals(docParallel.get("f2"), docSingle.get("f2"));
        assertEquals(docParallel.get("f3"), docSingle.get("f3"));
        assertEquals(docParallel.get("f4"), docSingle.get("f4"));
    }
}
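The comparison loop above generalizes to any two searchers that should rank identically; since scores are floats, they are compared with a small tolerance rather than exactly. A hedged sketch of that helper in isolation (plain Java assertions stand in for the test framework's; the method name is illustrative):

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;

public class SameHitsSketch {
    static void assertSameHits(IndexSearcher a, IndexSearcher b, Query q) throws IOException {
        ScoreDoc[] ha = a.search(q, 1000).scoreDocs;
        ScoreDoc[] hb = b.search(q, 1000).scoreDocs;
        if (ha.length != hb.length)
            throw new AssertionError("hit counts differ: " + ha.length + " vs " + hb.length);
        for (int i = 0; i < ha.length; i++) {
            // Scores are floats; allow a small tolerance, as the test above does.
            if (Math.abs(ha[i].score - hb[i].score) > 0.001f)
                throw new AssertionError("score mismatch at hit " + i);
        }
    }
}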