Usage example of org.apache.lucene.search.PhraseQuery in the Apache lucene-solr project:
class SolrQueryParserBase, method getFieldQuery.
/**
 * Base implementation delegates to {@link #getFieldQuery(String,String,boolean,boolean)}
 * and then applies the requested slop to the resulting (multi-)phrase query.
 * This method may be overridden, for example, to return a SpanNearQuery
 * instead of a PhraseQuery.
 */
protected Query getFieldQuery(String field, String queryText, int slop) throws SyntaxError {
  Query query = getFieldQuery(field, queryText, true, false);
  // Only adjust slop when this parser produced the query itself and not a sub-parser.
  if (subQParser == null) {
    if (query instanceof PhraseQuery) {
      // PhraseQuery is immutable: rebuild it term-by-term with the new slop.
      PhraseQuery original = (PhraseQuery) query;
      Term[] phraseTerms = original.getTerms();
      int[] phrasePositions = original.getPositions();
      PhraseQuery.Builder rebuilt = new PhraseQuery.Builder();
      for (int idx = 0; idx < phraseTerms.length; idx++) {
        rebuilt.add(phraseTerms[idx], phrasePositions[idx]);
      }
      rebuilt.setSlop(slop);
      query = rebuilt.build();
    } else if (query instanceof MultiPhraseQuery) {
      MultiPhraseQuery multiPhrase = (MultiPhraseQuery) query;
      // Skip the rebuild when the slop already matches.
      if (slop != multiPhrase.getSlop()) {
        query = new MultiPhraseQuery.Builder(multiPhrase).setSlop(slop).build();
      }
    }
  }
  return query;
}
Usage example of org.apache.lucene.search.PhraseQuery in the Apache lucene-solr project:
class SimpleSloppyPhraseQueryMaker, method prepareQueries.
/* (non-Javadoc)
 * @see org.apache.lucene.benchmark.byTask.feeds.SimpleQueryMaker#prepareQueries()
 */
@Override
protected Query[] prepareQueries() throws Exception {
  // Collect up to 100 whitespace-separated words from the sample doc text.
  ArrayList<String> tokens = new ArrayList<>();
  StringTokenizer tokenizer = new StringTokenizer(SingleDocSource.DOC_TEXT);
  while (tokenizer.hasMoreTokens() && tokens.size() < 100) {
    tokens.add(tokenizer.nextToken());
  }
  String[] words = tokens.toArray(new String[0]);
  // Create queries (that would find stuff) with varying slops and lengths.
  ArrayList<Query> queries = new ArrayList<>();
  for (int slop = 0; slop < 8; slop++) {
    for (int qlen = 2; qlen < 6; qlen++) {
      for (int wd = 0; wd < words.length - qlen - slop; wd++) {
        // Ordered phrase: spend the slop budget by skipping source words.
        int slopLeft = slop;
        int pos = wd;
        PhraseQuery.Builder forward = new PhraseQuery.Builder();
        for (int i = 0; i < qlen; i++) {
          forward.add(new Term(DocMaker.BODY_FIELD, words[pos++]), i);
          if (slopLeft > 0) {
            slopLeft--;
            pos++;
          }
        }
        forward.setSlop(slop);
        queries.add(forward.build());
        // Reversed phrase: walk the same word span backwards; the extra
        // 2*qlen slop lets the out-of-order terms still match.
        slopLeft = slop;
        pos = wd + qlen + slopLeft - 1;
        PhraseQuery.Builder backward = new PhraseQuery.Builder();
        for (int i = 0; i < qlen; i++) {
          backward.add(new Term(DocMaker.BODY_FIELD, words[pos--]), i);
          if (slopLeft > 0) {
            slopLeft--;
            pos--;
          }
        }
        backward.setSlop(slop + 2 * qlen);
        queries.add(backward.build());
      }
    }
  }
  return queries.toArray(new Query[0]);
}
Usage example of org.apache.lucene.search.PhraseQuery in the Apache lucene-solr project:
class TestAddIndexes, method testWithPendingDeletes3.
// Verifies that addIndexes() works while the writer holds buffered
// (pending) deletes from updateDocument, and that doc counts come out
// right after a forced merge.
public void testWithPendingDeletes3() throws IOException {
// main directory
Directory dir = newDirectory();
// auxiliary directory
Directory aux = newDirectory();
setUpDirs(dir, aux);
IndexWriter writer = newWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
// Update 20 docs cycling over 10 ids; each updateDocument replaces the
// previous doc with that id, so 10 pending deletes accumulate:
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
doc.add(new IntPoint("doc", i));
doc.add(new IntPoint("doc2d", i, i));
doc.add(new NumericDocValuesField("dv", i));
writer.updateDocument(new Term("id", "" + (i % 10)), doc);
}
// Deletes one of the 10 surviving docs (the one whose content is
// exactly "bbb 14"), leaving 9:
PhraseQuery q = new PhraseQuery("content", "bbb", "14");
writer.deleteDocuments(q);
writer.addIndexes(aux);
writer.forceMerge(1);
writer.commit();
// NOTE(review): expected totals (1039 / 1030) depend on the docs that
// setUpDirs() puts into dir and aux — confirm against that helper.
verifyNumDocs(dir, 1039);
verifyTermDocs(dir, new Term("content", "aaa"), 1030);
// Only the 9 surviving "bbb" docs from the update loop remain.
verifyTermDocs(dir, new Term("content", "bbb"), 9);
writer.close();
dir.close();
aux.close();
}
Usage example of org.apache.lucene.search.PhraseQuery in the Apache lucene-solr project:
class TestOmitTf, method testBasic.
// Test scores with one field with Term Freqs ("tf") and one without
// ("noTf"), otherwise with equal content. Verifies that omitting term
// frequencies pins scores to 1.0 and makes phrase search fail.
public void testBasic() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer).setMaxBufferedDocs(2).setSimilarity(new SimpleSimilarity()).setMergePolicy(newLogMergePolicy(2)));
StringBuilder sb = new StringBuilder(265);
String term = "term";
// Doc i contains i+1 repetitions of "term" (sb grows each iteration);
// odd docs additionally get "notf", even docs additionally get "tf".
for (int i = 0; i < 30; i++) {
Document d = new Document();
sb.append(term).append(" ");
String content = sb.toString();
Field noTf = newField("noTf", content + (i % 2 == 0 ? "" : " notf"), omitType);
d.add(noTf);
Field tf = newField("tf", content + (i % 2 == 0 ? " tf" : ""), normalType);
d.add(tf);
writer.addDocument(d);
//System.out.println(d);
}
writer.forceMerge(1);
// flush
writer.close();
/*
 * Verify the index
 */
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = newSearcher(reader);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("noTf", term);
Term b = new Term("tf", term);
Term c = new Term("noTf", "notf");
Term d = new Term("tf", "tf");
TermQuery q1 = new TermQuery(a);
TermQuery q2 = new TermQuery(b);
TermQuery q3 = new TermQuery(c);
TermQuery q4 = new TermQuery(d);
// Phrase search over the "noTf" field must fail: omitType presumably
// drops the positions a PhraseQuery needs — the search is expected to
// surface an IllegalStateException.
PhraseQuery pq = new PhraseQuery(a.field(), a.bytes(), c.bytes());
Exception expected = expectThrows(Exception.class, () -> {
searcher.search(pq, 10);
});
Throwable cause = expected;
// If the searcher uses an executor service, the IAE is wrapped into other exceptions
while (cause.getCause() != null) {
cause = cause.getCause();
}
assertTrue("Expected an IAE, got " + cause, cause instanceof IllegalStateException);
// q1: "term" on the omit-tf field — every score must be exactly 1.0
// because term frequency is not stored.
searcher.search(q1, new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
}
@Override
public final void setScorer(Scorer scorer) {
this.scorer = scorer;
}
@Override
public final void collect(int doc) throws IOException {
//System.out.println("Q1: Doc=" + doc + " score=" + score);
float score = scorer.score();
assertTrue("got score=" + score, score == 1.0f);
super.collect(doc);
}
});
//System.out.println(CountingHitCollector.getCount());
// q2: "term" on the normal field — SimpleSimilarity makes the score
// track term frequency, which is doc+1 (doc i holds i+1 copies).
searcher.search(q2, new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
}
@Override
public final void setScorer(Scorer scorer) {
this.scorer = scorer;
}
@Override
public final void collect(int doc) throws IOException {
//System.out.println("Q2: Doc=" + doc + " score=" + score);
float score = scorer.score();
assertEquals(1.0f + doc, score, 0.00001f);
super.collect(doc);
}
});
//System.out.println(CountingHitCollector.getCount());
// q3: "notf" only appears in odd docs; scores stay 1.0 on the omit field.
searcher.search(q3, new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
}
@Override
public final void setScorer(Scorer scorer) {
this.scorer = scorer;
}
@Override
public final void collect(int doc) throws IOException {
//System.out.println("Q3: Doc=" + doc + " score=" + score);
float score = scorer.score();
assertTrue(score == 1.0f);
assertFalse(doc % 2 == 0);
super.collect(doc);
}
});
//System.out.println(CountingHitCollector.getCount());
// q4: "tf" only appears in even docs; every match scores exactly 1.0
// (the token occurs once per matching doc).
searcher.search(q4, new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
}
@Override
public final void setScorer(Scorer scorer) {
this.scorer = scorer;
}
@Override
public final void collect(int doc) throws IOException {
float score = scorer.score();
//System.out.println("Q4: Doc=" + doc + " score=" + score);
assertTrue(score == 1.0f);
assertTrue(doc % 2 == 0);
super.collect(doc);
}
});
//System.out.println(CountingHitCollector.getCount());
// Conjunction q1 AND q4: only the 15 even docs match both clauses.
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(q1, Occur.MUST);
bq.add(q4, Occur.MUST);
searcher.search(bq.build(), new CountingHitCollector() {
@Override
public final void collect(int doc) throws IOException {
//System.out.println("BQ: Doc=" + doc + " score=" + score);
super.collect(doc);
}
});
assertEquals(15, CountingHitCollector.getCount());
reader.close();
dir.close();
}
Usage example of org.apache.lucene.search.PhraseQuery in the Apache lucene-solr project:
class TestReadOnlyIndex, method doTestReadOnlyIndex.
// Runs a handful of searches against a pre-built read-only index and
// checks the expected hits come back; declared Void so it can be used
// as a Callable body.
private Void doTestReadOnlyIndex() throws Exception {
  Directory directory = FSDirectory.open(indexPath);
  IndexReader indexReader = DirectoryReader.open(directory);
  IndexSearcher searcher = newSearcher(indexReader);
  // borrows from TestDemo, but not important to keep in sync with demo
  assertEquals(1, searcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
  Query termQuery = new TermQuery(new Term("fieldname", "text"));
  TopDocs topDocs = searcher.search(termQuery, 1);
  assertEquals(1, topDocs.totalHits);
  // Every returned hit must carry the original stored field value.
  for (int hit = 0; hit < topDocs.scoreDocs.length; hit++) {
    Document stored = searcher.doc(topDocs.scoreDocs[hit].doc);
    assertEquals(text, stored.get("fieldname"));
  }
  // Test simple phrase query
  PhraseQuery phraseQuery = new PhraseQuery("fieldname", "to", "be");
  assertEquals(1, searcher.search(phraseQuery, 1).totalHits);
  indexReader.close();
  // Nothing meaningful to return for Void.
  return null;
}
End of aggregated PhraseQuery usage examples.