Use of org.apache.lucene.search.spans.SpanTermQuery in project lucene-solr by apache.
From the class TestPayloadSpans, method testSpanNot:
public void testSpanNot() throws Exception {
  // "one" followed by "three" within 5 positions, in order, with no "two" inside the span.
  SpanQuery[] clauses = new SpanQuery[2];
  clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
  clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "three"));
  SpanQuery spq = new SpanNearQuery(clauses, 5, true);
  SpanNotQuery snq = new SpanNotQuery(spq, new SpanTermQuery(new Term(PayloadHelper.FIELD, "two")));

  Directory directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));
  Document doc = new Document();
  doc.add(newTextField(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader reader = getOnlyLeafReader(writer.getReader());
  writer.close();

  // Iterate the surviving spans with payload postings and verify counts via the test's checkSpans helper.
  checkSpans(snq.createWeight(newSearcher(reader, false), false, 1f)
      .getSpans(reader.leaves().get(0), SpanWeight.Postings.PAYLOADS), 1, new int[] { 2 });
  reader.close();
  directory.close();
}
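The checkSpans helper is defined elsewhere in TestPayloadSpans and is not shown here. As a minimal, hedged sketch of the kind of iteration such a helper would perform (not the actual implementation), a Spans obtained exactly as in the call above can be walked with the public Spans API:

// Hedged sketch, not the actual checkSpans helper: count matching spans per document.
Spans spans = snq.createWeight(newSearcher(reader, false), false, 1f)
    .getSpans(reader.leaves().get(0), SpanWeight.Postings.PAYLOADS);
while (spans.nextDoc() != Spans.NO_MORE_DOCS) {
  int spansInDoc = 0;
  while (spans.nextStartPosition() != Spans.NO_MORE_POSITIONS) {
    spansInDoc++;
  }
  System.out.println("doc " + spans.docID() + ": " + spansInDoc + " span(s)");
}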
Use of org.apache.lucene.search.spans.SpanTermQuery in project lucene-solr by apache.
From the class TestPayloadSpans, method testSpanFirst:
public void testSpanFirst() throws IOException {
  SpanQuery match;
  SpanFirstQuery sfq;
  match = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
  sfq = new SpanFirstQuery(match, 2);
  Spans spans = sfq.createWeight(searcher, false, 1f)
      .getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
  checkSpans(spans, 109, 1, 1, 1);
  // Test more complicated subclause
  SpanQuery[] clauses = new SpanQuery[2];
  clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
  clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "hundred"));
  match = new SpanNearQuery(clauses, 0, true);
  sfq = new SpanFirstQuery(match, 2);
  checkSpans(sfq.createWeight(searcher, false, 1f)
      .getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS), 100, 2, 1, 1);
  match = new SpanNearQuery(clauses, 0, false);
  sfq = new SpanFirstQuery(match, 2);
  checkSpans(sfq.createWeight(searcher, false, 1f)
      .getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS), 100, 2, 1, 1);
}
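SpanFirstQuery(match, 2) restricts the wrapped spans to those ending at or before position 2. As a hedged sketch, the same restriction can be expressed with the more general SpanPositionRangeQuery, reusing the match variable from the test above:

// Hedged sketch: spans starting at or after position 0 and ending at or before position 2.
SpanQuery firstTwo = new SpanPositionRangeQuery(match, 0, 2);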
Use of org.apache.lucene.search.spans.SpanTermQuery in project lucene-solr by apache.
From the class QueryBuilder, method createSpanQuery:
/**
 * Creates a span query from the token stream. In the case of a single token, a simple
 * <code>SpanTermQuery</code> is returned. When there are multiple tokens, an ordered
 * <code>SpanNearQuery</code> with a slop of 0 is returned.
 */
protected final SpanQuery createSpanQuery(TokenStream in, String field) throws IOException {
  TermToBytesRefAttribute termAtt = in.getAttribute(TermToBytesRefAttribute.class);
  if (termAtt == null) {
    return null;
  }
  List<SpanTermQuery> terms = new ArrayList<>();
  while (in.incrementToken()) {
    terms.add(new SpanTermQuery(new Term(field, termAtt.getBytesRef())));
  }
  if (terms.isEmpty()) {
    return null;
  } else if (terms.size() == 1) {
    return terms.get(0);
  } else {
    return new SpanNearQuery(terms.toArray(new SpanTermQuery[0]), 0, true);
  }
}
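Because createSpanQuery is protected, outside code reaches it through a QueryBuilder subclass. The following is a minimal, hedged sketch of how the flow might be driven end to end; the SpanQueryBuilder class and the choice of StandardAnalyzer are illustrative assumptions, not part of QueryBuilder itself:

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.QueryBuilder;

// Hypothetical subclass used only to expose the protected helper for illustration.
class SpanQueryBuilder extends QueryBuilder {
  SpanQueryBuilder(Analyzer analyzer) {
    super(analyzer);
  }

  SpanQuery buildSpanQuery(String field, String text) throws IOException {
    // The caller owns the TokenStream lifecycle; createSpanQuery only consumes tokens.
    try (TokenStream ts = getAnalyzer().tokenStream(field, text)) {
      ts.reset();
      SpanQuery query = createSpanQuery(ts, field);
      ts.end();
      return query;
    }
  }
}

// Usage (e.g. inside a method):
//   new SpanQueryBuilder(new StandardAnalyzer()).buildSpanQuery("body", "quick")
//     -> a single SpanTermQuery
//   new SpanQueryBuilder(new StandardAnalyzer()).buildSpanQuery("body", "quick brown fox")
//     -> an ordered SpanNearQuery with slop 0 over "quick", "brown", "fox"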
Use of org.apache.lucene.search.spans.SpanTermQuery in project lucene-solr by apache.
From the class TestDisjunctionMaxQuery, method testBooleanSpanQuery:
// LUCENE-4477 / LUCENE-4401:
public void testBooleanSpanQuery() throws Exception {
  int hits = 0;
  Directory directory = newDirectory();
  Analyzer indexerAnalyzer = new MockAnalyzer(random());
  IndexWriterConfig config = new IndexWriterConfig(indexerAnalyzer);
  IndexWriter writer = new IndexWriter(directory, config);
  String FIELD = "content";
  Document d = new Document();
  d.add(new TextField(FIELD, "clockwork orange", Field.Store.YES));
  writer.addDocument(d);
  writer.close();
  IndexReader indexReader = DirectoryReader.open(directory);
  IndexSearcher searcher = newSearcher(indexReader);
  DisjunctionMaxQuery query = new DisjunctionMaxQuery(
      Arrays.asList(new SpanTermQuery(new Term(FIELD, "clockwork")),
                    new SpanTermQuery(new Term(FIELD, "clckwork"))),
      1.0f);
  TopScoreDocCollector collector = TopScoreDocCollector.create(1000);
  searcher.search(query, collector);
  hits = collector.topDocs().scoreDocs.length;
  for (ScoreDoc scoreDoc : collector.topDocs().scoreDocs) {
    System.out.println(scoreDoc.doc);
  }
  indexReader.close();
  assertEquals(hits, 1);
  directory.close();
}
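For comparison, a hedged fragment building the same disjunction from plain TermQuery clauses; DisjunctionMaxQuery accepts any Query, which is what lets the test above pass span clauses. The fragment reuses FIELD and searcher from the test above, before the reader is closed:

// Hedged sketch: equivalent disjunction over plain term queries.
Query plainDisjunction = new DisjunctionMaxQuery(
    Arrays.asList(new TermQuery(new Term(FIELD, "clockwork")),
                  new TermQuery(new Term(FIELD, "clckwork"))),
    1.0f);
TopDocs td = searcher.search(plainDisjunction, 10);
// Only "clockwork" matches the single indexed document.
assertEquals(1, td.totalHits);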
Use of org.apache.lucene.search.spans.SpanTermQuery in project lucene-solr by apache.
From the class HighlighterPhraseTest, method testSparseSpan:
public void testSparseSpan() throws IOException, InvalidTokenOffsetsException {
  final String TEXT = "the fox did not jump";
  final Directory directory = newDirectory();
  final IndexWriter indexWriter = new IndexWriter(directory,
      newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
  try {
    final Document document = new Document();
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setStoreTermVectorOffsets(true);
    customType.setStoreTermVectorPositions(true);
    customType.setStoreTermVectors(true);
    document.add(new Field(FIELD, new TokenStreamSparse(), customType));
    indexWriter.addDocument(document);
  } finally {
    indexWriter.close();
  }
  final IndexReader indexReader = DirectoryReader.open(directory);
  try {
    assertEquals(1, indexReader.numDocs());
    final IndexSearcher indexSearcher = newSearcher(indexReader);
    final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
        new SpanTermQuery(new Term(FIELD, "did")),
        new SpanTermQuery(new Term(FIELD, "jump")) }, 0, true);
    TopDocs hits = indexSearcher.search(phraseQuery, 1);
    assertEquals(0, hits.totalHits);
    final Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(),
        new SimpleHTMLEncoder(), new QueryScorer(phraseQuery));
    final TokenStream tokenStream = TokenSources.getTermVectorTokenStreamOrNull(FIELD,
        indexReader.getTermVectors(0), -1);
    assertEquals(highlighter.getBestFragment(new TokenStreamSparse(), TEXT),
        highlighter.getBestFragment(tokenStream, TEXT));
  } finally {
    indexReader.close();
    directory.close();
  }
}
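As a smaller hedged sketch of the same Highlighter API with a single SpanTermQuery, reusing FIELD and TEXT from the test above and analyzing the raw text instead of the stored term vectors:

// Hedged sketch: highlight one span term against the raw text.
Query foxQuery = new SpanTermQuery(new Term(FIELD, "fox"));
Highlighter h = new Highlighter(new SimpleHTMLFormatter(), new SimpleHTMLEncoder(),
    new QueryScorer(foxQuery));
try (TokenStream ts = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)
    .tokenStream(FIELD, TEXT)) {
  // Expected to wrap "fox" in the formatter's default <B> tags.
  System.out.println(h.getBestFragment(ts, TEXT));
}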