Example usage of org.opensearch.search.fetch.FetchSubPhaseProcessor in the opensearch-project/OpenSearch repository: the getProcessor method of the PercolatorMatchedSlotSubFetchPhase class.
@Override
public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) throws IOException {
    // Locate every percolate query embedded in the search request; without any,
    // this sub-phase has nothing to do.
    final List<PercolateQuery> percolateQueries = locatePercolatorQuery(fetchContext.query());
    if (percolateQueries.isEmpty()) {
        return null;
    }
    final boolean single = percolateQueries.size() == 1;
    final List<PercolateContext> contexts = new ArrayList<>(percolateQueries.size());
    for (PercolateQuery percolateQuery : percolateQueries) {
        contexts.add(new PercolateContext(percolateQuery, single));
    }
    return new FetchSubPhaseProcessor() {
        LeafReaderContext leafContext;

        @Override
        public void setNextReader(LeafReaderContext readerContext) {
            leafContext = readerContext;
        }

        @Override
        public void process(HitContext hitContext) throws IOException {
            for (PercolateContext percolateContext : contexts) {
                String slotFieldName = percolateContext.fieldName();
                Query candidateQuery = percolateContext.percolateQuery.getQueryStore()
                    .getQueries(leafContext)
                    .apply(hitContext.docId());
                if (candidateQuery == null) {
                    // This is not a document with a percolator field.
                    continue;
                }
                candidateQuery = percolateContext.filterNestedDocs(candidateQuery);
                IndexSearcher memorySearcher = percolateContext.percolateQuery.getPercolatorIndexSearcher();
                int maxDoc = memorySearcher.getIndexReader().maxDoc();
                TopDocs matches = memorySearcher.search(candidateQuery, maxDoc, new Sort(SortField.FIELD_DOC));
                if (matches.totalHits.value == 0) {
                    // Likely to happen when percolating multiple documents at once.
                    continue;
                }
                IntStream slotStream = convertTopDocsToSlots(matches, percolateContext.rootDocsBySlot);
                // _percolator_document_slot fields are document fields and belong
                // under the "fields" section of a hit.
                hitContext.hit().setDocumentField(
                    slotFieldName,
                    new DocumentField(slotFieldName, slotStream.boxed().collect(Collectors.toList()))
                );
            }
        }
    };
}
Example usage of org.opensearch.search.fetch.FetchSubPhaseProcessor in the opensearch-project/OpenSearch repository: the getProcessor method of the FetchScorePhase class.
@Override
public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOException {
    // Scores are only recomputed when the request explicitly asked for them.
    if (context.fetchScores() == false) {
        return null;
    }
    final IndexSearcher searcher = context.searcher();
    final Query rewritten = searcher.rewrite(context.query());
    final Weight weight = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1);
    return new FetchSubPhaseProcessor() {
        Scorer segmentScorer;

        @Override
        public void setNextReader(LeafReaderContext readerContext) throws IOException {
            ScorerSupplier supplier = weight.scorerSupplier(readerContext);
            if (supplier == null) {
                throw new IllegalStateException("Can't compute score on document as it doesn't match the query");
            }
            // Hint the supplier that we will access documents randomly rather
            // than scanning the whole segment.
            segmentScorer = supplier.get(1L);
        }

        @Override
        public void process(HitContext hitContext) throws IOException {
            boolean positioned = segmentScorer != null
                && segmentScorer.iterator().advance(hitContext.docId()) == hitContext.docId();
            if (positioned == false) {
                throw new IllegalStateException("Can't compute score on document " + hitContext + " as it doesn't match the query");
            }
            hitContext.hit().score(segmentScorer.score());
        }
    };
}
Example usage of org.opensearch.search.fetch.FetchSubPhaseProcessor in the opensearch-project/OpenSearch repository: the getProcessor method of the MatchedQueriesPhase class.
@Override
public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOException {
    // Gather every named query/filter from both the main query and the post filter.
    Map<String, Query> namedQueries = new HashMap<>();
    if (context.parsedQuery() != null) {
        namedQueries.putAll(context.parsedQuery().namedFilters());
    }
    if (context.parsedPostFilter() != null) {
        namedQueries.putAll(context.parsedPostFilter().namedFilters());
    }
    if (namedQueries.isEmpty()) {
        return null;
    }
    // Pre-build a weight per named query; scores are not needed, only matching.
    Map<String, Weight> weights = new HashMap<>();
    for (Map.Entry<String, Query> named : namedQueries.entrySet()) {
        Query rewritten = context.searcher().rewrite(named.getValue());
        weights.put(named.getKey(), context.searcher().createWeight(rewritten, ScoreMode.COMPLETE_NO_SCORES, 1));
    }
    return new FetchSubPhaseProcessor() {
        final Map<String, Bits> matchingBitsByName = new HashMap<>();

        @Override
        public void setNextReader(LeafReaderContext readerContext) throws IOException {
            matchingBitsByName.clear();
            for (Map.Entry<String, Weight> entry : weights.entrySet()) {
                ScorerSupplier supplier = entry.getValue().scorerSupplier(readerContext);
                if (supplier != null) {
                    // Sequential-access bits: hits are processed in doc-id order.
                    matchingBitsByName.put(
                        entry.getKey(),
                        Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), supplier)
                    );
                }
            }
        }

        @Override
        public void process(HitContext hitContext) {
            int doc = hitContext.docId();
            List<String> matchedNames = new ArrayList<>();
            for (Map.Entry<String, Bits> named : matchingBitsByName.entrySet()) {
                if (named.getValue().get(doc)) {
                    matchedNames.add(named.getKey());
                }
            }
            hitContext.hit().matchedQueries(matchedNames.toArray(new String[0]));
        }
    };
}
Example usage of org.opensearch.search.fetch.FetchSubPhaseProcessor in the opensearch-project/OpenSearch repository: the getProcessor method of the ExplainPhase class.
@Override
public FetchSubPhaseProcessor getProcessor(FetchContext context) {
    // Only active when the request asked for explanations.
    if (context.explain() == false) {
        return null;
    }
    return new FetchSubPhaseProcessor() {
        @Override
        public void setNextReader(LeafReaderContext readerContext) {
            // Explanations are computed against the top-level searcher,
            // so no per-segment state is needed.
        }

        @Override
        public void process(HitContext hitContext) throws IOException {
            // Use the top-level doc id, since we work with the top-level searcher.
            final int topLevelDocId = hitContext.hit().docId();
            Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
            // Let each rescorer fold its contribution into the explanation.
            for (RescoreContext rescoreContext : context.rescore()) {
                explanation = rescoreContext.rescorer().explain(topLevelDocId, context.searcher(), rescoreContext, explanation);
            }
            hitContext.hit().explanation(explanation);
        }
    };
}
Example usage of org.opensearch.search.fetch.FetchSubPhaseProcessor in the opensearch-project/OpenSearch repository: the testHitsExecute method of the PercolatorMatchedSlotSubFetchPhaseTests class.
/**
 * Exercises PercolatorMatchedSlotSubFetchPhase against a single-document index:
 * a matching percolated document records slot 0, a non-matching document and a
 * missing query both leave the slot field unset.
 */
public void testHitsExecute() throws Exception {
    try (Directory directory = newDirectory()) {
        // Need a one doc index:
        try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            indexWriter.addDocument(new Document());
        }
        PercolatorMatchedSlotSubFetchPhase phase = new PercolatorMatchedSlotSubFetchPhase();
        try (DirectoryReader reader = DirectoryReader.open(directory)) {
            LeafReaderContext context = reader.leaves().get(0);
            // Store that always resolves to the same term query on "field:value".
            PercolateQuery.QueryStore matchingStore = ctx -> docId -> new TermQuery(new Term("field", "value"));

            // A match: the percolated document contains the queried term.
            {
                HitContext hit = processHit(phase, context, matchingStore, "value");
                assertNotNull(hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
                assertEquals(0, (int) hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX).getValue());
            }
            // No match: the percolated document does not contain the queried term.
            {
                HitContext hit = processHit(phase, context, matchingStore, "value1");
                assertNull(hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
            }
            // No query: the query store yields no query for this document.
            {
                HitContext hit = processHit(phase, context, ctx -> docId -> null, "value");
                assertNull(hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
            }
        }
    }
}

/**
 * Builds a percolate query over a one-field MemoryIndex document, runs the
 * phase's processor over a fresh hit, and returns that hit for assertions.
 *
 * @param phase      the sub-fetch phase under test
 * @param context    leaf context of the single-document index
 * @param queryStore supplies the candidate query per document (may yield null)
 * @param fieldValue value indexed under "field" in the in-memory document
 */
private static HitContext processHit(
    PercolatorMatchedSlotSubFetchPhase phase,
    LeafReaderContext context,
    PercolateQuery.QueryStore queryStore,
    String fieldValue
) throws IOException {
    HitContext hit = new HitContext(new SearchHit(0), context, 0, new SourceLookup());
    MemoryIndex memoryIndex = new MemoryIndex();
    memoryIndex.addField("field", fieldValue, new WhitespaceAnalyzer());
    memoryIndex.addField(new NumericDocValuesField(SeqNoFieldMapper.PRIMARY_TERM_NAME, 0), null);
    PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
    FetchContext fetchContext = mock(FetchContext.class);
    when(fetchContext.query()).thenReturn(percolateQuery);
    FetchSubPhaseProcessor processor = phase.getProcessor(fetchContext);
    assertNotNull(processor);
    processor.process(hit);
    return hit;
}
Aggregations