Use of org.opensearch.search.internal.ScrollContext in project OpenSearch by opensearch-project: class QueryPhaseTests, method testInOrderScrollOptimization.
public void testInOrderScrollOptimization() throws Exception {
    Directory dir = newDirectory();
    final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
    IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
    final int numDocs = scaledRandomIntBetween(100, 200);
    for (int i = 0; i < numDocs; ++i) {
        w.addDocument(new Document());
    }
    w.close();

    IndexReader reader = DirectoryReader.open(dir);
    ScrollContext scrollContext = new ScrollContext();
    TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext);
    context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
    scrollContext.lastEmittedDoc = null;
    scrollContext.maxScore = Float.NaN;
    scrollContext.totalHits = null;
    context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
    int size = randomIntBetween(2, 5);
    context.setSize(size);

    QueryPhase.executeInternal(context);
    assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
    assertNull(context.queryResult().terminatedEarly());
    assertThat(context.terminateAfter(), equalTo(0));
    assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));

    context.setSearcher(newEarlyTerminationContextSearcher(reader, size));
    QueryPhase.executeInternal(context);
    assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
    assertThat(context.terminateAfter(), equalTo(size));
    assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
    assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size));

    reader.close();
    dir.close();
}
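The optimization this test exercises serves the next scroll page without re-collecting documents that were already returned: later rounds skip past ScrollContext.lastEmittedDoc and stop after size matches. As a rough standalone illustration of that paging idea (not the OpenSearch code path, which rewrites the query with MinDocQuery and terminate_after), the same behaviour can be sketched with plain Lucene using IndexSearcher.searchAfter and a lastEmittedDoc cursor; class and variable names below are illustrative only:

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class ScrollPagingSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig())) {
            for (int i = 0; i < 25; i++) {
                w.addDocument(new Document());
            }
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            int pageSize = 10;
            ScoreDoc lastEmittedDoc = null; // plays the role of ScrollContext.lastEmittedDoc
            while (true) {
                // Each round only collects the next pageSize hits after the last
                // document that was already handed back to the client.
                TopDocs page = searcher.searchAfter(lastEmittedDoc, new MatchAllDocsQuery(), pageSize);
                if (page.scoreDocs.length == 0) {
                    break;
                }
                lastEmittedDoc = page.scoreDocs[page.scoreDocs.length - 1];
                System.out.println("page of " + page.scoreDocs.length + " docs, last doc id=" + lastEmittedDoc.doc);
            }
        }
        dir.close();
    }
}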
Use of org.opensearch.search.internal.ScrollContext in project OpenSearch by opensearch-project: class QueryPhaseTests, method testIndexSortScrollOptimization.
public void testIndexSortScrollOptimization() throws Exception {
    Directory dir = newDirectory();
    final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT));
    IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort);
    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
    final int numDocs = scaledRandomIntBetween(100, 200);
    for (int i = 0; i < numDocs; ++i) {
        Document doc = new Document();
        doc.add(new NumericDocValuesField("rank", random().nextInt()));
        doc.add(new NumericDocValuesField("tiebreaker", i));
        w.addDocument(doc);
    }
    if (randomBoolean()) {
        w.forceMerge(randomIntBetween(1, 10));
    }
    w.close();

    final IndexReader reader = DirectoryReader.open(dir);
    List<SortAndFormats> searchSortAndFormats = new ArrayList<>();
    searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }));
    // search sort is a prefix of the index sort
    searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW }));
    for (SortAndFormats searchSortAndFormat : searchSortAndFormats) {
        ScrollContext scrollContext = new ScrollContext();
        TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext);
        context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
        scrollContext.lastEmittedDoc = null;
        scrollContext.maxScore = Float.NaN;
        scrollContext.totalHits = null;
        context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
        context.setSize(10);
        context.sort(searchSortAndFormat);

        QueryPhase.executeInternal(context);
        assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
        assertNull(context.queryResult().terminatedEarly());
        assertThat(context.terminateAfter(), equalTo(0));
        assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
        int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1;
        FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1];

        context.setSearcher(newEarlyTerminationContextSearcher(reader, 10));
        QueryPhase.executeInternal(context);
        assertNull(context.queryResult().terminatedEarly());
        assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
        assertThat(context.terminateAfter(), equalTo(0));
        assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
        FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
        for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) {
            @SuppressWarnings("unchecked")
            FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator(1, i);
            int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]);
            if (cmp == 0) {
                continue;
            }
            assertThat(cmp, equalTo(1));
            break;
        }
    }
    reader.close();
    dir.close();
}
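This test relies on the search sort being either the full index sort or a prefix of it, which is what lets the query phase early-terminate collection per segment. Below is a minimal sketch of that prefix check; it illustrates the idea only and is not the project's canEarlyTerminate, which also reads the per-segment index sort from the IndexReader. The class and method names are made up for the example:

import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;

public class SortPrefixCheck {

    // Returns true when every field of the search sort matches the index sort,
    // position by position, i.e. the search sort is a prefix of the index sort.
    static boolean isPrefixOfIndexSort(Sort searchSort, Sort indexSort) {
        SortField[] search = searchSort.getSort();
        SortField[] index = indexSort.getSort();
        if (search.length > index.length) {
            return false;
        }
        for (int i = 0; i < search.length; i++) {
            // SortField defines equals(), so matching field name, type and order suffices here
            if (search[i].equals(index[i]) == false) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT));
        Sort prefixSort = new Sort(new SortField("rank", SortField.Type.INT));
        Sort otherSort = new Sort(new SortField("price", SortField.Type.INT));

        System.out.println(isPrefixOfIndexSort(indexSort, indexSort));  // true
        System.out.println(isPrefixOfIndexSort(prefixSort, indexSort)); // true
        System.out.println(isPrefixOfIndexSort(otherSort, indexSort));  // false
    }
}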
Use of org.opensearch.search.internal.ScrollContext in project OpenSearch by opensearch-project: class QueryPhase, method executeInternal.
/**
* In a package-private method so that it can be tested without having to
* wire everything (mapperService, etc.)
* @return whether the rescoring phase should be executed
*/
static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExecutionException {
    final ContextIndexSearcher searcher = searchContext.searcher();
    final IndexReader reader = searcher.getIndexReader();
    QuerySearchResult queryResult = searchContext.queryResult();
    queryResult.searchTimedOut(false);
    try {
        queryResult.from(searchContext.from());
        queryResult.size(searchContext.size());
        Query query = searchContext.query();
        // already rewritten
        assert query == searcher.rewrite(query);

        final ScrollContext scrollContext = searchContext.scrollContext();
        if (scrollContext != null) {
            if (scrollContext.totalHits == null) {
                // first round
                assert scrollContext.lastEmittedDoc == null;
                // there is not much that we can optimize here since we want to collect all
                // documents in order to get the total number of hits
            } else {
                final ScoreDoc after = scrollContext.lastEmittedDoc;
                if (returnsDocsInOrder(query, searchContext.sort())) {
                    // skip to the desired doc
                    if (after != null) {
                        query = new BooleanQuery.Builder().add(query, BooleanClause.Occur.MUST)
                            .add(new MinDocQuery(after.doc + 1), BooleanClause.Occur.FILTER)
                            .build();
                    }
                    // ... and stop collecting after ${size} matches
                    searchContext.terminateAfter(searchContext.size());
                } else if (canEarlyTerminate(reader, searchContext.sort())) {
                    // skip to the desired doc
                    if (after != null) {
                        query = new BooleanQuery.Builder().add(query, BooleanClause.Occur.MUST)
                            .add(new SearchAfterSortedDocQuery(searchContext.sort().sort, (FieldDoc) after), BooleanClause.Occur.FILTER)
                            .build();
                    }
                }
            }
        }

        final LinkedList<QueryCollectorContext> collectors = new LinkedList<>();
        // whether the chain contains a collector that filters documents
        boolean hasFilterCollector = false;
        if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER) {
            // add terminate_after before the filter collectors
            // it will only be applied on documents accepted by these filter collectors
            collectors.add(createEarlyTerminationCollectorContext(searchContext.terminateAfter()));
            // this collector can filter documents during the collection
            hasFilterCollector = true;
        }
        if (searchContext.parsedPostFilter() != null) {
            // add post filters before aggregations
            // it will only be applied to top hits
            collectors.add(createFilteredCollectorContext(searcher, searchContext.parsedPostFilter().query()));
            // this collector can filter documents during the collection
            hasFilterCollector = true;
        }
        if (searchContext.queryCollectors().isEmpty() == false) {
            // plug in additional collectors, like aggregations
            collectors.add(createMultiCollectorContext(searchContext.queryCollectors().values()));
        }
        if (searchContext.minimumScore() != null) {
            // apply the minimum score after multi collector so we filter aggs as well
            collectors.add(createMinScoreCollectorContext(searchContext.minimumScore()));
            // this collector can filter documents during the collection
            hasFilterCollector = true;
        }

        // optimizing sort on Numerics (long and date)
        if ((searchContext.sort() != null) && SYS_PROP_REWRITE_SORT) {
            enhanceSortOnNumeric(searchContext, searcher.getIndexReader());
        }

        boolean timeoutSet = scrollContext == null
            && searchContext.timeout() != null
            && searchContext.timeout().equals(SearchService.NO_TIMEOUT) == false;
        final Runnable timeoutRunnable;
        if (timeoutSet) {
            final long startTime = searchContext.getRelativeTimeInMillis();
            final long timeout = searchContext.timeout().millis();
            final long maxTime = startTime + timeout;
            timeoutRunnable = searcher.addQueryCancellation(() -> {
                final long time = searchContext.getRelativeTimeInMillis();
                if (time > maxTime) {
                    throw new TimeExceededException();
                }
            });
        } else {
            timeoutRunnable = null;
        }

        if (searchContext.lowLevelCancellation()) {
            searcher.addQueryCancellation(() -> {
                SearchShardTask task = searchContext.getTask();
                if (task != null && task.isCancelled()) {
                    throw new TaskCancelledException("cancelled task with reason: " + task.getReasonCancelled());
                }
            });
        }

        try {
            boolean shouldRescore = searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, timeoutSet);
            ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH);
            if (executor instanceof QueueResizingOpenSearchThreadPoolExecutor) {
                QueueResizingOpenSearchThreadPoolExecutor rExecutor = (QueueResizingOpenSearchThreadPoolExecutor) executor;
                queryResult.nodeQueueSize(rExecutor.getCurrentQueueSize());
                queryResult.serviceTimeEWMA((long) rExecutor.getTaskExecutionEWMA());
            }
            return shouldRescore;
        } finally {
            // the search phase has finished, so the timeout check is no longer needed;
            // otherwise aggregation phase might get cancelled.
            if (timeoutRunnable != null) {
                searcher.removeQueryCancellation(timeoutRunnable);
            }
        }
    } catch (Exception e) {
        throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Failed to execute main query", e);
    }
}
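The timeout handling above registers a cancellation Runnable that compares the context's relative clock against a precomputed deadline and throws to abort collection. The stand-alone sketch below shows just that check; the class and exception names are illustrative and not OpenSearch APIs (the real code throws QueryPhase's TimeExceededException via searcher.addQueryCancellation):

import java.util.function.LongSupplier;

final class TimeoutCheck implements Runnable {

    static final class TimeLimitExceededException extends RuntimeException {}

    private final LongSupplier relativeTimeInMillis; // e.g. searchContext::getRelativeTimeInMillis
    private final long maxTimeInMillis;

    TimeoutCheck(LongSupplier relativeTimeInMillis, long timeoutInMillis) {
        this.relativeTimeInMillis = relativeTimeInMillis;
        this.maxTimeInMillis = relativeTimeInMillis.getAsLong() + timeoutInMillis;
    }

    @Override
    public void run() {
        // Invoked periodically while documents are collected; throwing unwinds the
        // collection loop so the query phase can report the hits gathered so far
        // as a timed-out result.
        if (relativeTimeInMillis.getAsLong() > maxTimeInMillis) {
            throw new TimeLimitExceededException();
        }
    }
}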