Use of org.opensearch.tasks.TaskCancelledException in project OpenSearch by opensearch-project.
From the class CancellableTasksTests, method testRegisterAndExecuteChildTaskWhileParentTaskIsBeingCanceled:
public void testRegisterAndExecuteChildTaskWhileParentTaskIsBeingCanceled() throws Exception {
    setupTestNodes(Settings.EMPTY);
    connectNodes(testNodes);
    final TaskManager taskManager = testNodes[0].transportService.getTaskManager();
    CancellableNodesRequest parentRequest = new CancellableNodesRequest("parent");
    final Task parentTask = taskManager.register("test", "test", parentRequest);
    final TaskId parentTaskId = parentTask.taskInfo(testNodes[0].getNodeId(), false).getTaskId();
    // ban the parent task so that no new child task can be registered under it
    taskManager.setBan(new TaskId(testNodes[0].getNodeId(), parentTask.getId()), "test");
    CancellableNodesRequest childRequest = new CancellableNodesRequest("child");
    childRequest.setParentTask(parentTaskId);
    CancellableTestNodesAction testAction = new CancellableTestNodesAction(
        "internal:testAction",
        threadPool,
        testNodes[1].clusterService,
        testNodes[0].transportService,
        false,
        new CountDownLatch(1)
    );
    // executing the child action under the banned parent must fail up front
    TaskCancelledException cancelledException = expectThrows(
        TaskCancelledException.class,
        () -> testAction.execute(childRequest, ActionListener.wrap(() -> fail("must not execute")))
    );
    assertThat(cancelledException.getMessage(), startsWith("Task cancelled before it started:"));
    // the ban-on-children callback must fire since the child task never started
    CountDownLatch latch = new CountDownLatch(1);
    taskManager.startBanOnChildrenNodes(parentTaskId.getId(), latch::countDown);
    assertTrue("onChildTasksCompleted() is not invoked", latch.await(1, TimeUnit.SECONDS));
}
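The mechanism exercised here: once setBan has marked the parent, TaskManager rejects any new cancellable child registered under it with a TaskCancelledException whose message starts with "Task cancelled before it started:". A minimal caller-side sketch of handling that rejection, assuming taskManager and childRequest as set up in the test above (logger is a hypothetical placeholder):

// Minimal sketch, not the test's own code: taskManager and childRequest
// are assumed to exist as above; logger is a hypothetical placeholder.
try {
    Task childTask = taskManager.register("transport", "internal:testAction", childRequest);
    try {
        // ... run the action's work for this child task ...
    } finally {
        taskManager.unregister(childTask);
    }
} catch (TaskCancelledException e) {
    // the parent is banned (i.e. being cancelled): the child was rejected
    // before it started, so there is nothing to unregister
    logger.debug("child task rejected: {}", e.getMessage());
}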
Use of org.opensearch.tasks.TaskCancelledException in project OpenSearch by opensearch-project.
From the class TransportAction, method execute:
/**
* Execute the transport action on the local node, returning the {@link Task} used to track its execution and accepting a
* {@link TaskListener} which listens for the completion of the action.
*/
public final Task execute(Request request, TaskListener<Response> listener) {
    final Releasable unregisterChildNode = registerChildNode(request.getParentTask());
    final Task task;
    try {
        task = taskManager.register("transport", actionName, request);
    } catch (TaskCancelledException e) {
        unregisterChildNode.close();
        throw e;
    }
    execute(task, request, new ActionListener<Response>() {
        @Override
        public void onResponse(Response response) {
            try {
                Releasables.close(unregisterChildNode, () -> taskManager.unregister(task));
            } finally {
                listener.onResponse(task, response);
            }
        }

        @Override
        public void onFailure(Exception e) {
            try {
                Releasables.close(unregisterChildNode, () -> taskManager.unregister(task));
            } finally {
                listener.onFailure(task, e);
            }
        }
    });
    return task;
}
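A hypothetical usage sketch of the method above, assuming a concrete TransportAction<MyRequest, MyResponse> named action and a MyRequest instance; the TaskListener callbacks receive the task alongside the result, mirroring the calls in the code. Note that execute itself can propagate the TaskCancelledException rethrown from task registration, so a caller may need a synchronous catch as well:

// Illustrative usage sketch: action, request, handleResponse and
// handleFailure are hypothetical names, not part of the source above.
Task task = action.execute(request, new TaskListener<MyResponse>() {
    @Override
    public void onResponse(Task task, MyResponse response) {
        // by this point the task has already been unregistered
        handleResponse(response);
    }

    @Override
    public void onFailure(Task task, Exception e) {
        // e may be a TaskCancelledException thrown while the action ran
        handleFailure(e);
    }
});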
Use of org.opensearch.tasks.TaskCancelledException in project OpenSearch by opensearch-project.
From the class QueryPhase, method executeInternal:
/**
* In a package-private method so that it can be tested without having to
* wire everything (mapperService, etc.)
* @return whether the rescoring phase should be executed
*/
static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExecutionException {
    final ContextIndexSearcher searcher = searchContext.searcher();
    final IndexReader reader = searcher.getIndexReader();
    QuerySearchResult queryResult = searchContext.queryResult();
    queryResult.searchTimedOut(false);
    try {
        queryResult.from(searchContext.from());
        queryResult.size(searchContext.size());
        Query query = searchContext.query();
        // already rewritten
        assert query == searcher.rewrite(query);
        final ScrollContext scrollContext = searchContext.scrollContext();
        if (scrollContext != null) {
            if (scrollContext.totalHits == null) {
                // first round
                assert scrollContext.lastEmittedDoc == null;
                // there is not much that we can optimize here since we want to collect all
                // documents in order to get the total number of hits
            } else {
                final ScoreDoc after = scrollContext.lastEmittedDoc;
                if (returnsDocsInOrder(query, searchContext.sort())) {
                    // skip to the desired doc
                    if (after != null) {
                        query = new BooleanQuery.Builder().add(query, BooleanClause.Occur.MUST)
                            .add(new MinDocQuery(after.doc + 1), BooleanClause.Occur.FILTER)
                            .build();
                    }
                    // ... and stop collecting after ${size} matches
                    searchContext.terminateAfter(searchContext.size());
                } else if (canEarlyTerminate(reader, searchContext.sort())) {
                    // skip to the desired doc
                    if (after != null) {
                        query = new BooleanQuery.Builder().add(query, BooleanClause.Occur.MUST)
                            .add(new SearchAfterSortedDocQuery(searchContext.sort().sort, (FieldDoc) after), BooleanClause.Occur.FILTER)
                            .build();
                    }
                }
            }
        }
        final LinkedList<QueryCollectorContext> collectors = new LinkedList<>();
        // whether the chain contains a collector that filters documents
        boolean hasFilterCollector = false;
        if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER) {
            // add terminate_after before the filter collectors
            // it will only be applied to documents accepted by these filter collectors
            collectors.add(createEarlyTerminationCollectorContext(searchContext.terminateAfter()));
            // this collector can filter documents during the collection
            hasFilterCollector = true;
        }
        if (searchContext.parsedPostFilter() != null) {
            // add post filters before aggregations
            // it will only be applied to top hits
            collectors.add(createFilteredCollectorContext(searcher, searchContext.parsedPostFilter().query()));
            // this collector can filter documents during the collection
            hasFilterCollector = true;
        }
        if (searchContext.queryCollectors().isEmpty() == false) {
            // plug in additional collectors, like aggregations
            collectors.add(createMultiCollectorContext(searchContext.queryCollectors().values()));
        }
        if (searchContext.minimumScore() != null) {
            // apply the minimum score after multi collector so we filter aggs as well
            collectors.add(createMinScoreCollectorContext(searchContext.minimumScore()));
            // this collector can filter documents during the collection
            hasFilterCollector = true;
        }
        // optimizing sort on Numerics (long and date)
        if ((searchContext.sort() != null) && SYS_PROP_REWRITE_SORT) {
            enhanceSortOnNumeric(searchContext, searcher.getIndexReader());
        }
        boolean timeoutSet = scrollContext == null
            && searchContext.timeout() != null
            && searchContext.timeout().equals(SearchService.NO_TIMEOUT) == false;
        final Runnable timeoutRunnable;
        if (timeoutSet) {
            final long startTime = searchContext.getRelativeTimeInMillis();
            final long timeout = searchContext.timeout().millis();
            final long maxTime = startTime + timeout;
            timeoutRunnable = searcher.addQueryCancellation(() -> {
                final long time = searchContext.getRelativeTimeInMillis();
                if (time > maxTime) {
                    throw new TimeExceededException();
                }
            });
        } else {
            timeoutRunnable = null;
        }
        if (searchContext.lowLevelCancellation()) {
            searcher.addQueryCancellation(() -> {
                SearchShardTask task = searchContext.getTask();
                if (task != null && task.isCancelled()) {
                    throw new TaskCancelledException("cancelled task with reason: " + task.getReasonCancelled());
                }
            });
        }
        try {
            boolean shouldRescore = searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, timeoutSet);
            ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH);
            if (executor instanceof QueueResizingOpenSearchThreadPoolExecutor) {
                QueueResizingOpenSearchThreadPoolExecutor rExecutor = (QueueResizingOpenSearchThreadPoolExecutor) executor;
                queryResult.nodeQueueSize(rExecutor.getCurrentQueueSize());
                queryResult.serviceTimeEWMA((long) rExecutor.getTaskExecutionEWMA());
            }
            return shouldRescore;
        } finally {
            // remove the timeout checker here; otherwise the aggregation phase might get cancelled
            if (timeoutRunnable != null) {
                searcher.removeQueryCancellation(timeoutRunnable);
            }
        }
    } catch (Exception e) {
        throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Failed to execute main query", e);
    }
}
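The two addQueryCancellation hooks above (the timeout check and the low-level task-cancellation check) rely on the searcher invoking every registered check periodically while documents are collected. A simplified stand-in for that pattern, with illustrative names; the real logic lives in ContextIndexSearcher:

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch of the cancellation-hook pattern, not the actual
// ContextIndexSearcher internals.
class CancellableSearchLoop {
    private final List<Runnable> cancellations = new ArrayList<>();

    Runnable addQueryCancellation(Runnable check) {
        cancellations.add(check);
        return check; // returned so the caller can remove it later, as in the finally block above
    }

    void removeQueryCancellation(Runnable check) {
        cancellations.remove(check);
    }

    // called from the collection hot loop; any registered check may abort the
    // search by throwing (TimeExceededException, TaskCancelledException, ...)
    void checkCancelled() {
        for (Runnable check : cancellations) {
            check.run();
        }
    }
}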
Use of org.opensearch.tasks.TaskCancelledException in project OpenSearch by opensearch-project.
From the class DfsPhase, method execute:
public void execute(SearchContext context) {
    try {
        ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
        Map<Term, TermStatistics> stats = new HashMap<>();
        IndexSearcher searcher = new IndexSearcher(context.searcher().getIndexReader()) {
            @Override
            public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) throws IOException {
                if (context.isCancelled()) {
                    throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled());
                }
                TermStatistics ts = super.termStatistics(term, docFreq, totalTermFreq);
                if (ts != null) {
                    stats.put(term, ts);
                }
                return ts;
            }

            @Override
            public CollectionStatistics collectionStatistics(String field) throws IOException {
                if (context.isCancelled()) {
                    throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled());
                }
                CollectionStatistics cs = super.collectionStatistics(field);
                if (cs != null) {
                    fieldStatistics.put(field, cs);
                }
                return cs;
            }
        };
        searcher.createWeight(context.searcher().rewrite(context.query()), ScoreMode.COMPLETE, 1);
        for (RescoreContext rescoreContext : context.rescore()) {
            for (Query query : rescoreContext.getQueries()) {
                searcher.createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE, 1);
            }
        }
        Term[] terms = stats.keySet().toArray(new Term[0]);
        TermStatistics[] termStatistics = new TermStatistics[terms.length];
        for (int i = 0; i < terms.length; i++) {
            termStatistics[i] = stats.get(terms[i]);
        }
        context.dfsResult()
            .termsStatistics(terms, termStatistics)
            .fieldStatistics(fieldStatistics)
            .maxDoc(context.searcher().getIndexReader().maxDoc());
    } catch (Exception e) {
        throw new DfsPhaseExecutionException(context.shardTarget(), "Exception during dfs phase", e);
    }
}
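The same wrap-and-check idea in isolation: an IndexSearcher subclass that consults a cancellation flag before serving statistics. This is a sketch, not DfsPhase itself; BooleanSupplier stands in for context.isCancelled(), and the class name is illustrative:

import java.io.IOException;
import java.util.function.BooleanSupplier;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermStatistics;
import org.opensearch.tasks.TaskCancelledException;

// Illustrative sketch: a searcher that aborts statistics lookups once the
// owning task has been cancelled.
class CancellableStatsSearcher extends IndexSearcher {
    private final BooleanSupplier isCancelled;

    CancellableStatsSearcher(IndexReader reader, BooleanSupplier isCancelled) {
        super(reader);
        this.isCancelled = isCancelled;
    }

    @Override
    public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) throws IOException {
        if (isCancelled.getAsBoolean()) {
            throw new TaskCancelledException("cancelled");
        }
        return super.termStatistics(term, docFreq, totalTermFreq);
    }
}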
Use of org.opensearch.tasks.TaskCancelledException in project OpenSearch by opensearch-project.
From the class SearchCancellationTests, method testExitableDirectoryReader:
public void testExitableDirectoryReader() throws IOException {
    AtomicBoolean cancelled = new AtomicBoolean(true);
    Runnable cancellation = () -> {
        if (cancelled.get()) {
            throw new TaskCancelledException("cancelled");
        }
    };
    ContextIndexSearcher searcher = new ContextIndexSearcher(
        reader,
        IndexSearcher.getDefaultSimilarity(),
        IndexSearcher.getDefaultQueryCache(),
        IndexSearcher.getDefaultQueryCachingPolicy(),
        true
    );
    searcher.addQueryCancellation(cancellation);
    CompiledAutomaton automaton = new CompiledAutomaton(new RegExp("a.*").toAutomaton());

    expectThrows(TaskCancelledException.class, () -> searcher.getIndexReader().leaves().get(0).reader().terms(STRING_FIELD_NAME).iterator());
    expectThrows(TaskCancelledException.class, () -> searcher.getIndexReader().leaves().get(0).reader().terms(STRING_FIELD_NAME).intersect(automaton, null));
    expectThrows(TaskCancelledException.class, () -> searcher.getIndexReader().leaves().get(0).reader().getPointValues(POINT_FIELD_NAME));
    // Avoid exception during construction of the wrapper objects
    cancelled.set(false);
    Terms terms = searcher.getIndexReader().leaves().get(0).reader().terms(STRING_FIELD_NAME);
    TermsEnum termsIterator = terms.iterator();
    TermsEnum termsIntersect = terms.intersect(automaton, null);
    PointValues pointValues1 = searcher.getIndexReader().leaves().get(0).reader().getPointValues(POINT_FIELD_NAME);
    cancelled.set(true);
    expectThrows(TaskCancelledException.class, termsIterator::next);
    expectThrows(TaskCancelledException.class, termsIntersect::next);
    expectThrows(TaskCancelledException.class, pointValues1::getDocCount);
    expectThrows(TaskCancelledException.class, pointValues1::getNumIndexDimensions);
    expectThrows(TaskCancelledException.class, () -> pointValues1.intersect(new PointValuesIntersectVisitor()));

    // Avoid exception during construction of the wrapper objects
    cancelled.set(false);
    // Re-initialize objects so that we reset the `calls` counter used to avoid cancellation checks
    // on every iteration, and ensure that cancellation would normally happen if we hadn't removed
    // the cancellation runnable.
    termsIterator = terms.iterator();
    termsIntersect = terms.intersect(automaton, null);
    PointValues pointValues2 = searcher.getIndexReader().leaves().get(0).reader().getPointValues(POINT_FIELD_NAME);
    cancelled.set(true);
    searcher.removeQueryCancellation(cancellation);
    // with the cancellation runnable removed, these calls must now succeed
    termsIterator.next();
    termsIntersect.next();
    pointValues2.getDocCount();
    pointValues2.getNumIndexDimensions();
    pointValues2.intersect(new PointValuesIntersectVisitor());
}
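The `calls` counter mentioned in the comments is a common trick in these exitable wrappers: running the cancellation check on every single iteration would be too costly, so it only fires every N calls. An illustrative sketch of that idea; the class name and interval are assumptions, not the actual Lucene/OpenSearch implementation:

// Illustrative sketch of a counter-gated cancellation check.
class SkippingCancellationChecker {
    private static final int CHECK_MASK = 0xFF; // check roughly every 256 calls
    private final Runnable cancellationCheck;
    private int calls;

    SkippingCancellationChecker(Runnable cancellationCheck) {
        this.cancellationCheck = cancellationCheck;
    }

    // invoked on every iteration, but only runs the real check periodically
    void maybeCheckCancelled() {
        if ((calls++ & CHECK_MASK) == 0) {
            cancellationCheck.run(); // throws TaskCancelledException when cancelled
        }
    }
}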