
Example 1 with Task

Use of org.opensearch.tasks.Task in project OpenSearch by opensearch-project.

From class TransportAction, method execute().

/**
 * Use this method when the transport action call should result in creation of a new task associated with the call.
 *
 * This is a typical behavior.
 */
public final Task execute(Request request, ActionListener<Response> listener) {
    /*
     * While this version of execute could delegate to the TaskListener
     * version of execute, that would add yet another layer of wrapping on the
     * listener and prevent us from using the listener bare if there isn't a
     * task. That just seems like too many objects. Thus the two versions of
     * this method.
     */
    final Releasable unregisterChildNode = registerChildNode(request.getParentTask());
    final Task task;
    try {
        task = taskManager.register("transport", actionName, request);
    } catch (TaskCancelledException e) {
        unregisterChildNode.close();
        throw e;
    }
    execute(task, request, new ActionListener<Response>() {

        @Override
        public void onResponse(Response response) {
            try {
                Releasables.close(unregisterChildNode, () -> taskManager.unregister(task));
            } finally {
                listener.onResponse(response);
            }
        }

        @Override
        public void onFailure(Exception e) {
            try {
                Releasables.close(unregisterChildNode, () -> taskManager.unregister(task));
            } finally {
                listener.onFailure(e);
            }
        }
    });
    return task;
}
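
As a quick illustration of why execute() returns the Task it registers, the sketch below shows a possible caller-side helper. It is hypothetical (the class ExecuteSketch and its submit method are not part of the OpenSearch sources); it relies only on the execute() signature shown above, on ActionListener.wrap, and on Task.getId(). The caller can keep or log the task id while the wrapped listener takes care of unregistering the task before forwarding the result.

import org.opensearch.action.ActionListener;
import org.opensearch.action.ActionRequest;
import org.opensearch.action.ActionResponse;
import org.opensearch.action.support.TransportAction;
import org.opensearch.tasks.Task;

// Hypothetical caller-side helper, not part of the OpenSearch sources.
final class ExecuteSketch {

    static <Req extends ActionRequest, Resp extends ActionResponse> long submit(TransportAction<Req, Resp> action, Req request) {
        Task task = action.execute(request, ActionListener.wrap(
            response -> { /* the task has already been unregistered when this runs */ },
            failure -> { /* likewise on failure */ }
        ));
        // id under which the TaskManager registered the task; useful for logging or later lookup
        return task.getId();
    }
}
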
Also used : ActionResponse(org.opensearch.action.ActionResponse) Task(org.opensearch.tasks.Task) Releasable(org.opensearch.common.lease.Releasable) TaskCancelledException(org.opensearch.tasks.TaskCancelledException) ActionRequestValidationException(org.opensearch.action.ActionRequestValidationException)

Example 2 with Task

Use of org.opensearch.tasks.Task in project OpenSearch by opensearch-project.

From class TransportBulkAction, method doInternalExecute().

protected void doInternalExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener<BulkResponse> listener) {
    final long startTime = relativeTime();
    final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
    boolean hasIndexRequestsWithPipelines = false;
    final Metadata metadata = clusterService.state().getMetadata();
    final Version minNodeVersion = clusterService.state().getNodes().getMinNodeVersion();
    for (DocWriteRequest<?> actionRequest : bulkRequest.requests) {
        IndexRequest indexRequest = getIndexWriteRequest(actionRequest);
        if (indexRequest != null) {
            // Each index request needs to be evaluated, because this method also modifies the IndexRequest
            boolean indexRequestHasPipeline = IngestService.resolvePipelines(actionRequest, indexRequest, metadata);
            hasIndexRequestsWithPipelines |= indexRequestHasPipeline;
        }
        if (actionRequest instanceof IndexRequest) {
            IndexRequest ir = (IndexRequest) actionRequest;
            ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion);
            if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) {
                throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally");
            }
        }
    }
    if (hasIndexRequestsWithPipelines) {
        // After ingest processing, this method is invoked again with each request's pipeline already resolved (to the no-op pipeline), so this branch is not taken on the second pass.
        try {
            if (Assertions.ENABLED) {
                final boolean arePipelinesResolved = bulkRequest.requests().stream().map(TransportBulkAction::getIndexWriteRequest).filter(Objects::nonNull).allMatch(IndexRequest::isPipelineResolved);
                assert arePipelinesResolved : bulkRequest;
            }
            if (clusterService.localNode().isIngestNode()) {
                processBulkIndexIngestRequest(task, bulkRequest, executorName, listener);
            } else {
                ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener);
            }
        } catch (Exception e) {
            listener.onFailure(e);
        }
        return;
    }
    final boolean includesSystem = includesSystem(bulkRequest, clusterService.state().metadata().getIndicesLookup(), systemIndices);
    if (includesSystem || needToCheck()) {
        // Attempt to create all the indices that we're going to need during the bulk before we start.
        // Step 1: collect all the indices in the request
        final Map<String, Boolean> indices = bulkRequest.requests.stream().filter(request -> request.opType() != DocWriteRequest.OpType.DELETE || request.versionType() == VersionType.EXTERNAL || request.versionType() == VersionType.EXTERNAL_GTE).collect(Collectors.toMap(DocWriteRequest::index, DocWriteRequest::isRequireAlias, (v1, v2) -> v1 || v2));
        /* Step 2: filter that to indices that don't exist and we can create. At the same time build a map of indices we can't create
         * that we'll use when we try to run the requests. */
        final Map<String, IndexNotFoundException> indicesThatCannotBeCreated = new HashMap<>();
        Set<String> autoCreateIndices = new HashSet<>();
        ClusterState state = clusterService.state();
        for (Map.Entry<String, Boolean> indexAndFlag : indices.entrySet()) {
            boolean shouldAutoCreate;
            final String index = indexAndFlag.getKey();
            try {
                shouldAutoCreate = shouldAutoCreate(index, state);
            } catch (IndexNotFoundException e) {
                shouldAutoCreate = false;
                indicesThatCannotBeCreated.put(index, e);
            }
            // We should only auto create if we are not requiring it to be an alias
            if (shouldAutoCreate && (indexAndFlag.getValue() == false)) {
                autoCreateIndices.add(index);
            }
        }
        // Step 3: create all the indices that are missing, if there are any missing. Start the bulk after all the creates come back.
        if (autoCreateIndices.isEmpty()) {
            executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
        } else {
            final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
            for (String index : autoCreateIndices) {
                createIndex(index, bulkRequest.timeout(), minNodeVersion, new ActionListener<CreateIndexResponse>() {

                    @Override
                    public void onResponse(CreateIndexResponse result) {
                        if (counter.decrementAndGet() == 0) {
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(listener) {

                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
                                }
                            });
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        if (!(ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException)) {
                            // fail all requests involving this index, if create didn't work
                            for (int i = 0; i < bulkRequest.requests.size(); i++) {
                                DocWriteRequest<?> request = bulkRequest.requests.get(i);
                                if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) {
                                    bulkRequest.requests.set(i, null);
                                }
                            }
                        }
                        if (counter.decrementAndGet() == 0) {
                            final ActionListener<BulkResponse> wrappedListener = ActionListener.wrap(listener::onResponse, inner -> {
                                inner.addSuppressed(e);
                                listener.onFailure(inner);
                            });
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(wrappedListener) {

                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, wrappedListener, responses, indicesThatCannotBeCreated);
                                }

                                @Override
                                public void onRejection(Exception rejectedException) {
                                    rejectedException.addSuppressed(e);
                                    super.onRejection(rejectedException);
                                }
                            });
                        }
                    }
                });
            }
        }
    } else {
        executeBulk(task, bulkRequest, startTime, listener, responses, emptyMap());
    }
}
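
Stripped of the OpenSearch types, the auto-create branch above is a fan-out/fan-in: one asynchronous create per missing index, an AtomicInteger counting the outstanding callbacks, and the bulk executed by whichever callback brings the counter to zero, with failed creates remembered so the matching requests can be failed individually. The stand-alone sketch below illustrates just that coordination pattern; the class and its parameters are hypothetical, not OpenSearch APIs.

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

// Illustrative only: createIndexAsync calls its callback with null on success or with the failure cause.
final class AutoCreateThenBulkSketch {

    static void createAllThenRunBulk(List<String> missingIndices,
                                     BiConsumer<String, Consumer<Exception>> createIndexAsync,
                                     Consumer<Map<String, Exception>> runBulk) {
        if (missingIndices.isEmpty()) {
            runBulk.accept(Map.of()); // the real code handles this case before the loop
            return;
        }
        Map<String, Exception> failedCreates = new ConcurrentHashMap<>();
        AtomicInteger remaining = new AtomicInteger(missingIndices.size());
        for (String index : missingIndices) {
            createIndexAsync.accept(index, failure -> {
                if (failure != null) {
                    failedCreates.put(index, failure); // requests targeting this index will be failed later
                }
                if (remaining.decrementAndGet() == 0) {
                    runBulk.accept(failedCreates);     // the last callback to return triggers the bulk exactly once
                }
            });
        }
    }
}
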
Also used : SequenceNumbers(org.opensearch.index.seqno.SequenceNumbers) IndexAbstraction(org.opensearch.cluster.metadata.IndexAbstraction) Metadata(org.opensearch.cluster.metadata.Metadata) LongSupplier(java.util.function.LongSupplier) DataStream(org.opensearch.cluster.metadata.DataStream) Version(org.opensearch.Version) TransportUpdateAction(org.opensearch.action.update.TransportUpdateAction) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Locale(java.util.Locale) Assertions(org.opensearch.Assertions) Map(java.util.Map) NodeClosedException(org.opensearch.node.NodeClosedException) AutoCreateAction(org.opensearch.action.admin.indices.create.AutoCreateAction) Inject(org.opensearch.common.inject.Inject) ActionListener(org.opensearch.action.ActionListener) AtomicIntegerArray(java.util.concurrent.atomic.AtomicIntegerArray) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) TimeValue(org.opensearch.common.unit.TimeValue) NodeClient(org.opensearch.client.node.NodeClient) Index(org.opensearch.index.Index) IndexingPressureService(org.opensearch.index.IndexingPressureService) OpenSearchParseException(org.opensearch.OpenSearchParseException) ExceptionsHelper(org.opensearch.ExceptionsHelper) ClusterBlockException(org.opensearch.cluster.block.ClusterBlockException) Set(java.util.Set) Task(org.opensearch.tasks.Task) TransportService(org.opensearch.transport.TransportService) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) ActionFilters(org.opensearch.action.support.ActionFilters) VersionType(org.opensearch.index.VersionType) List(java.util.List) Logger(org.apache.logging.log4j.Logger) SparseFixedBitSet(org.apache.lucene.util.SparseFixedBitSet) EXCLUDED_DATA_STREAMS_KEY(org.opensearch.cluster.metadata.IndexNameExpressionResolver.EXCLUDED_DATA_STREAMS_KEY) ResourceAlreadyExistsException(org.opensearch.ResourceAlreadyExistsException) DocWriteResponse(org.opensearch.action.DocWriteResponse) UpdateRequest(org.opensearch.action.update.UpdateRequest) SortedMap(java.util.SortedMap) IndexNameExpressionResolver(org.opensearch.cluster.metadata.IndexNameExpressionResolver) Names(org.opensearch.threadpool.ThreadPool.Names) MappingMetadata(org.opensearch.cluster.metadata.MappingMetadata) HandledTransportAction(org.opensearch.action.support.HandledTransportAction) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) ActionRunnable(org.opensearch.action.ActionRunnable) UpdateResponse(org.opensearch.action.update.UpdateResponse) ThreadPool(org.opensearch.threadpool.ThreadPool) RoutingMissingException(org.opensearch.action.RoutingMissingException) DocWriteRequest(org.opensearch.action.DocWriteRequest) HashMap(java.util.HashMap) Releasable(org.opensearch.common.lease.Releasable) ArrayList(java.util.ArrayList) AutoCreateIndex(org.opensearch.action.support.AutoCreateIndex) HashSet(java.util.HashSet) ClusterState(org.opensearch.cluster.ClusterState) UNASSIGNED_SEQ_NO(org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO) LegacyESVersion(org.opensearch.LegacyESVersion) IndexClosedException(org.opensearch.indices.IndexClosedException) ClusterStateObserver(org.opensearch.cluster.ClusterStateObserver) Collections.emptyMap(java.util.Collections.emptyMap) IngestService(org.opensearch.ingest.IngestService) Iterator(java.util.Iterator) IngestActionForwarder(org.opensearch.action.ingest.IngestActionForwarder) ClusterBlockLevel(org.opensearch.cluster.block.ClusterBlockLevel) IndexNotFoundException(org.opensearch.index.IndexNotFoundException) 
CreateIndexResponse(org.opensearch.action.admin.indices.create.CreateIndexResponse) UNASSIGNED_PRIMARY_TERM(org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM) ShardId(org.opensearch.index.shard.ShardId) TimeUnit(java.util.concurrent.TimeUnit) SystemIndices(org.opensearch.indices.SystemIndices) AtomicArray(org.opensearch.common.util.concurrent.AtomicArray) ClusterService(org.opensearch.cluster.service.ClusterService) IndexRequest(org.opensearch.action.index.IndexRequest) LogManager(org.apache.logging.log4j.LogManager)

Example 3 with Task

Use of org.opensearch.tasks.Task in project OpenSearch by opensearch-project.

From class TasksIT, method waitForCompletionTestCase().

/**
 * Test wait for completion.
 * @param storeResult should the task store its results
 * @param wait start waiting for a task. Accepts the id of the task to wait for and returns a future waiting for it.
 * @param validator validate the response and return the task ids that were found
 */
private <T> void waitForCompletionTestCase(boolean storeResult, Function<TaskId, ActionFuture<T>> wait, Consumer<T> validator) throws Exception {
    // Start blocking test task
    TestTaskPlugin.NodesRequest request = new TestTaskPlugin.NodesRequest("test");
    request.setShouldStoreResult(storeResult);
    ActionFuture<TestTaskPlugin.NodesResponse> future = client().execute(TestTaskPlugin.TestTaskAction.INSTANCE, request);
    ActionFuture<T> waitResponseFuture;
    TaskId taskId;
    try {
        taskId = waitForTestTaskStartOnAllNodes();
        // Wait for the task to start
        assertBusy(() -> client().admin().cluster().prepareGetTask(taskId).get());
        // Register listeners so we can be sure the waiting started
        CountDownLatch waitForWaitingToStart = new CountDownLatch(1);
        for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
            ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {

                @Override
                public void waitForTaskCompletion(Task task) {
                    waitForWaitingToStart.countDown();
                }

                @Override
                public void onTaskRegistered(Task task) {
                }

                @Override
                public void onTaskUnregistered(Task task) {
                }
            });
        }
        // Spin up a request to wait for the test task to finish
        waitResponseFuture = wait.apply(taskId);
        /* Wait for the wait to start. This should count down just *before* we wait for completion but after the list/get has got a
         * reference to the running task. Because we unblock immediately after this the task may no longer be running for us to wait
         * on which is fine. */
        waitForWaitingToStart.await();
    } finally {
        // Unblock the request so the wait for completion request can finish
        new TestTaskPlugin.UnblockTestTasksRequestBuilder(client(), TestTaskPlugin.UnblockTestTasksAction.INSTANCE).get();
    }
    // Now that the task is unblocked the list response will come back
    T waitResponse = waitResponseFuture.get();
    validator.accept(waitResponse);
    TestTaskPlugin.NodesResponse response = future.get();
    assertEquals(emptyList(), response.failures());
}
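
The try block above relies on a small latch handshake: each node's task manager gets a listener that counts down waitForWaitingToStart as soon as some caller starts waiting on the task, the test blocks on that latch, and only then unblocks the task. The stand-alone sketch below (plain java.util.concurrent, no OpenSearch types) shows the same handshake in isolation.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

// Minimal sketch of the "make sure the waiting really started before unblocking" handshake.
final class WaitHandshakeSketch {

    public static void main(String[] args) throws Exception {
        CountDownLatch waitingStarted = new CountDownLatch(1);
        CountDownLatch taskUnblocked = new CountDownLatch(1);
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // the "blocked task": runs until the test unblocks it
        Future<?> task = pool.submit(() -> { taskUnblocked.await(); return null; });

        // the "wait for completion" request: announces that it started waiting, then waits
        Future<?> waiter = pool.submit(() -> {
            waitingStarted.countDown();            // plays the role of waitForTaskCompletion(...) above
            task.get(30, TimeUnit.SECONDS);
            return null;
        });

        waitingStarted.await();                    // be sure the waiting has really started...
        taskUnblocked.countDown();                 // ...before unblocking the task
        waiter.get(30, TimeUnit.SECONDS);          // now the wait can complete
        pool.shutdown();
    }
}
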
Also used : Task(org.opensearch.tasks.Task) TaskId(org.opensearch.tasks.TaskId) MockTaskManagerListener(org.opensearch.test.tasks.MockTaskManagerListener) CountDownLatch(java.util.concurrent.CountDownLatch) MockTaskManager(org.opensearch.test.tasks.MockTaskManager) SearchTransportService(org.opensearch.action.search.SearchTransportService) TransportService(org.opensearch.transport.TransportService) MockTransportService(org.opensearch.test.transport.MockTransportService)

Example 4 with Task

Use of org.opensearch.tasks.Task in project OpenSearch by opensearch-project.

From class TasksIT, method testTransportBroadcastReplicationTasks().

public void testTransportBroadcastReplicationTasks() {
    // main task
    registerTaskManagerListeners(RefreshAction.NAME);
    // shard level tasks
    registerTaskManagerListeners(RefreshAction.NAME + "[s]");
    // primary and replica shard tasks
    registerTaskManagerListeners(RefreshAction.NAME + "[s][*]");
    createIndex("test");
    // Make sure all shards are allocated
    ensureGreen("test");
    client().admin().indices().prepareRefresh("test").get();
    // the refresh operation should produce one main task
    NumShards numberOfShards = getNumShards("test");
    logger.debug("number of shards, total: [{}], primaries: [{}] ", numberOfShards.totalNumShards, numberOfShards.numPrimaries);
    logger.debug("main events {}", numberOfEvents(RefreshAction.NAME, Tuple::v1));
    logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).getTaskId().getNodeId());
    logger.debug("[s] events {}", numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1));
    logger.debug("[s][*] events {}", numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
    logger.debug("nodes with the index {}", internalCluster().nodesInclude("test"));
    assertEquals(1, numberOfEvents(RefreshAction.NAME, Tuple::v1));
    // Because this is a broadcast replication action, the coordinating node creates one [s]-level task per
    // primary shard, and every primary that lives on another node gets an additional [s] task on that node.
    // If all primaries are on the coordinating node, the number of [s] tasks equals the number of primaries;
    // if none of them are, it equals twice the number of primaries.
    assertThat(numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1), greaterThanOrEqualTo(numberOfShards.numPrimaries));
    assertThat(numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1), lessThanOrEqualTo(numberOfShards.numPrimaries * 2));
    // Verify that all [s] events have the proper parent
    // This is complicated because if the shard task runs on the same node it has main task as a parent
    // but if it runs on non-coordinating node it would have another intermediate [s] task on the coordinating node as a parent
    TaskInfo mainTask = findEvents(RefreshAction.NAME, Tuple::v1).get(0);
    List<TaskInfo> sTasks = findEvents(RefreshAction.NAME + "[s]", Tuple::v1);
    for (TaskInfo taskInfo : sTasks) {
        if (mainTask.getTaskId().getNodeId().equals(taskInfo.getTaskId().getNodeId())) {
            // This shard level task runs on the same node as a parent task - it should have the main task as a direct parent
            assertParentTask(Collections.singletonList(taskInfo), mainTask);
        } else {
            String description = taskInfo.getDescription();
            // This shard level task runs on another node - it should have a corresponding shard level task on the node where main task
            // is running
            List<TaskInfo> sTasksOnRequestingNode = findEvents(RefreshAction.NAME + "[s]", event -> event.v1() && mainTask.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) && description.equals(event.v2().getDescription()));
            // There should be only one parent task
            assertEquals(1, sTasksOnRequestingNode.size());
            assertParentTask(Collections.singletonList(taskInfo), sTasksOnRequestingNode.get(0));
        }
    }
    // we will have as many [s][p] and [s][r] tasks as we have primary and replica shards
    assertEquals(numberOfShards.totalNumShards, numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
    // each [s][p] and [s][r] task should have a corresponding [s] parent task (on the same node for [s][p], on the primary's node for [s][r])
    List<TaskInfo> spEvents = findEvents(RefreshAction.NAME + "[s][*]", Tuple::v1);
    for (TaskInfo taskInfo : spEvents) {
        List<TaskInfo> sTask;
        if (taskInfo.getAction().endsWith("[s][p]")) {
            // A [s][p] level task should have a corresponding [s] level task on the same node
            sTask = findEvents(RefreshAction.NAME + "[s]", event -> event.v1() && taskInfo.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) && taskInfo.getDescription().equals(event.v2().getDescription()));
        } else {
            // An [s][r] level task should have a corresponding [s] level task on a different node (where the primary is located)
            sTask = findEvents(RefreshAction.NAME + "[s]", event -> event.v1() && taskInfo.getParentTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) && taskInfo.getDescription().equals(event.v2().getDescription()));
        }
        // There should be only one parent task
        assertEquals(1, sTask.size());
        assertParentTask(Collections.singletonList(taskInfo), sTask.get(0));
    }
}
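
The filters in this test lean on the convention that a broadcast replication action registers tasks whose action names are the parent action name plus a level suffix: "[s]" for the shard-level task and "[s][p]" / "[s][r]" for the primary and replica tasks. The small sketch below is only a reference for that naming convention (the class is hypothetical; "indices:admin/refresh" is assumed to be the value of RefreshAction.NAME).

// Hypothetical helper mapping a task's action name to the level it represents.
final class TaskActionNames {

    static String level(String actionName) {
        if (actionName.endsWith("[s][p]")) return "primary shard task";
        if (actionName.endsWith("[s][r]")) return "replica shard task";
        if (actionName.endsWith("[s]")) return "shard-level task";
        return "main (coordinating) task";
    }

    public static void main(String[] args) {
        String name = "indices:admin/refresh";      // assumed value of RefreshAction.NAME
        System.out.println(level(name));            // main (coordinating) task
        System.out.println(level(name + "[s]"));    // shard-level task
        System.out.println(level(name + "[s][p]")); // primary shard task
        System.out.println(level(name + "[s][r]")); // replica shard task
    }
}
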
Also used : TaskInfo(org.opensearch.tasks.TaskInfo) Arrays(java.util.Arrays) IndexResponse(org.opensearch.action.index.IndexResponse) OpenSearchAssertions.assertSearchResponse(org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse) Matchers.not(org.hamcrest.Matchers.not) BulkAction(org.opensearch.action.bulk.BulkAction) OpenSearchException(org.opensearch.OpenSearchException) SearchTransportService(org.opensearch.action.search.SearchTransportService) Regex(org.opensearch.common.regex.Regex) Strings(org.opensearch.common.Strings) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) RefreshAction(org.opensearch.action.admin.indices.refresh.RefreshAction) WriteRequest(org.opensearch.action.support.WriteRequest) Collections.singleton(java.util.Collections.singleton) Map(java.util.Map) Matchers.emptyCollectionOf(org.hamcrest.Matchers.emptyCollectionOf) ActionListener(org.opensearch.action.ActionListener) CancelTasksResponse(org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse) TaskResultsService(org.opensearch.tasks.TaskResultsService) CyclicBarrier(java.util.concurrent.CyclicBarrier) OpenSearchAssertions.assertNoFailures(org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures) Matchers.notNullValue(org.hamcrest.Matchers.notNullValue) Collections.emptyList(java.util.Collections.emptyList) Matchers.allOf(org.hamcrest.Matchers.allOf) Matchers.lessThanOrEqualTo(org.hamcrest.Matchers.lessThanOrEqualTo) Collection(java.util.Collection) TaskResult(org.opensearch.tasks.TaskResult) ExceptionsHelper(org.opensearch.ExceptionsHelper) MockTaskManager(org.opensearch.test.tasks.MockTaskManager) Settings(org.opensearch.common.settings.Settings) Task(org.opensearch.tasks.Task) ClusterHealthAction(org.opensearch.action.admin.cluster.health.ClusterHealthAction) TransportService(org.opensearch.transport.TransportService) Matchers.startsWith(org.hamcrest.Matchers.startsWith) Tuple(org.opensearch.common.collect.Tuple) TimeValue.timeValueSeconds(org.opensearch.common.unit.TimeValue.timeValueSeconds) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) SearchSourceBuilder(org.opensearch.search.builder.SearchSourceBuilder) OpenSearchTimeoutException(org.opensearch.OpenSearchTimeoutException) IndexAction(org.opensearch.action.index.IndexAction) ReplicationResponse(org.opensearch.action.support.replication.ReplicationResponse) XContentType(org.opensearch.common.xcontent.XContentType) Matchers.greaterThan(org.hamcrest.Matchers.greaterThan) TransportReplicationActionTests(org.opensearch.action.support.replication.TransportReplicationActionTests) OpenSearchIntegTestCase(org.opensearch.test.OpenSearchIntegTestCase) Matchers.containsString(org.hamcrest.Matchers.containsString) ReceiveTimeoutTransportException(org.opensearch.transport.ReceiveTimeoutTransportException) SearchAction(org.opensearch.action.search.SearchAction) TimeValue.timeValueMillis(org.opensearch.common.unit.TimeValue.timeValueMillis) UpgradeAction(org.opensearch.action.admin.indices.upgrade.post.UpgradeAction) HashMap(java.util.HashMap) ResourceNotFoundException(org.opensearch.ResourceNotFoundException) MockTransportService(org.opensearch.test.transport.MockTransportService) ValidateQueryAction(org.opensearch.action.admin.indices.validate.query.ValidateQueryAction) Function(java.util.function.Function) ArrayList(java.util.ArrayList) ListTasksResponse(org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse) Matchers.hasSize(org.hamcrest.Matchers.hasSize) 
SearchResponse(org.opensearch.action.search.SearchResponse) QueryBuilders(org.opensearch.index.query.QueryBuilders) Matchers.empty(org.hamcrest.Matchers.empty) Matchers.greaterThanOrEqualTo(org.hamcrest.Matchers.greaterThanOrEqualTo) ListTasksAction(org.opensearch.action.admin.cluster.node.tasks.list.ListTasksAction) TaskId(org.opensearch.tasks.TaskId) IOException(java.io.IOException) BrokenBarrierException(java.util.concurrent.BrokenBarrierException) GetTaskRequest(org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest) OpenSearchAssertions.assertFutureThrows(org.opensearch.test.hamcrest.OpenSearchAssertions.assertFutureThrows) Plugin(org.opensearch.plugins.Plugin) ActionFuture(org.opensearch.action.ActionFuture) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) TaskInfo(org.opensearch.tasks.TaskInfo) TaskOperationFailure(org.opensearch.action.TaskOperationFailure) ClusterService(org.opensearch.cluster.service.ClusterService) GetTaskResponse(org.opensearch.action.admin.cluster.node.tasks.get.GetTaskResponse) Collections(java.util.Collections) MockTaskManagerListener(org.opensearch.test.tasks.MockTaskManagerListener) SETTING_HTTP_MAX_HEADER_SIZE(org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE) Matchers.containsString(org.hamcrest.Matchers.containsString) Tuple(org.opensearch.common.collect.Tuple)

Example 5 with Task

Use of org.opensearch.tasks.Task in project OpenSearch by opensearch-project.

From class CancellableTasksIT, method testBanOnlyNodesWithOutstandingDescendantTasks().

public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception {
    if (randomBoolean()) {
        internalCluster().startNodes(randomIntBetween(1, 3));
    }
    Set<DiscoveryNode> nodes = StreamSupport.stream(clusterService().state().nodes().spliterator(), false).collect(Collectors.toSet());
    final TestRequest rootRequest = generateTestRequest(nodes, 0, between(1, 4));
    ActionFuture<TestResponse> rootTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest);
    Set<TestRequest> pendingRequests = allowPartialRequest(rootRequest);
    TaskId rootTaskId = getRootTaskId(rootRequest);
    ActionFuture<CancelTasksResponse> cancelFuture = client().admin().cluster().prepareCancelTasks().setTaskId(rootTaskId).waitForCompletion(true).execute();
    if (randomBoolean()) {
        List<TaskInfo> runningTasks = client().admin().cluster().prepareListTasks().setActions(TransportTestAction.ACTION.name()).setDetailed(true).get().getTasks();
        for (TaskInfo subTask : randomSubsetOf(runningTasks)) {
            client().admin().cluster().prepareCancelTasks().setTaskId(subTask.getTaskId()).waitForCompletion(false).get();
        }
    }
    assertBusy(() -> {
        for (DiscoveryNode node : nodes) {
            TaskManager taskManager = internalCluster().getInstance(TransportService.class, node.getName()).getTaskManager();
            Set<TaskId> expectedBans = new HashSet<>();
            for (TestRequest req : pendingRequests) {
                if (req.node.equals(node)) {
                    List<Task> childTasks = taskManager.getTasks().values().stream().filter(t -> t.getParentTaskId() != null && t.getDescription().equals(req.taskDescription())).collect(Collectors.toList());
                    assertThat(childTasks, hasSize(1));
                    CancellableTask childTask = (CancellableTask) childTasks.get(0);
                    assertTrue(childTask.isCancelled());
                    expectedBans.add(childTask.getParentTaskId());
                }
            }
            assertThat(taskManager.getBannedTaskIds(), equalTo(expectedBans));
        }
    }, 30, TimeUnit.SECONDS);
    allowEntireRequest(rootRequest);
    cancelFuture.actionGet();
    waitForRootTask(rootTaskFuture);
    ensureAllBansRemoved();
}
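
The cancellation itself is a single client call, already visible above; the sketch below pulls it out into a hypothetical helper (not an OpenSearch API) to highlight the two knobs that matter for this test: setTaskId targets the root task, and waitForCompletion(true) makes actionGet() block until the cancelled task and its descendants have actually finished.

import org.opensearch.action.ActionFuture;
import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.opensearch.client.Client;
import org.opensearch.tasks.TaskId;

// Hypothetical helper built only from the client calls used in the test above.
final class CancelRootTaskSketch {

    static CancelTasksResponse cancelAndWait(Client client, TaskId rootTaskId) {
        ActionFuture<CancelTasksResponse> cancelFuture = client.admin()
            .cluster()
            .prepareCancelTasks()
            .setTaskId(rootTaskId)       // cancel the root task; bans propagate to its descendants
            .waitForCompletion(true)     // block until the cancelled tasks have finished
            .execute();
        return cancelFuture.actionGet();
    }
}
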
Also used : ActionPlugin(org.opensearch.plugins.ActionPlugin) AbstractRunnable(org.opensearch.common.util.concurrent.AbstractRunnable) ActionRequest(org.opensearch.action.ActionRequest) ConcurrentCollections(org.opensearch.common.util.concurrent.ConcurrentCollections) GroupedActionListener(org.opensearch.action.support.GroupedActionListener) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) LatchedActionListener(org.opensearch.action.LatchedActionListener) PlainActionFuture(org.opensearch.action.support.PlainActionFuture) Map(java.util.Map) Inject(org.opensearch.common.inject.Inject) ActionListener(org.opensearch.action.ActionListener) ActionResponse(org.opensearch.action.ActionResponse) ActionType(org.opensearch.action.ActionType) CancelTasksResponse(org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse) CancellableTask(org.opensearch.tasks.CancellableTask) NodeClient(org.opensearch.client.node.NodeClient) Collection(java.util.Collection) ExceptionsHelper(org.opensearch.ExceptionsHelper) Set(java.util.Set) Task(org.opensearch.tasks.Task) TransportService(org.opensearch.transport.TransportService) Collectors(java.util.stream.Collectors) TaskManager(org.opensearch.tasks.TaskManager) Objects(java.util.Objects) Matchers.instanceOf(org.hamcrest.Matchers.instanceOf) ActionFilters(org.opensearch.action.support.ActionFilters) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Matchers.equalTo(org.hamcrest.Matchers.equalTo) TaskCancelledException(org.opensearch.tasks.TaskCancelledException) Matchers.anyOf(org.hamcrest.Matchers.anyOf) OpenSearchIntegTestCase(org.opensearch.test.OpenSearchIntegTestCase) Matchers.containsString(org.hamcrest.Matchers.containsString) TransportException(org.opensearch.transport.TransportException) HandledTransportAction(org.opensearch.action.support.HandledTransportAction) ActionRunnable(org.opensearch.action.ActionRunnable) ThreadPool(org.opensearch.threadpool.ThreadPool) StreamOutput(org.opensearch.common.io.stream.StreamOutput) ResourceNotFoundException(org.opensearch.ResourceNotFoundException) InternalTestCluster(org.opensearch.test.InternalTestCluster) ActionRequestValidationException(org.opensearch.action.ActionRequestValidationException) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ListTasksResponse(org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse) Matchers.hasSize(org.hamcrest.Matchers.hasSize) StreamSupport(java.util.stream.StreamSupport) Before(org.junit.Before) StreamInput(org.opensearch.common.io.stream.StreamInput) Matchers.empty(org.hamcrest.Matchers.empty) SetOnce(org.apache.lucene.util.SetOnce) TaskId(org.opensearch.tasks.TaskId) TransportResponseHandler(org.opensearch.transport.TransportResponseHandler) IOException(java.io.IOException) Plugin(org.opensearch.plugins.Plugin) ActionFuture(org.opensearch.action.ActionFuture) TimeUnit(java.util.concurrent.TimeUnit) Sets(org.opensearch.common.util.set.Sets) TaskInfo(org.opensearch.tasks.TaskInfo) Collections(java.util.Collections) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) CancellableTask(org.opensearch.tasks.CancellableTask) Task(org.opensearch.tasks.Task) TaskId(org.opensearch.tasks.TaskId) CancelTasksResponse(org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse) TaskInfo(org.opensearch.tasks.TaskInfo) TaskManager(org.opensearch.tasks.TaskManager) CancellableTask(org.opensearch.tasks.CancellableTask) TransportService(org.opensearch.transport.TransportService) HashSet(java.util.HashSet)

Aggregations

Task (org.opensearch.tasks.Task): 54 usages
ActionListener (org.opensearch.action.ActionListener): 24 usages
ActionFilters (org.opensearch.action.support.ActionFilters): 22 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 21 usages
IOException (java.io.IOException): 20 usages
TaskId (org.opensearch.tasks.TaskId): 19 usages
TransportService (org.opensearch.transport.TransportService): 19 usages
ThreadPool (org.opensearch.threadpool.ThreadPool): 16 usages
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode): 15 usages
ArrayList (java.util.ArrayList): 14 usages
List (java.util.List): 14 usages
Map (java.util.Map): 14 usages
NodeClient (org.opensearch.client.node.NodeClient): 13 usages
ClusterService (org.opensearch.cluster.service.ClusterService): 12 usages
HashSet (java.util.HashSet): 11 usages
Matchers.containsString (org.hamcrest.Matchers.containsString): 11 usages
OpenSearchException (org.opensearch.OpenSearchException): 11 usages
HashMap (java.util.HashMap): 10 usages
TimeUnit (java.util.concurrent.TimeUnit): 10 usages
Inject (org.opensearch.common.inject.Inject): 10 usages