
Example 1 with AtomicArray

Use of org.opensearch.common.util.concurrent.AtomicArray in project OpenSearch by opensearch-project.

From class TransportBulkAction, method doInternalExecute:

protected void doInternalExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener<BulkResponse> listener) {
    final long startTime = relativeTime();
    final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
    boolean hasIndexRequestsWithPipelines = false;
    final Metadata metadata = clusterService.state().getMetadata();
    final Version minNodeVersion = clusterService.state().getNodes().getMinNodeVersion();
    for (DocWriteRequest<?> actionRequest : bulkRequest.requests) {
        IndexRequest indexRequest = getIndexWriteRequest(actionRequest);
        if (indexRequest != null) {
            // Each index request needs to be evaluated, because this method also modifies the IndexRequest
            boolean indexRequestHasPipeline = IngestService.resolvePipelines(actionRequest, indexRequest, metadata);
            hasIndexRequestsWithPipelines |= indexRequestHasPipeline;
        }
        if (actionRequest instanceof IndexRequest) {
            IndexRequest ir = (IndexRequest) actionRequest;
            ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion);
            if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) {
                throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally");
            }
        }
    }
    if (hasIndexRequestsWithPipelines) {
        // this method is called again after ingest processing, with IngestService.NOOP_PIPELINE_NAME set on each
        // request, so this branch is not taken on the second pass.
        try {
            if (Assertions.ENABLED) {
                final boolean arePipelinesResolved = bulkRequest.requests()
                    .stream()
                    .map(TransportBulkAction::getIndexWriteRequest)
                    .filter(Objects::nonNull)
                    .allMatch(IndexRequest::isPipelineResolved);
                assert arePipelinesResolved : bulkRequest;
            }
            if (clusterService.localNode().isIngestNode()) {
                processBulkIndexIngestRequest(task, bulkRequest, executorName, listener);
            } else {
                ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener);
            }
        } catch (Exception e) {
            listener.onFailure(e);
        }
        return;
    }
    final boolean includesSystem = includesSystem(bulkRequest, clusterService.state().metadata().getIndicesLookup(), systemIndices);
    if (includesSystem || needToCheck()) {
        // Attempt to create all the indices that we're going to need during the bulk before we start.
        // Step 1: collect all the indices in the request
        final Map<String, Boolean> indices = bulkRequest.requests.stream()
            .filter(
                request -> request.opType() != DocWriteRequest.OpType.DELETE
                    || request.versionType() == VersionType.EXTERNAL
                    || request.versionType() == VersionType.EXTERNAL_GTE
            )
            .collect(Collectors.toMap(DocWriteRequest::index, DocWriteRequest::isRequireAlias, (v1, v2) -> v1 || v2));
        /* Step 2: filter that to indices that don't exist and we can create. At the same time build a map of indices we can't create
         * that we'll use when we try to run the requests. */
        final Map<String, IndexNotFoundException> indicesThatCannotBeCreated = new HashMap<>();
        Set<String> autoCreateIndices = new HashSet<>();
        ClusterState state = clusterService.state();
        for (Map.Entry<String, Boolean> indexAndFlag : indices.entrySet()) {
            boolean shouldAutoCreate;
            final String index = indexAndFlag.getKey();
            try {
                shouldAutoCreate = shouldAutoCreate(index, state);
            } catch (IndexNotFoundException e) {
                shouldAutoCreate = false;
                indicesThatCannotBeCreated.put(index, e);
            }
            // We should only auto create if we are not requiring it to be an alias
            if (shouldAutoCreate && (indexAndFlag.getValue() == false)) {
                autoCreateIndices.add(index);
            }
        }
        // Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back.
        if (autoCreateIndices.isEmpty()) {
            executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
        } else {
            final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
            for (String index : autoCreateIndices) {
                createIndex(index, bulkRequest.timeout(), minNodeVersion, new ActionListener<CreateIndexResponse>() {

                    @Override
                    public void onResponse(CreateIndexResponse result) {
                        if (counter.decrementAndGet() == 0) {
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(listener) {

                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
                                }
                            });
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        if (!(ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException)) {
                            // fail all requests involving this index, if create didn't work
                            for (int i = 0; i < bulkRequest.requests.size(); i++) {
                                DocWriteRequest<?> request = bulkRequest.requests.get(i);
                                if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) {
                                    bulkRequest.requests.set(i, null);
                                }
                            }
                        }
                        if (counter.decrementAndGet() == 0) {
                            final ActionListener<BulkResponse> wrappedListener = ActionListener.wrap(listener::onResponse, inner -> {
                                inner.addSuppressed(e);
                                listener.onFailure(inner);
                            });
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(wrappedListener) {

                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, wrappedListener, responses, indicesThatCannotBeCreated);
                                }

                                @Override
                                public void onRejection(Exception rejectedException) {
                                    rejectedException.addSuppressed(e);
                                    super.onRejection(rejectedException);
                                }
                            });
                        }
                    }
                });
            }
        }
    } else {
        executeBulk(task, bulkRequest, startTime, listener, responses, emptyMap());
    }
}
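
Here the AtomicArray is sized to bulkRequest.requests.size() up front, so failures from index auto-creation can be written into the exact item slots they belong to before the surviving requests run, and responses still line up one-to-one with the original requests. Below is a minimal sketch of that slot-by-position idiom; it uses the JDK's AtomicReferenceArray (a close stand-in for OpenSearch's AtomicArray) and made-up Request/Response types rather than the real bulk classes, so treat it as an illustration, not the OpenSearch implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Hypothetical stand-ins for DocWriteRequest/BulkItemResponse; not OpenSearch types.
record Request(String index) {}
record Response(int slot, String index, boolean failed, String reason) {}

public class SlotByPositionSketch {
    public static void main(String[] args) {
        List<Request> requests = new ArrayList<>(
            List.of(new Request("logs-1"), new Request("broken-index"), new Request("logs-1"))
        );

        // One slot per request; slots are filled out of order as work completes.
        AtomicReferenceArray<Response> responses = new AtomicReferenceArray<>(requests.size());

        // Record a failure for every slot whose request targets an index we could not create,
        // and null out the request so the bulk execution below skips it (mirroring how
        // TransportBulkAction handles failed index auto-creation).
        String failedIndex = "broken-index";
        for (int i = 0; i < requests.size(); i++) {
            if (requests.get(i) != null && requests.get(i).index().equals(failedIndex)) {
                responses.set(i, new Response(i, failedIndex, true, "index creation failed"));
                requests.set(i, null);
            }
        }

        // Execute the surviving requests; each response lands in its original slot.
        for (int i = 0; i < requests.size(); i++) {
            Request r = requests.get(i);
            if (r != null) {
                responses.set(i, new Response(i, r.index(), false, null));
            }
        }

        for (int i = 0; i < responses.length(); i++) {
            System.out.println("slot " + i + ": " + responses.get(i));
        }
    }
}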

Example 2 with AtomicArray

Use of org.opensearch.common.util.concurrent.AtomicArray in project OpenSearch by opensearch-project.

From class TransportMultiTermVectorsAction, method doExecute:

@Override
protected void doExecute(Task task, final MultiTermVectorsRequest request, final ActionListener<MultiTermVectorsResponse> listener) {
    ClusterState clusterState = clusterService.state();
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    final AtomicArray<MultiTermVectorsItemResponse> responses = new AtomicArray<>(request.requests.size());
    Map<ShardId, MultiTermVectorsShardRequest> shardRequests = new HashMap<>();
    for (int i = 0; i < request.requests.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        termVectorsRequest.routing(clusterState.metadata().resolveIndexRouting(termVectorsRequest.routing(), termVectorsRequest.index()));
        if (!clusterState.metadata().hasConcreteIndex(termVectorsRequest.index())) {
            responses.set(
                i,
                new MultiTermVectorsItemResponse(
                    null,
                    new MultiTermVectorsResponse.Failure(
                        termVectorsRequest.index(),
                        termVectorsRequest.id(),
                        new IndexNotFoundException(termVectorsRequest.index())
                    )
                )
            );
            continue;
        }
        String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
        if (termVectorsRequest.routing() == null && clusterState.getMetadata().routingRequired(concreteSingleIndex)) {
            responses.set(
                i,
                new MultiTermVectorsItemResponse(
                    null,
                    new MultiTermVectorsResponse.Failure(
                        concreteSingleIndex,
                        termVectorsRequest.id(),
                        new RoutingMissingException(concreteSingleIndex, termVectorsRequest.id())
                    )
                )
            );
            continue;
        }
        ShardId shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex, termVectorsRequest.id(), termVectorsRequest.routing());
        MultiTermVectorsShardRequest shardRequest = shardRequests.get(shardId);
        if (shardRequest == null) {
            shardRequest = new MultiTermVectorsShardRequest(shardId.getIndexName(), shardId.id());
            shardRequest.preference(request.preference);
            shardRequests.put(shardId, shardRequest);
        }
        shardRequest.add(i, termVectorsRequest);
    }
    if (shardRequests.size() == 0) {
        // only failures..
        listener.onResponse(new MultiTermVectorsResponse(responses.toArray(new MultiTermVectorsItemResponse[responses.length()])));
    }
    executeShardAction(listener, responses, shardRequests);
}
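
The same positional idiom shows up here: per-item failures (unknown index, missing routing) are written straight into their AtomicArray slots, while valid items are grouped into one request per shard, carrying their original slot index so shard-level responses can be scattered back into place. A small sketch of that grouping step follows; the Item/ItemResult types and the shardId function are invented for illustration and do not reflect OpenSearch's real routing.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Hypothetical item/result types standing in for TermVectorsRequest and its responses.
record Item(String index, String id) {}
record ItemResult(String id, String failure) {}

public class GroupByShardSketch {
    // Invented routing: hash the id into one of two shards per index (not OpenSearch's OperationRouting).
    static String shardId(Item item) {
        return item.index() + "[" + (Math.abs(item.id().hashCode()) % 2) + "]";
    }

    public static void main(String[] args) {
        List<Item> items = List.of(new Item("idx", "1"), new Item("missing", "2"), new Item("idx", "3"));
        AtomicReferenceArray<ItemResult> results = new AtomicReferenceArray<>(items.size());
        Map<String, Map<Integer, Item>> shardRequests = new HashMap<>();

        for (int i = 0; i < items.size(); i++) {
            Item item = items.get(i);
            if (item.index().equals("missing")) {
                // Per-item failure recorded immediately, in the slot matching its original position.
                results.set(i, new ItemResult(item.id(), "index not found"));
                continue;
            }
            // Valid items are batched per shard; the slot index travels with the item so the
            // shard-level response can later be scattered back into `results` by position.
            shardRequests.computeIfAbsent(shardId(item), k -> new HashMap<>()).put(i, item);
        }

        System.out.println("pre-filled failure in slot 1: " + results.get(1));
        System.out.println("per-shard batches: " + shardRequests);
    }
}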

Example 3 with AtomicArray

Use of org.opensearch.common.util.concurrent.AtomicArray in project OpenSearch by opensearch-project.

From class ClearScrollControllerTests, method testClearScrollIdsWithFailure:

public void testClearScrollIdsWithFailure() throws IOException, InterruptedException {
    DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);
    DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT);
    DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT);
    AtomicArray<SearchPhaseResult> array = new AtomicArray<>(3);
    SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 1), node1);
    testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null));
    SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 12), node2);
    testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null));
    SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 42), node3);
    testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null));
    array.setOnce(0, testSearchPhaseResult1);
    array.setOnce(1, testSearchPhaseResult2);
    array.setOnce(2, testSearchPhaseResult3);
    AtomicInteger numFreed = new AtomicInteger(0);
    AtomicInteger numFailures = new AtomicInteger(0);
    AtomicInteger numConnectionFailures = new AtomicInteger(0);
    String scrollId = TransportSearchHelper.buildScrollId(array, VersionUtils.randomVersion(random()));
    DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build();
    CountDownLatch latch = new CountDownLatch(1);
    ActionListener<ClearScrollResponse> listener = new LatchedActionListener<>(new ActionListener<ClearScrollResponse>() {

        @Override
        public void onResponse(ClearScrollResponse clearScrollResponse) {
            assertEquals(numFreed.get(), clearScrollResponse.getNumFreed());
            if (numFailures.get() > 0) {
                assertFalse(clearScrollResponse.isSucceeded());
            } else {
                assertTrue(clearScrollResponse.isSucceeded());
            }
        }

        @Override
        public void onFailure(Exception e) {
            throw new AssertionError(e);
        }
    }, latch);
    List<DiscoveryNode> nodesInvoked = new CopyOnWriteArrayList<>();
    SearchTransportService searchTransportService = new SearchTransportService(null, null) {

        @Override
        public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, ActionListener<SearchFreeContextResponse> listener) {
            nodesInvoked.add(connection.getNode());
            boolean freed = randomBoolean();
            boolean fail = randomBoolean();
            Thread t = new Thread(() -> {
                if (fail) {
                    numFailures.incrementAndGet();
                    listener.onFailure(new IllegalArgumentException("boom"));
                } else {
                    if (freed) {
                        numFreed.incrementAndGet();
                    }
                    listener.onResponse(new SearchFreeContextResponse(freed));
                }
            });
            t.start();
        }

        @Override
        public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) {
            if (randomBoolean()) {
                numFailures.incrementAndGet();
                numConnectionFailures.incrementAndGet();
                throw new NodeNotConnectedException(node, "boom");
            }
            return new SearchAsyncActionTests.MockConnection(node);
        }
    };
    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
    clearScrollRequest.scrollIds(Arrays.asList(scrollId));
    ClearScrollController controller = new ClearScrollController(clearScrollRequest, listener, nodes, logger, searchTransportService);
    controller.run();
    latch.await();
    assertEquals(3 - numConnectionFailures.get(), nodesInvoked.size());
}
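
The test wires a LatchedActionListener around the response assertions and has each fake node call report success or failure from its own thread, so the final assertions only run once every callback has fired. A stripped-down sketch of that latch-and-counters harness pattern is below; the class and variable names are illustrative, not OpenSearch APIs.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

public class LatchedHarnessSketch {
    public static void main(String[] args) throws InterruptedException {
        int tasks = 3;
        AtomicInteger freed = new AtomicInteger();
        AtomicInteger failures = new AtomicInteger();
        CountDownLatch latch = new CountDownLatch(tasks);

        for (int i = 0; i < tasks; i++) {
            // Each "node call" runs on its own thread and reports either success or failure,
            // loosely mirroring the overridden sendFreeContext in the test above.
            new Thread(() -> {
                try {
                    if (ThreadLocalRandom.current().nextBoolean()) {
                        failures.incrementAndGet();
                    } else {
                        freed.incrementAndGet();
                    }
                } finally {
                    latch.countDown(); // always release, success or failure
                }
            }).start();
        }

        latch.await(); // block until every callback has fired, then assert
        System.out.println("freed=" + freed.get() + " failures=" + failures.get());
        if (freed.get() + failures.get() != tasks) {
            throw new AssertionError("every task must report exactly once");
        }
    }
}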

Example 4 with AtomicArray

Use of org.opensearch.common.util.concurrent.AtomicArray in project OpenSearch by opensearch-project.

From class ClearScrollControllerTests, method testClearScrollIds:

public void testClearScrollIds() throws IOException, InterruptedException {
    DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);
    DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT);
    DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT);
    AtomicArray<SearchPhaseResult> array = new AtomicArray<>(3);
    SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 1), node1);
    testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null));
    SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 12), node2);
    testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null));
    SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 42), node3);
    testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null));
    array.setOnce(0, testSearchPhaseResult1);
    array.setOnce(1, testSearchPhaseResult2);
    array.setOnce(2, testSearchPhaseResult3);
    AtomicInteger numFreed = new AtomicInteger(0);
    String scrollId = TransportSearchHelper.buildScrollId(array, VersionUtils.randomVersion(random()));
    DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build();
    CountDownLatch latch = new CountDownLatch(1);
    ActionListener<ClearScrollResponse> listener = new LatchedActionListener<>(new ActionListener<ClearScrollResponse>() {

        @Override
        public void onResponse(ClearScrollResponse clearScrollResponse) {
            assertEquals(numFreed.get(), clearScrollResponse.getNumFreed());
            assertTrue(clearScrollResponse.isSucceeded());
        }

        @Override
        public void onFailure(Exception e) {
            throw new AssertionError(e);
        }
    }, latch);
    List<DiscoveryNode> nodesInvoked = new CopyOnWriteArrayList<>();
    SearchTransportService searchTransportService = new SearchTransportService(null, null) {

        @Override
        public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, ActionListener<SearchFreeContextResponse> listener) {
            nodesInvoked.add(connection.getNode());
            boolean freed = randomBoolean();
            if (freed) {
                numFreed.incrementAndGet();
            }
            Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(freed)));
            t.start();
        }

        @Override
        public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) {
            return new SearchAsyncActionTests.MockConnection(node);
        }
    };
    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
    clearScrollRequest.scrollIds(Arrays.asList(scrollId));
    ClearScrollController controller = new ClearScrollController(clearScrollRequest, listener, nodes, logger, searchTransportService);
    controller.run();
    latch.await();
    assertEquals(3, nodesInvoked.size());
    Collections.sort(nodesInvoked, Comparator.comparing(DiscoveryNode::getId));
    assertEquals(nodesInvoked, Arrays.asList(node1, node2, node3));
}
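
Both scroll tests populate the AtomicArray with setOnce, which writes a slot only if it is still empty and treats a second write as a bug. A minimal sketch of that set-once guard, built on the JDK's compareAndSet, is below; the exact exception type and message OpenSearch uses may differ, so this approximates the behavior rather than the real class.

import java.util.concurrent.atomic.AtomicReferenceArray;

public class SetOnceSketch {
    private final AtomicReferenceArray<String> slots;

    SetOnceSketch(int size) {
        this.slots = new AtomicReferenceArray<>(size);
    }

    // Write the slot only if it is still empty; a second write is treated as a programming error.
    void setOnce(int i, String value) {
        if (!slots.compareAndSet(i, null, value)) {
            throw new IllegalStateException("slot [" + i + "] was already set");
        }
    }

    public static void main(String[] args) {
        SetOnceSketch array = new SetOnceSketch(3);
        array.setOnce(0, "result-0");
        array.setOnce(1, "result-1");
        try {
            array.setOnce(0, "duplicate"); // second write to slot 0 fails fast
        } catch (IllegalStateException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}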

Example 5 with AtomicArray

Use of org.opensearch.common.util.concurrent.AtomicArray in project OpenSearch by opensearch-project.

From class CountedCollectorTests, method testCollect:

public void testCollect() throws InterruptedException {
    ArraySearchPhaseResults<SearchPhaseResult> consumer = new ArraySearchPhaseResults<>(randomIntBetween(1, 100));
    List<Integer> state = new ArrayList<>();
    int numResultsExpected = randomIntBetween(1, consumer.getAtomicArray().length());
    MockSearchPhaseContext context = new MockSearchPhaseContext(consumer.getAtomicArray().length());
    CountDownLatch latch = new CountDownLatch(1);
    boolean maybeFork = randomBoolean();
    Executor executor = (runnable) -> {
        if (randomBoolean() && maybeFork) {
            new Thread(runnable).start();
        } else {
            runnable.run();
        }
    };
    CountedCollector<SearchPhaseResult> collector = new CountedCollector<>(consumer, numResultsExpected, latch::countDown, context);
    for (int i = 0; i < numResultsExpected; i++) {
        int shardID = i;
        switch(randomIntBetween(0, 2)) {
            case 0:
                state.add(0);
                executor.execute(() -> collector.countDown());
                break;
            case 1:
                state.add(1);
                executor.execute(() -> {
                    DfsSearchResult dfsSearchResult = new DfsSearchResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), shardID), null, null);
                    dfsSearchResult.setShardIndex(shardID);
                    dfsSearchResult.setSearchShardTarget(new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), null, OriginalIndices.NONE));
                    collector.onResult(dfsSearchResult);
                });
                break;
            case 2:
                state.add(2);
                executor.execute(() -> collector.onFailure(shardID, new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), null, OriginalIndices.NONE), new RuntimeException("boom")));
                break;
            default:
                fail("unknown state");
        }
    }
    latch.await();
    assertEquals(numResultsExpected, state.size());
    AtomicArray<SearchPhaseResult> results = consumer.getAtomicArray();
    for (int i = 0; i < numResultsExpected; i++) {
        switch(state.get(i)) {
            case 0:
                assertNull(results.get(i));
                break;
            case 1:
                assertNotNull(results.get(i));
                assertEquals(i, results.get(i).getContextId().getId());
                break;
            case 2:
                final int shardId = i;
                assertEquals(1, context.failures.stream().filter(f -> f.shardId() == shardId).count());
                break;
            default:
                fail("unknown state");
        }
    }
    for (int i = numResultsExpected; i < results.length(); i++) {
        assertNull("index: " + i, results.get(i));
    }
}
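
CountedCollector's role in this test is to invoke a completion callback exactly once, after a fixed number of results or failures has been reported from arbitrary threads, while storing successful results by shard index. A compact sketch of that countdown pattern follows; the class and method names are hypothetical and only approximate what the OpenSearch class does.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

public class CountedCollectorSketch {
    private final AtomicReferenceArray<String> results;
    private final AtomicInteger remaining;
    private final Runnable onFinish;

    CountedCollectorSketch(int expected, Runnable onFinish) {
        this.results = new AtomicReferenceArray<>(expected);
        this.remaining = new AtomicInteger(expected);
        this.onFinish = onFinish;
    }

    // Success path: store the result in its slot, then count down.
    void onResult(int slot, String result) {
        results.set(slot, result);
        countDown();
    }

    // Failure path: the slot stays empty here, but the countdown still advances.
    void onFailure(int slot, Exception e) {
        countDown();
    }

    private void countDown() {
        if (remaining.decrementAndGet() == 0) {
            onFinish.run(); // runs exactly once, on whichever thread reports last
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        CountedCollectorSketch collector = new CountedCollectorSketch(2, latch::countDown);
        new Thread(() -> collector.onResult(0, "dfs-result-0")).start();
        new Thread(() -> collector.onFailure(1, new RuntimeException("boom"))).start();
        latch.await();
        System.out.println("slot 0 = " + collector.results.get(0) + ", slot 1 = " + collector.results.get(1));
    }
}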

Aggregations: types that co-occur with AtomicArray in the indexed usages, with counts

AtomicArray (org.opensearch.common.util.concurrent.AtomicArray): 25 usages
ShardId (org.opensearch.index.shard.ShardId): 16 usages
ShardSearchContextId (org.opensearch.search.internal.ShardSearchContextId): 14 usages
SearchShardTarget (org.opensearch.search.SearchShardTarget): 13 usages
IOException (java.io.IOException): 11 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 11 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 10 usages
ActionListener (org.opensearch.action.ActionListener): 10 usages
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode): 9 usages
Transport (org.opensearch.transport.Transport): 8 usages
DiscoveryNodes (org.opensearch.cluster.node.DiscoveryNodes): 7 usages
ArrayList (java.util.ArrayList): 6 usages
SearchPhaseResult (org.opensearch.search.SearchPhaseResult): 6 usages
BiFunction (java.util.function.BiFunction): 5 usages
ScoreDoc (org.apache.lucene.search.ScoreDoc): 5 usages
TotalHits (org.apache.lucene.search.TotalHits): 5 usages
ClusterState (org.opensearch.cluster.ClusterState): 5 usages
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 4 usages
TopDocs (org.apache.lucene.search.TopDocs): 4 usages
TopDocsAndMaxScore (org.opensearch.common.lucene.search.TopDocsAndMaxScore): 4 usages