Search in sources:

Example 11 with CreateIndexRequest

Use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project job-scheduler by opensearch-project.

From the class LockService, method createLockIndex.

@VisibleForTesting
void createLockIndex(ActionListener<Boolean> listener) {
    if (lockIndexExist()) {
        listener.onResponse(true);
    } else {
        final CreateIndexRequest request = new CreateIndexRequest(LOCK_INDEX_NAME).mapping(lockMapping());
        client.admin().indices().create(request, ActionListener.wrap(response -> listener.onResponse(response.isAcknowledged()), exception -> {
            if (exception instanceof ResourceAlreadyExistsException || exception.getCause() instanceof ResourceAlreadyExistsException) {
                listener.onResponse(true);
            } else {
                listener.onFailure(exception);
            }
        }));
    }
}
Also used: SequenceNumbers(org.opensearch.index.seqno.SequenceNumbers) ToXContent(org.opensearch.common.xcontent.ToXContent) XContentParser(org.opensearch.common.xcontent.XContentParser) XContentFactory(org.opensearch.common.xcontent.XContentFactory) ActionListener(org.opensearch.action.ActionListener) LockModel(org.opensearch.jobscheduler.spi.LockModel) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) DeleteRequest(org.opensearch.action.delete.DeleteRequest) Client(org.opensearch.client.Client) VersionConflictEngineException(org.opensearch.index.engine.VersionConflictEngineException) IndexNotFoundException(org.opensearch.index.IndexNotFoundException) GetRequest(org.opensearch.action.get.GetRequest) LoggingDeprecationHandler(org.opensearch.common.xcontent.LoggingDeprecationHandler) IOException(java.io.IOException) DocumentMissingException(org.opensearch.index.engine.DocumentMissingException) Instant(java.time.Instant) ScheduledJobParameter(org.opensearch.jobscheduler.spi.ScheduledJobParameter) InputStreamReader(java.io.InputStreamReader) StandardCharsets(java.nio.charset.StandardCharsets) Logger(org.apache.logging.log4j.Logger) NamedXContentRegistry(org.opensearch.common.xcontent.NamedXContentRegistry) ClusterService(org.opensearch.cluster.service.ClusterService) VisibleForTesting(com.cronutils.utils.VisibleForTesting) ResourceAlreadyExistsException(org.opensearch.ResourceAlreadyExistsException) DocWriteResponse(org.opensearch.action.DocWriteResponse) UpdateRequest(org.opensearch.action.update.UpdateRequest) XContentType(org.opensearch.common.xcontent.XContentType) JobExecutionContext(org.opensearch.jobscheduler.spi.JobExecutionContext) BufferedReader(java.io.BufferedReader) IndexRequest(org.opensearch.action.index.IndexRequest) LogManager(org.apache.logging.log4j.LogManager) InputStream(java.io.InputStream)
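For context, here is a minimal, hedged sketch of the same create-or-ignore pattern in isolation. The ensureIndex method name and its indexName/mappingJson parameters are illustrative assumptions, not part of LockService; only the ResourceAlreadyExistsException handling mirrors the example above.

// Sketch (not from the source): create an index and treat a concurrent
// "already exists" failure as success, as the LockService example does.
void ensureIndex(Client client, String indexName, String mappingJson, ActionListener<Boolean> listener) {
    CreateIndexRequest request = new CreateIndexRequest(indexName).mapping(mappingJson);
    client.admin().indices().create(request, ActionListener.wrap(
        response -> listener.onResponse(response.isAcknowledged()),
        exception -> {
            // Another node may have won the race to create the index; that still counts as success.
            if (exception instanceof ResourceAlreadyExistsException
                || exception.getCause() instanceof ResourceAlreadyExistsException) {
                listener.onResponse(true);
            } else {
                listener.onFailure(exception);
            }
        }));
}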

Example 12 with CreateIndexRequest

Use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class IndicesClusterStateServiceRandomUpdatesTests, method randomlyUpdateClusterState.

public ClusterState randomlyUpdateClusterState(ClusterState state, Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap, Supplier<MockIndicesService> indicesServiceSupplier) {
    // randomly remove no_cluster_manager blocks
    if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) {
        state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)).build();
    }
    // randomly add no_cluster_manager blocks
    if (rarely() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID) == false) {
        ClusterBlock block = randomBoolean() ? NoMasterBlockService.NO_MASTER_BLOCK_ALL : NoMasterBlockService.NO_MASTER_BLOCK_WRITES;
        state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build();
    }
    // if no_cluster_manager block is in place, make no other cluster state changes
    if (state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) {
        return state;
    }
    // randomly create new indices (until we have 200 max)
    for (int i = 0; i < randomInt(5); i++) {
        if (state.metadata().indices().size() > 200) {
            break;
        }
        String name = "index_" + randomAlphaOfLength(15).toLowerCase(Locale.ROOT);
        Settings.Builder settingsBuilder = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3));
        if (randomBoolean()) {
            int min = randomInt(2);
            int max = min + randomInt(3);
            settingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, randomBoolean() ? min + "-" + max : min + "-all");
        } else {
            settingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        }
        CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metadata().hasIndex(name));
    }
    // randomly delete indices
    Set<String> indicesToDelete = new HashSet<>();
    int numberOfIndicesToDelete = randomInt(Math.min(2, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metadata().indices().keys().toArray(String.class))) {
        indicesToDelete.add(state.metadata().index(index).getIndex().getName());
    }
    if (indicesToDelete.isEmpty() == false) {
        DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()]));
        state = cluster.deleteIndices(state, deleteRequest);
        for (String index : indicesToDelete) {
            assertFalse(state.metadata().hasIndex(index));
        }
    }
    // randomly close indices
    int numberOfIndicesToClose = randomInt(Math.min(1, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToClose, state.metadata().indices().keys().toArray(String.class))) {
        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metadata().index(index).getIndex().getName());
        state = cluster.closeIndices(state, closeIndexRequest);
    }
    // randomly open indices
    int numberOfIndicesToOpen = randomInt(Math.min(1, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metadata().indices().keys().toArray(String.class))) {
        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metadata().index(index).getIndex().getName());
        state = cluster.openIndices(state, openIndexRequest);
    }
    // randomly update settings
    Set<String> indicesToUpdate = new HashSet<>();
    boolean containsClosedIndex = false;
    int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metadata().indices().keys().toArray(String.class))) {
        indicesToUpdate.add(state.metadata().index(index).getIndex().getName());
        if (state.metadata().index(index).getState() == IndexMetadata.State.CLOSE) {
            containsClosedIndex = true;
        }
    }
    if (indicesToUpdate.isEmpty() == false) {
        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indicesToUpdate.toArray(new String[indicesToUpdate.size()]));
        Settings.Builder settings = Settings.builder();
        if (containsClosedIndex == false) {
            settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        }
        settings.put("index.refresh_interval", randomIntBetween(1, 5) + "s");
        updateSettingsRequest.settings(settings.build());
        state = cluster.updateSettings(state, updateSettingsRequest);
    }
    // randomly reroute
    if (rarely()) {
        state = cluster.reroute(state, new ClusterRerouteRequest());
    }
    // randomly start and fail allocated shards
    final Map<ShardRouting, Long> startedShards = new HashMap<>();
    List<FailedShard> failedShards = new ArrayList<>();
    for (DiscoveryNode node : state.nodes()) {
        IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
        MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService;
        for (MockIndexService indexService : indicesService) {
            for (MockIndexShard indexShard : indexService) {
                ShardRouting persistedShardRouting = indexShard.routingEntry();
                if (persistedShardRouting.initializing() && randomBoolean()) {
                    startedShards.put(persistedShardRouting, indexShard.term());
                } else if (rarely()) {
                    failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception(), randomBoolean()));
                }
            }
        }
    }
    state = cluster.applyFailedShards(state, failedShards);
    state = cluster.applyStartedShards(state, startedShards);
    // randomly add and remove nodes (except current cluster-manager)
    if (rarely()) {
        if (randomBoolean()) {
            // add node
            if (state.nodes().getSize() < 10) {
                state = cluster.addNodes(state, Collections.singletonList(createNode()));
                updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
            }
        } else {
            // remove node
            if (state.nodes().getDataNodes().size() > 3) {
                DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class));
                if (discoveryNode.equals(state.nodes().getMasterNode()) == false) {
                    state = cluster.removeNodes(state, Collections.singletonList(discoveryNode));
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
                if (randomBoolean()) {
                    // and add it back
                    state = cluster.addNodes(state, Collections.singletonList(discoveryNode));
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
            }
        }
    }
    return state;
}
Also used: DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) UpdateSettingsRequest(org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DeleteIndexRequest(org.opensearch.action.admin.indices.delete.DeleteIndexRequest) ClusterBlock(org.opensearch.cluster.block.ClusterBlock) ClusterRerouteRequest(org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest) Settings(org.opensearch.common.settings.Settings) HashSet(java.util.HashSet) OpenIndexRequest(org.opensearch.action.admin.indices.open.OpenIndexRequest) FailedShard(org.opensearch.cluster.routing.allocation.FailedShard) CloseIndexRequest(org.opensearch.action.admin.indices.close.CloseIndexRequest) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) ShardRouting(org.opensearch.cluster.routing.ShardRouting)
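As a quick standalone illustration of the request construction this test exercises, the sketch below builds a CreateIndexRequest with explicit settings and skips the active-shard wait. The index name and settings values are made up; the setting keys come from IndexMetadata as in the test.

// Illustrative sketch only: values are arbitrary, not taken from the test above.
Settings indexSettings = Settings.builder()
    .put(SETTING_NUMBER_OF_SHARDS, 2)
    .put(SETTING_NUMBER_OF_REPLICAS, 1)
    .build();
CreateIndexRequest request = new CreateIndexRequest("example_index", indexSettings)
    .waitForActiveShards(ActiveShardCount.NONE); // don't block on shard allocation in the simulated cluster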

Example 13 with CreateIndexRequest

Use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class TransportReplicationActionTests, method testClosedIndexOnReroute.

public void testClosedIndexOnReroute() {
    final String index = "test";
    // no replicas in order to skip the replication part
    ClusterStateChanges clusterStateChanges = new ClusterStateChanges(xContentRegistry(), threadPool);
    setState(clusterService, clusterStateChanges.closeIndices(clusterStateChanges.createIndex(clusterService.state(), new CreateIndexRequest(index)), new CloseIndexRequest(index)));
    assertThat(clusterService.state().metadata().indices().get(index).getState(), equalTo(IndexMetadata.State.CLOSE));
    logger.debug("--> using initial state:\n{}", clusterService.state());
    Request request = new Request(new ShardId(clusterService.state().metadata().indices().get(index).getIndex(), 0)).timeout("1ms");
    PlainActionFuture<TestResponse> listener = new PlainActionFuture<>();
    ReplicationTask task = maybeTask();
    TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithBlocks", transportService, clusterService, shardStateAction, threadPool);
    TestAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
    reroutePhase.run();
    assertListenerThrows("must throw index closed exception", listener, IndexClosedException.class);
    assertPhase(task, "failed");
    assertFalse(request.isRetrySet.get());
}
Also used: CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) CloseIndexRequest(org.opensearch.action.admin.indices.close.CloseIndexRequest) TransportRequest(org.opensearch.transport.TransportRequest) Matchers.hasToString(org.hamcrest.Matchers.hasToString) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Mockito.anyString(org.mockito.Mockito.anyString) ClusterStateChanges(org.opensearch.indices.cluster.ClusterStateChanges) ShardId(org.opensearch.index.shard.ShardId) PlainActionFuture(org.opensearch.action.support.PlainActionFuture)
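For reference, a hedged sketch of the create-then-close setup the test applies through ClusterStateChanges, separated from the transport-action plumbing; the local variable names are illustrative.

// Sketch: derive a cluster state with index "test" created and then closed, then assert its metadata state.
ClusterState initialState = clusterService.state();
ClusterState withIndex = clusterStateChanges.createIndex(initialState, new CreateIndexRequest("test"));
ClusterState closedState = clusterStateChanges.closeIndices(withIndex, new CloseIndexRequest("test"));
assertThat(closedState.metadata().indices().get("test").getState(), equalTo(IndexMetadata.State.CLOSE));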

Example 14 with CreateIndexRequest

Use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class AutoExpandReplicasTests, method testAutoExpandWhenNodeLeavesAndPossiblyRejoins.

/**
 * Checks that when nodes leave the cluster, the auto-expand-replica functionality only triggers after failing the shards on
 * the removed nodes. This ensures that active shards on other live nodes are not failed if the primary resided on a now dead node.
 * Instead, one of the replicas on the live nodes first gets promoted to primary, and the auto-expansion (removing replicas) only
 * triggers in a follow-up step.
 */
public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedException {
    final ThreadPool threadPool = new TestThreadPool(getClass().getName());
    final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
    try {
        List<DiscoveryNode> allNodes = new ArrayList<>();
        // local node is the cluster-manager
        DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
        allNodes.add(localNode);
        int numDataNodes = randomIntBetween(3, 5);
        List<DiscoveryNode> dataNodes = new ArrayList<>(numDataNodes);
        for (int i = 0; i < numDataNodes; i++) {
            dataNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE));
        }
        allNodes.addAll(dataNodes);
        ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
        CreateIndexRequest request = new CreateIndexRequest("index", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_AUTO_EXPAND_REPLICAS, "0-all").build()).waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metadata().hasIndex("index"));
        while (state.routingTable().index("index").shard(0).allShardsStarted() == false) {
            logger.info(state);
            state = cluster.applyStartedShards(state, state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING));
            state = cluster.reroute(state, new ClusterRerouteRequest());
        }
        IndexShardRoutingTable preTable = state.routingTable().index("index").shard(0);
        final Set<String> unchangedNodeIds;
        final IndexShardRoutingTable postTable;
        if (randomBoolean()) {
            // simulate node removal
            List<DiscoveryNode> nodesToRemove = randomSubsetOf(2, dataNodes);
            unchangedNodeIds = dataNodes.stream().filter(n -> nodesToRemove.contains(n) == false).map(DiscoveryNode::getId).collect(Collectors.toSet());
            state = cluster.removeNodes(state, nodesToRemove);
            postTable = state.routingTable().index("index").shard(0);
            assertTrue("not all shards started in " + state.toString(), postTable.allShardsStarted());
            assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(is(in(preTable.getAllAllocationIds()))));
        } else {
            // fake an election where conflicting nodes are removed and re-added
            state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build();
            List<DiscoveryNode> conflictingNodes = randomSubsetOf(2, dataNodes);
            unchangedNodeIds = dataNodes.stream().filter(n -> conflictingNodes.contains(n) == false).map(DiscoveryNode::getId).collect(Collectors.toSet());
            List<DiscoveryNode> nodesToAdd = conflictingNodes.stream().map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(), n.getAttributes(), n.getRoles(), n.getVersion())).collect(Collectors.toList());
            if (randomBoolean()) {
                nodesToAdd.add(createNode(DiscoveryNodeRole.DATA_ROLE));
            }
            state = cluster.joinNodesAndBecomeMaster(state, nodesToAdd);
            postTable = state.routingTable().index("index").shard(0);
        }
        Set<String> unchangedAllocationIds = preTable.getShards().stream().filter(shr -> unchangedNodeIds.contains(shr.currentNodeId())).map(shr -> shr.allocationId().getId()).collect(Collectors.toSet());
        assertThat(postTable.toString(), unchangedAllocationIds, everyItem(is(in(postTable.getAllAllocationIds()))));
        postTable.getShards().forEach(shardRouting -> {
            if (shardRouting.assignedToNode() && unchangedAllocationIds.contains(shardRouting.allocationId().getId())) {
                assertTrue("Shard should be active: " + shardRouting, shardRouting.active());
            }
        });
    } finally {
        terminate(threadPool);
    }
}
Also used: DiscoveryNodes(org.opensearch.cluster.node.DiscoveryNodes) UpdateSettingsRequest(org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest) SETTING_AUTO_EXPAND_REPLICAS(org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS) ThreadPool(org.opensearch.threadpool.ThreadPool) TestThreadPool(org.opensearch.threadpool.TestThreadPool) ClusterStateChanges(org.opensearch.indices.cluster.ClusterStateChanges) Version(org.opensearch.Version) ClusterRerouteRequest(org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest) ClusterStateCreationUtils(org.opensearch.action.support.replication.ClusterStateCreationUtils) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ClusterState(org.opensearch.cluster.ClusterState) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) Matchers.everyItem(org.hamcrest.Matchers.everyItem) VersionUtils(org.opensearch.test.VersionUtils) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Locale(java.util.Locale) ShardRoutingState(org.opensearch.cluster.routing.ShardRoutingState) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) IndexShardRoutingTable(org.opensearch.cluster.routing.IndexShardRoutingTable) OpenSearchTestCase(org.opensearch.test.OpenSearchTestCase) Set(java.util.Set) Settings(org.opensearch.common.settings.Settings) DiscoveryNodeRole(org.opensearch.cluster.node.DiscoveryNodeRole) Collectors(java.util.stream.Collectors) ActiveShardCount(org.opensearch.action.support.ActiveShardCount) List(java.util.List) Matchers.equalTo(org.hamcrest.Matchers.equalTo) SETTING_NUMBER_OF_SHARDS(org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS) Matchers.is(org.hamcrest.Matchers.is) Collections(java.util.Collections) Matchers.in(org.hamcrest.Matchers.in)
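For a focused look at the auto-expand setting the test exercises, here is a hedged, standalone sketch; the index name is an illustrative placeholder and the request is not taken verbatim from the test.

// Sketch: one primary shard, replicas auto-expanded across eligible data nodes ("0-all").
// A bounded range such as "0-5" would cap the expansion instead.
CreateIndexRequest autoExpandRequest = new CreateIndexRequest(
    "my-auto-expand-index",
    Settings.builder()
        .put(SETTING_NUMBER_OF_SHARDS, 1)
        .put(SETTING_AUTO_EXPAND_REPLICAS, "0-all")
        .build()
).waitForActiveShards(ActiveShardCount.NONE);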

Example 15 with CreateIndexRequest

Use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class FailedNodeRoutingTests, method testRandomClusterPromotesNewestReplica.

public void testRandomClusterPromotesNewestReplica() throws InterruptedException {
    ThreadPool threadPool = new TestThreadPool(getClass().getName());
    ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
    ClusterState state = randomInitialClusterState();
    // randomly add nodes of mixed versions
    logger.info("--> adding random nodes");
    for (int i = 0; i < randomIntBetween(4, 8); i++) {
        DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build();
        state = ClusterState.builder(state).nodes(newNodes).build();
        // always reroute after adding node
        state = cluster.reroute(state, new ClusterRerouteRequest());
    }
    // Log the node versions (for debugging if necessary)
    for (ObjectCursor<DiscoveryNode> cursor : state.nodes().getDataNodes().values()) {
        Version nodeVer = cursor.value.getVersion();
        logger.info("--> node [{}] has version [{}]", cursor.value.getId(), nodeVer);
    }
    // randomly create some indices
    logger.info("--> creating some indices");
    for (int i = 0; i < randomIntBetween(2, 5); i++) {
        String name = "index_" + randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
        Settings.Builder settingsBuilder = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 4)).put(SETTING_NUMBER_OF_REPLICAS, randomIntBetween(2, 4));
        CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metadata().hasIndex(name));
    }
    logger.info("--> starting shards");
    state = cluster.applyStartedShards(state, state.getRoutingNodes().shardsWithState(INITIALIZING));
    logger.info("--> starting replicas a random number of times");
    for (int i = 0; i < randomIntBetween(1, 10); i++) {
        state = cluster.applyStartedShards(state, state.getRoutingNodes().shardsWithState(INITIALIZING));
    }
    boolean keepGoing = true;
    while (keepGoing) {
        List<ShardRouting> primaries = state.getRoutingNodes().shardsWithState(STARTED).stream().filter(ShardRouting::primary).collect(Collectors.toList());
        // Pick a random subset of primaries to fail
        List<FailedShard> shardsToFail = new ArrayList<>();
        List<ShardRouting> failedPrimaries = randomSubsetOf(primaries);
        failedPrimaries.stream().forEach(sr -> {
            shardsToFail.add(new FailedShard(randomFrom(sr), "failed primary", new Exception(), randomBoolean()));
        });
        logger.info("--> state before failing shards: {}", state);
        state = cluster.applyFailedShards(state, shardsToFail);
        final ClusterState compareState = state;
        failedPrimaries.forEach(shardRouting -> {
            logger.info("--> verifying version for {}", shardRouting);
            ShardRouting newPrimary = compareState.routingTable().index(shardRouting.index()).shard(shardRouting.id()).primaryShard();
            Version newPrimaryVersion = getNodeVersion(newPrimary, compareState);
            logger.info("--> new primary is on version {}: {}", newPrimaryVersion, newPrimary);
            compareState.routingTable().shardRoutingTable(newPrimary.shardId()).shardsWithState(STARTED).stream().forEach(sr -> {
                Version candidateVer = getNodeVersion(sr, compareState);
                if (candidateVer != null) {
                    logger.info("--> candidate on {} node; shard routing: {}", candidateVer, sr);
                    assertTrue("candidate was not on the newest version, new primary is on " + newPrimaryVersion + " and there is a candidate on " + candidateVer, candidateVer.onOrBefore(newPrimaryVersion));
                }
            });
        });
        keepGoing = randomBoolean();
    }
    terminate(threadPool);
}
Also used: ClusterState(org.opensearch.cluster.ClusterState) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) ThreadPool(org.opensearch.threadpool.ThreadPool) TestThreadPool(org.opensearch.threadpool.TestThreadPool) ArrayList(java.util.ArrayList) ClusterStateChanges(org.opensearch.indices.cluster.ClusterStateChanges) ClusterRerouteRequest(org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest) Version(org.opensearch.Version) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) ShardRouting(org.opensearch.cluster.routing.ShardRouting) DiscoveryNodes(org.opensearch.cluster.node.DiscoveryNodes) Settings(org.opensearch.common.settings.Settings)
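To isolate the promotion step from the surrounding randomization, the sketch below fails a single started primary and reads back the promoted shard; state, cluster, and primary are assumed to be set up as in the test, and the failure message is made up.

// Sketch: fail one primary and let ClusterStateChanges reroute, promoting a replica.
List<FailedShard> toFail = Collections.singletonList(
    new FailedShard(primary, "simulated primary failure", new Exception(), true)); // true = mark copy as stale
state = cluster.applyFailedShards(state, toFail);
ShardRouting promoted = state.routingTable().shardRoutingTable(primary.shardId()).primaryShard();
// In this test, the promoted replica should sit on the newest node version among the started copies.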

Aggregations

CreateIndexRequest (org.opensearch.action.admin.indices.create.CreateIndexRequest): 79
IndexRequest (org.opensearch.action.index.IndexRequest): 34
Settings (org.opensearch.common.settings.Settings): 31
Client (org.opensearch.client.Client): 29
Test (org.junit.Test): 27
SingleClusterTest (org.opensearch.security.test.SingleClusterTest): 22
RestHelper (org.opensearch.security.test.helper.rest.RestHelper): 19
IOException (java.io.IOException): 18
HttpResponse (org.opensearch.security.test.helper.rest.RestHelper.HttpResponse): 15
CreateIndexResponse (org.opensearch.action.admin.indices.create.CreateIndexResponse): 13
DynamicSecurityConfig (org.opensearch.security.test.DynamicSecurityConfig): 13
IndicesAliasesRequest (org.opensearch.action.admin.indices.alias.IndicesAliasesRequest): 12
ActionListener (org.opensearch.action.ActionListener): 11
IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata): 10
ThreadPool (org.opensearch.threadpool.ThreadPool): 10
Alias (org.opensearch.action.admin.indices.alias.Alias): 9
ClusterState (org.opensearch.cluster.ClusterState): 9
Map (java.util.Map): 8
ArrayList (java.util.ArrayList): 7
HashMap (java.util.HashMap): 7