Search in sources:

Example 1 with CreateIndexRequest

use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class TransportBulkAction, method createIndex.

/**
 * Auto-creates an index on behalf of the bulk API before the bulk request proceeds.
 * Builds a {@link CreateIndexRequest} with cause {@code "auto(bulk api)"} and routes it
 * either through {@code AutoCreateAction} (newer clusters) or the plain admin
 * create-index API (clusters containing pre-7.8.0 nodes).
 *
 * @param index          name of the index to create
 * @param timeout        master-node timeout applied to the request
 * @param minNodeVersion lowest node version present in the cluster; decides the code path
 * @param listener       notified with the create-index response or failure
 */
void createIndex(String index, TimeValue timeout, Version minNodeVersion, ActionListener<CreateIndexResponse> listener) {
    CreateIndexRequest request = new CreateIndexRequest();
    request.index(index);
    request.cause("auto(bulk api)");
    request.masterNodeTimeout(timeout);
    // Nodes older than 7.8.0 do not understand AutoCreateAction, so fall back
    // to the classic admin create-index call in mixed-version clusters.
    if (minNodeVersion.before(LegacyESVersion.V_7_8_0)) {
        client.admin().indices().create(request, listener);
    } else {
        client.execute(AutoCreateAction.INSTANCE, request, listener);
    }
}
Also used : CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest)

Example 2 with CreateIndexRequest

use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class TransportResizeAction, method prepareCreateIndexRequest.

// Static so the logic can be unit-tested without standing up the transport action.
/**
 * Translates a {@link ResizeRequest} (SHRINK / SPLIT / CLONE) into the
 * {@link CreateIndexClusterStateUpdateRequest} used to create the target index.
 * Validates the source index exists, derives the target shard count, enforces the
 * Lucene per-index document limit for shrink, and rejects settings that must not
 * be supplied on resize (routing partition size, number_of_routing_shards except
 * when splitting from a single shard, and disabling soft deletes).
 *
 * @param resizeRequest     the user-supplied resize request (carries the target index request)
 * @param state             current cluster state used to look up source index metadata
 * @param perShardDocStats  doc stats per source shard id; may yield null for a shard
 * @param sourceIndexName   name of the index being resized
 * @param targetIndexName   name of the index to create
 * @return the cluster-state update request for creating the target index
 * @throws IndexNotFoundException   if the source index is not in the cluster state
 * @throws IllegalStateException    if a shrink target shard would exceed IndexWriter.MAX_DOCS
 * @throws IllegalArgumentException for disallowed target settings
 */
static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ResizeRequest resizeRequest, final ClusterState state, final IntFunction<DocsStats> perShardDocStats, String sourceIndexName, String targetIndexName) {
    final CreateIndexRequest targetIndex = resizeRequest.getTargetIndexRequest();
    final IndexMetadata metadata = state.metadata().index(sourceIndexName);
    if (metadata == null) {
        throw new IndexNotFoundException(sourceIndexName);
    }
    // Normalize user settings under the "index." prefix and drop the history UUID,
    // which must never be copied onto the new index.
    final Settings.Builder targetIndexSettingsBuilder = Settings.builder().put(targetIndex.settings()).normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX);
    targetIndexSettingsBuilder.remove(IndexMetadata.SETTING_HISTORY_UUID);
    final Settings targetIndexSettings = targetIndexSettingsBuilder.build();
    // Determine the target shard count: explicit setting wins; otherwise SHRINK
    // defaults to 1 and CLONE copies the source count. SPLIT must be explicit.
    final int numShards;
    if (IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
        numShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
    } else {
        assert resizeRequest.getResizeType() != ResizeType.SPLIT : "split must specify the number of shards explicitly";
        if (resizeRequest.getResizeType() == ResizeType.SHRINK) {
            numShards = 1;
        } else {
            assert resizeRequest.getResizeType() == ResizeType.CLONE;
            numShards = metadata.getNumberOfShards();
        }
    }
    // Per target shard: for SHRINK, sum the source-shard doc counts and reject the
    // operation if a merged shard would exceed Lucene's hard document limit; for
    // SPLIT/CLONE, call the selector purely for its argument validation.
    for (int i = 0; i < numShards; i++) {
        if (resizeRequest.getResizeType() == ResizeType.SHRINK) {
            Set<ShardId> shardIds = IndexMetadata.selectShrinkShards(i, metadata, numShards);
            long count = 0;
            for (ShardId id : shardIds) {
                DocsStats docsStats = perShardDocStats.apply(id.id());
                if (docsStats != null) {
                    count += docsStats.getCount();
                }
                if (count > IndexWriter.MAX_DOCS) {
                    throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS + "] docs - too many documents in shards " + shardIds);
                }
            }
        } else if (resizeRequest.getResizeType() == ResizeType.SPLIT) {
            Objects.requireNonNull(IndexMetadata.selectSplitShard(i, metadata, numShards));
        // we just execute this to ensure we get the right exceptions if the number of shards is wrong or less than etc.
        } else {
            Objects.requireNonNull(IndexMetadata.selectCloneShard(i, metadata, numShards));
        // we just execute this to ensure we get the right exceptions if the number of shards is wrong etc.
        }
    }
    if (IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) {
        throw new IllegalArgumentException("cannot provide a routing partition size value when resizing an index");
    }
    if (IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(targetIndexSettings)) {
        // if we have a source index with 1 shards it's legal to set this
        final boolean splitFromSingleShards = resizeRequest.getResizeType() == ResizeType.SPLIT && metadata.getNumberOfShards() == 1;
        if (splitFromSingleShards == false) {
            throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize");
        }
    }
    // Resize relies on soft deletes when the source has them; they cannot be disabled on the target.
    if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(metadata.getSettings()) && IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(targetIndexSettings) && IndexSettings.INDEX_SOFT_DELETES_SETTING.get(targetIndexSettings) == false) {
        throw new IllegalArgumentException("Can't disable [index.soft_deletes.enabled] setting on resize");
    }
    // e.g. "shrink_index", "split_index", "clone_index"
    String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index";
    targetIndex.cause(cause);
    Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings);
    settingsBuilder.put("index.number_of_shards", numShards);
    targetIndex.settings(settingsBuilder);
    return new CreateIndexClusterStateUpdateRequest(cause, targetIndex.index(), targetIndexName).ackTimeout(targetIndex.timeout()).masterNodeTimeout(targetIndex.masterNodeTimeout()).settings(targetIndex.settings()).aliases(targetIndex.aliases()).waitForActiveShards(targetIndex.waitForActiveShards()).recoverFrom(metadata.getIndex()).resizeType(resizeRequest.getResizeType()).copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings());
}
Also used : ShardId(org.opensearch.index.shard.ShardId) CreateIndexClusterStateUpdateRequest(org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest) IndexNotFoundException(org.opensearch.index.IndexNotFoundException) DocsStats(org.opensearch.index.shard.DocsStats) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) Settings(org.opensearch.common.settings.Settings) IndexSettings(org.opensearch.index.IndexSettings)

Example 3 with CreateIndexRequest

use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class MetadataRolloverServiceTests, method testAliasValidation.

/**
 * Verifies {@code MetadataRolloverService.validate} rejects rollover targets that are
 * (1) an alias without a designated write index, (2) a concrete index name, or
 * (3) a non-existent alias — and accepts an alias that has a write index.
 */
public void testAliasValidation() {
    String index1 = randomAlphaOfLength(10);
    String aliasWithWriteIndex = randomAlphaOfLength(10);
    String index2 = randomAlphaOfLength(10);
    String aliasWithNoWriteIndex = randomAlphaOfLength(10);
    // false or null: either way index1 is NOT the write index of aliasWithNoWriteIndex
    Boolean firstIsWriteIndex = randomFrom(false, null);
    final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build();
    // index1 carries both aliases; aliasWithWriteIndex implicitly gets a write index
    // (sole member), while aliasWithNoWriteIndex is explicitly non-write.
    Metadata.Builder metadataBuilder = Metadata.builder().put(IndexMetadata.builder(index1).settings(settings).putAlias(AliasMetadata.builder(aliasWithWriteIndex)).putAlias(AliasMetadata.builder(aliasWithNoWriteIndex).writeIndex(firstIsWriteIndex)));
    IndexMetadata.Builder indexTwoBuilder = IndexMetadata.builder(index2).settings(settings);
    if (firstIsWriteIndex == null) {
        // With two member indices and no explicit write index, the alias still has none.
        indexTwoBuilder.putAlias(AliasMetadata.builder(aliasWithNoWriteIndex).writeIndex(randomFrom(false, null)));
    }
    metadataBuilder.put(indexTwoBuilder);
    Metadata metadata = metadataBuilder.build();
    CreateIndexRequest req = new CreateIndexRequest();
    // Case 1: alias exists but has no write index.
    IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> MetadataRolloverService.validate(metadata, aliasWithNoWriteIndex, randomAlphaOfLength(5), req));
    assertThat(exception.getMessage(), equalTo("rollover target [" + aliasWithNoWriteIndex + "] does not point to a write index"));
    // Case 2: target is a concrete index, not an alias/data stream.
    exception = expectThrows(IllegalArgumentException.class, () -> MetadataRolloverService.validate(metadata, randomFrom(index1, index2), randomAlphaOfLength(5), req));
    assertThat(exception.getMessage(), equalTo("rollover target is a [concrete index] but one of [alias,data_stream] was expected"));
    final String aliasName = randomAlphaOfLength(5);
    // Case 3: target does not exist at all (length 5 cannot collide with the length-10 names above).
    exception = expectThrows(IllegalArgumentException.class, () -> MetadataRolloverService.validate(metadata, aliasName, randomAlphaOfLength(5), req));
    assertThat(exception.getMessage(), equalTo("rollover target [" + aliasName + "] does not exist"));
    // Happy path: alias with a write index validates without throwing.
    MetadataRolloverService.validate(metadata, aliasWithWriteIndex, randomAlphaOfLength(5), req);
}
Also used : Metadata(org.opensearch.cluster.metadata.Metadata) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) AliasMetadata(org.opensearch.cluster.metadata.AliasMetadata) IndexTemplateMetadata(org.opensearch.cluster.metadata.IndexTemplateMetadata) Matchers.containsString(org.hamcrest.Matchers.containsString) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) IndexScopedSettings(org.opensearch.common.settings.IndexScopedSettings) Settings(org.opensearch.common.settings.Settings)

Example 4 with CreateIndexRequest

use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class FailedNodeRoutingTests, method testRandomClusterPromotesNewestReplica.

/**
 * Simulates a mixed-version cluster, randomly fails primaries, and asserts that the
 * replica promoted to primary is always on a node version at least as new as every
 * other started copy of that shard (newest-replica promotion invariant).
 */
public void testRandomClusterPromotesNewestReplica() throws InterruptedException {
    ThreadPool threadPool = new TestThreadPool(getClass().getName());
    ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
    ClusterState state = randomInitialClusterState();
    // randomly add nodes of mixed versions
    logger.info("--> adding random nodes");
    for (int i = 0; i < randomIntBetween(4, 8); i++) {
        DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build();
        state = ClusterState.builder(state).nodes(newNodes).build();
        // always reroute after adding node
        state = cluster.reroute(state, new ClusterRerouteRequest());
    }
    // Log the node versions (for debugging if necessary)
    for (ObjectCursor<DiscoveryNode> cursor : state.nodes().getDataNodes().values()) {
        Version nodeVer = cursor.value.getVersion();
        logger.info("--> node [{}] has version [{}]", cursor.value.getId(), nodeVer);
    }
    // randomly create some indices; replicas >= 2 so a failed primary always has candidates
    logger.info("--> creating some indices");
    for (int i = 0; i < randomIntBetween(2, 5); i++) {
        String name = "index_" + randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
        Settings.Builder settingsBuilder = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 4)).put(SETTING_NUMBER_OF_REPLICAS, randomIntBetween(2, 4));
        CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metadata().hasIndex(name));
    }
    logger.info("--> starting shards");
    state = cluster.applyStartedShards(state, state.getRoutingNodes().shardsWithState(INITIALIZING));
    logger.info("--> starting replicas a random number of times");
    for (int i = 0; i < randomIntBetween(1, 10); i++) {
        state = cluster.applyStartedShards(state, state.getRoutingNodes().shardsWithState(INITIALIZING));
    }
    boolean keepGoing = true;
    while (keepGoing) {
        List<ShardRouting> primaries = state.getRoutingNodes().shardsWithState(STARTED).stream().filter(ShardRouting::primary).collect(Collectors.toList());
        // Pick a random subset of primaries to fail
        List<FailedShard> shardsToFail = new ArrayList<>();
        List<ShardRouting> failedPrimaries = randomSubsetOf(primaries);
        failedPrimaries.stream().forEach(sr -> {
            shardsToFail.add(new FailedShard(randomFrom(sr), "failed primary", new Exception(), randomBoolean()));
        });
        logger.info("--> state before failing shards: {}", state);
        state = cluster.applyFailedShards(state, shardsToFail);
        // Capture the post-failure state for use inside the lambdas below.
        final ClusterState compareState = state;
        failedPrimaries.forEach(shardRouting -> {
            logger.info("--> verifying version for {}", shardRouting);
            ShardRouting newPrimary = compareState.routingTable().index(shardRouting.index()).shard(shardRouting.id()).primaryShard();
            Version newPrimaryVersion = getNodeVersion(newPrimary, compareState);
            logger.info("--> new primary is on version {}: {}", newPrimaryVersion, newPrimary);
            // Every other started copy must be on a version <= the promoted primary's version.
            compareState.routingTable().shardRoutingTable(newPrimary.shardId()).shardsWithState(STARTED).stream().forEach(sr -> {
                Version candidateVer = getNodeVersion(sr, compareState);
                if (candidateVer != null) {
                    logger.info("--> candidate on {} node; shard routing: {}", candidateVer, sr);
                    assertTrue("candidate was not on the newest version, new primary is on " + newPrimaryVersion + " and there is a candidate on " + candidateVer, candidateVer.onOrBefore(newPrimaryVersion));
                }
            });
        });
        // Repeat the fail/verify cycle a random number of times.
        keepGoing = randomBoolean();
    }
    terminate(threadPool);
}
Also used : ClusterState(org.opensearch.cluster.ClusterState) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) ThreadPool(org.opensearch.threadpool.ThreadPool) TestThreadPool(org.opensearch.threadpool.TestThreadPool) ArrayList(java.util.ArrayList) TestThreadPool(org.opensearch.threadpool.TestThreadPool) ClusterStateChanges(org.opensearch.indices.cluster.ClusterStateChanges) ClusterRerouteRequest(org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest) Version(org.opensearch.Version) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) ShardRouting(org.opensearch.cluster.routing.ShardRouting) DiscoveryNodes(org.opensearch.cluster.node.DiscoveryNodes) Settings(org.opensearch.common.settings.Settings)

Example 5 with CreateIndexRequest

use of org.opensearch.action.admin.indices.create.CreateIndexRequest in project OpenSearch by opensearch-project.

From the class IndicesClusterStateServiceRandomUpdatesTests, method randomlyUpdateClusterState.

/**
 * Applies a random sequence of cluster-state mutations (block toggles, index
 * create/delete/close/open, settings updates, reroutes, shard start/fail,
 * node add/remove) and returns the resulting state. Used to fuzz
 * {@link IndicesClusterStateService} against arbitrary state transitions.
 *
 * @param state                  the cluster state to mutate
 * @param clusterStateServiceMap per-node cluster-state services, updated when nodes change
 * @param indicesServiceSupplier supplies mock indices services for newly added nodes
 * @return the mutated cluster state
 */
public ClusterState randomlyUpdateClusterState(ClusterState state, Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap, Supplier<MockIndicesService> indicesServiceSupplier) {
    // randomly remove no_master blocks
    if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) {
        state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)).build();
    }
    // randomly add no_master blocks
    if (rarely() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID) == false) {
        ClusterBlock block = randomBoolean() ? NoMasterBlockService.NO_MASTER_BLOCK_ALL : NoMasterBlockService.NO_MASTER_BLOCK_WRITES;
        state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build();
    }
    // if no_master block is in place, make no other cluster state changes
    if (state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) {
        return state;
    }
    // randomly create new indices (until we have 200 max)
    for (int i = 0; i < randomInt(5); i++) {
        if (state.metadata().indices().size() > 200) {
            break;
        }
        String name = "index_" + randomAlphaOfLength(15).toLowerCase(Locale.ROOT);
        Settings.Builder settingsBuilder = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3));
        if (randomBoolean()) {
            // exercise auto-expand replicas with either a bounded or open-ended range
            int min = randomInt(2);
            int max = min + randomInt(3);
            settingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, randomBoolean() ? min + "-" + max : min + "-all");
        } else {
            settingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        }
        CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metadata().hasIndex(name));
    }
    // randomly delete indices
    Set<String> indicesToDelete = new HashSet<>();
    int numberOfIndicesToDelete = randomInt(Math.min(2, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metadata().indices().keys().toArray(String.class))) {
        indicesToDelete.add(state.metadata().index(index).getIndex().getName());
    }
    if (indicesToDelete.isEmpty() == false) {
        DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()]));
        state = cluster.deleteIndices(state, deleteRequest);
        for (String index : indicesToDelete) {
            assertFalse(state.metadata().hasIndex(index));
        }
    }
    // randomly close indices
    int numberOfIndicesToClose = randomInt(Math.min(1, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToClose, state.metadata().indices().keys().toArray(String.class))) {
        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metadata().index(index).getIndex().getName());
        state = cluster.closeIndices(state, closeIndexRequest);
    }
    // randomly open indices
    int numberOfIndicesToOpen = randomInt(Math.min(1, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metadata().indices().keys().toArray(String.class))) {
        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metadata().index(index).getIndex().getName());
        state = cluster.openIndices(state, openIndexRequest);
    }
    // randomly update settings
    Set<String> indicesToUpdate = new HashSet<>();
    boolean containsClosedIndex = false;
    int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metadata().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metadata().indices().keys().toArray(String.class))) {
        indicesToUpdate.add(state.metadata().index(index).getIndex().getName());
        if (state.metadata().index(index).getState() == IndexMetadata.State.CLOSE) {
            containsClosedIndex = true;
        }
    }
    if (indicesToUpdate.isEmpty() == false) {
        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indicesToUpdate.toArray(new String[indicesToUpdate.size()]));
        Settings.Builder settings = Settings.builder();
        if (containsClosedIndex == false) {
            // replica count cannot be updated while a closed index is in the batch
            settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        }
        settings.put("index.refresh_interval", randomIntBetween(1, 5) + "s");
        updateSettingsRequest.settings(settings.build());
        state = cluster.updateSettings(state, updateSettingsRequest);
    }
    // randomly reroute
    if (rarely()) {
        state = cluster.reroute(state, new ClusterRerouteRequest());
    }
    // randomly start and fail allocated shards, based on each node's persisted routing entries
    final Map<ShardRouting, Long> startedShards = new HashMap<>();
    List<FailedShard> failedShards = new ArrayList<>();
    for (DiscoveryNode node : state.nodes()) {
        IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
        MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService;
        for (MockIndexService indexService : indicesService) {
            for (MockIndexShard indexShard : indexService) {
                ShardRouting persistedShardRouting = indexShard.routingEntry();
                if (persistedShardRouting.initializing() && randomBoolean()) {
                    startedShards.put(persistedShardRouting, indexShard.term());
                } else if (rarely()) {
                    failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception(), randomBoolean()));
                }
            }
        }
    }
    state = cluster.applyFailedShards(state, failedShards);
    state = cluster.applyStartedShards(state, startedShards);
    // randomly add and remove nodes (except current master)
    if (rarely()) {
        if (randomBoolean()) {
            // add node
            if (state.nodes().getSize() < 10) {
                state = cluster.addNodes(state, Collections.singletonList(createNode()));
                updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
            }
        } else {
            // remove node
            if (state.nodes().getDataNodes().size() > 3) {
                DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class));
                if (discoveryNode.equals(state.nodes().getMasterNode()) == false) {
                    state = cluster.removeNodes(state, Collections.singletonList(discoveryNode));
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
                if (randomBoolean()) {
                    // and add it back
                    state = cluster.addNodes(state, Collections.singletonList(discoveryNode));
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
            }
        }
    }
    return state;
}
Also used : DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) UpdateSettingsRequest(org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DeleteIndexRequest(org.opensearch.action.admin.indices.delete.DeleteIndexRequest) ClusterBlock(org.opensearch.cluster.block.ClusterBlock) ClusterRerouteRequest(org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest) Settings(org.opensearch.common.settings.Settings) HashSet(java.util.HashSet) OpenIndexRequest(org.opensearch.action.admin.indices.open.OpenIndexRequest) FailedShard(org.opensearch.cluster.routing.allocation.FailedShard) CloseIndexRequest(org.opensearch.action.admin.indices.close.CloseIndexRequest) CreateIndexRequest(org.opensearch.action.admin.indices.create.CreateIndexRequest) ShardRouting(org.opensearch.cluster.routing.ShardRouting)

Aggregations

CreateIndexRequest (org.opensearch.action.admin.indices.create.CreateIndexRequest)23 ClusterState (org.opensearch.cluster.ClusterState)9 IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata)9 ThreadPool (org.opensearch.threadpool.ThreadPool)8 IOException (java.io.IOException)6 TestThreadPool (org.opensearch.threadpool.TestThreadPool)6 ArrayList (java.util.ArrayList)5 Matchers.containsString (org.hamcrest.Matchers.containsString)5 ClusterRerouteRequest (org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest)5 ClusterName (org.opensearch.cluster.ClusterName)5 AliasMetadata (org.opensearch.cluster.metadata.AliasMetadata)5 IndexNameExpressionResolver (org.opensearch.cluster.metadata.IndexNameExpressionResolver)5 IndexTemplateMetadata (org.opensearch.cluster.metadata.IndexTemplateMetadata)5 Metadata (org.opensearch.cluster.metadata.Metadata)5 MetadataCreateIndexService (org.opensearch.cluster.metadata.MetadataCreateIndexService)5 ClusterService (org.opensearch.cluster.service.ClusterService)5 Settings (org.opensearch.common.settings.Settings)5 Index (org.opensearch.index.Index)5 List (java.util.List)4 Map (java.util.Map)4