Use of org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS in the OpenSearch project (opensearch-project).
From the class MetadataCreateIndexServiceTests, method testTemplateOrder:
public void testTemplateOrder() throws Exception {
    List<IndexTemplateMetadata> templates = new ArrayList<>(3);
    // Three matching templates with conflicting shard counts and alias routing; the highest order should win.
    templates.add(addMatchingTemplate(builder -> builder.order(3)
        .settings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 12))
        .putAlias(AliasMetadata.builder("alias1").writeIndex(true).searchRouting("3").build())));
    templates.add(addMatchingTemplate(builder -> builder.order(2)
        .settings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 11))
        .putAlias(AliasMetadata.builder("alias1").searchRouting("2").build())));
    templates.add(addMatchingTemplate(builder -> builder.order(1)
        .settings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 10))
        .putAlias(AliasMetadata.builder("alias1").searchRouting("1").build())));
    Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request,
        MetadataIndexTemplateService.resolveSettings(templates), null, Settings.EMPTY,
        IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), Collections.emptySet());
    List<AliasMetadata> resolvedAliases = resolveAndValidateAliases(request.index(), request.aliases(),
        MetadataIndexTemplateService.resolveAliases(templates), Metadata.builder().build(),
        aliasValidator, xContentRegistry(), queryShardContext);
    // The settings and alias definition from the order-3 template take precedence.
    assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("12"));
    AliasMetadata alias = resolvedAliases.get(0);
    assertThat(alias.getSearchRouting(), equalTo("3"));
    assertThat(alias.writeIndex(), is(true));
}
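For context, the precedence the assertions above rely on can be sketched with a plain Settings builder. This is a standalone illustration, not code from the test; it assumes the same imports as the snippet above and that resolveSettings merges matching templates in ascending order, so a later put for the same key overwrites an earlier one.
// Standalone sketch: a later put for the same key overwrites an earlier one,
// which is why the order-3 template's shard count wins in the test above.
Settings merged = Settings.builder()
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 10)  // from the order-1 template
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 11)  // from the order-2 template
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 12)  // from the order-3 template
    .build();
// merged.get(IndexMetadata.SETTING_NUMBER_OF_SHARDS) is now "12"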
Use of org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS in the OpenSearch project (opensearch-project).
From the class AutoExpandReplicasTests, method testAutoExpandWhenNodeLeavesAndPossiblyRejoins:
/**
* Checks that when nodes leave the cluster, the auto-expand-replica functionality only triggers after the shards on the
* removed nodes have been failed. This ensures that active shards on other live nodes are not failed if the primary resided
* on a now dead node. Instead, one of the replicas on the live nodes is first promoted to primary, and the auto-expansion
* (removing replicas) only triggers in a follow-up step.
*/
public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedException {
    final ThreadPool threadPool = new TestThreadPool(getClass().getName());
    final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
    try {
        List<DiscoveryNode> allNodes = new ArrayList<>();
        // local node is the master
        DiscoveryNode localNode = createNode(DiscoveryNodeRole.MASTER_ROLE);
        allNodes.add(localNode);
        int numDataNodes = randomIntBetween(3, 5);
        List<DiscoveryNode> dataNodes = new ArrayList<>(numDataNodes);
        for (int i = 0; i < numDataNodes; i++) {
            dataNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE));
        }
        allNodes.addAll(dataNodes);
        ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
        CreateIndexRequest request = new CreateIndexRequest(
            "index",
            Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_AUTO_EXPAND_REPLICAS, "0-all").build()
        ).waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metadata().hasIndex("index"));
        while (state.routingTable().index("index").shard(0).allShardsStarted() == false) {
            logger.info(state);
            state = cluster.applyStartedShards(
                state,
                state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING)
            );
            state = cluster.reroute(state, new ClusterRerouteRequest());
        }
        IndexShardRoutingTable preTable = state.routingTable().index("index").shard(0);
        final Set<String> unchangedNodeIds;
        final IndexShardRoutingTable postTable;
        if (randomBoolean()) {
            // simulate node removal
            List<DiscoveryNode> nodesToRemove = randomSubsetOf(2, dataNodes);
            unchangedNodeIds = dataNodes.stream()
                .filter(n -> nodesToRemove.contains(n) == false)
                .map(DiscoveryNode::getId)
                .collect(Collectors.toSet());
            state = cluster.removeNodes(state, nodesToRemove);
            postTable = state.routingTable().index("index").shard(0);
            assertTrue("not all shards started in " + state.toString(), postTable.allShardsStarted());
            assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(is(in(preTable.getAllAllocationIds()))));
        } else {
            // fake an election where conflicting nodes are removed and readded
            state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build();
            List<DiscoveryNode> conflictingNodes = randomSubsetOf(2, dataNodes);
            unchangedNodeIds = dataNodes.stream()
                .filter(n -> conflictingNodes.contains(n) == false)
                .map(DiscoveryNode::getId)
                .collect(Collectors.toSet());
            List<DiscoveryNode> nodesToAdd = conflictingNodes.stream()
                .map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(), n.getAttributes(), n.getRoles(), n.getVersion()))
                .collect(Collectors.toList());
            if (randomBoolean()) {
                nodesToAdd.add(createNode(DiscoveryNodeRole.DATA_ROLE));
            }
            state = cluster.joinNodesAndBecomeMaster(state, nodesToAdd);
            postTable = state.routingTable().index("index").shard(0);
        }
        Set<String> unchangedAllocationIds = preTable.getShards().stream()
            .filter(shr -> unchangedNodeIds.contains(shr.currentNodeId()))
            .map(shr -> shr.allocationId().getId())
            .collect(Collectors.toSet());
        assertThat(postTable.toString(), unchangedAllocationIds, everyItem(is(in(postTable.getAllAllocationIds()))));
        postTable.getShards().forEach(shardRouting -> {
            if (shardRouting.assignedToNode() && unchangedAllocationIds.contains(shardRouting.allocationId().getId())) {
                assertTrue("Shard should be active: " + shardRouting, shardRouting.active());
            }
        });
    } finally {
        terminate(threadPool);
    }
}
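The request above pairs SETTING_NUMBER_OF_SHARDS with SETTING_AUTO_EXPAND_REPLICAS. As a minimal standalone sketch (not taken from the test, and assuming the same imports), the auto-expand value is a lower and an upper bound on the replica count, where the upper bound may be a number or "all":
// Standalone sketch: one primary shard; the replica count follows the number of
// other data nodes, from 0 up to all of them ("0-all", the value used in the test above).
Settings autoExpandAll = Settings.builder()
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
    .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")
    .build();

// A bounded variant: never more than one replica, regardless of cluster size.
Settings autoExpandBounded = Settings.builder()
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
    .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1")
    .build();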
Use of org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS in the OpenSearch project (opensearch-project).
From the class TransportReplicationAllPermitsAcquisitionTests, method setUp:
@Override
@Before
public void setUp() throws Exception {
    super.setUp();
    globalBlock = randomBoolean();
    RestStatus restStatus = randomFrom(RestStatus.values());
    block = new ClusterBlock(randomIntBetween(1, 10), randomAlphaOfLength(5), false, true, false, restStatus, ClusterBlockLevel.ALL);
    clusterService = createClusterService(threadPool);
    final ClusterState.Builder state = ClusterState.builder(clusterService.state());
    Set<DiscoveryNodeRole> roles = new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES);
    DiscoveryNode node1 = new DiscoveryNode("_name1", "_node1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
    DiscoveryNode node2 = new DiscoveryNode("_name2", "_node2", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
    state.nodes(DiscoveryNodes.builder().add(node1).add(node2).localNodeId(node1.getId()).masterNodeId(node1.getId()));
    shardId = new ShardId("index", UUID.randomUUID().toString(), 0);
    ShardRouting shardRouting = newShardRouting(shardId, node1.getId(), true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
    Settings indexSettings = Settings.builder()
        .put(SETTING_VERSION_CREATED, Version.CURRENT)
        .put(SETTING_INDEX_UUID, shardId.getIndex().getUUID())
        .put(SETTING_NUMBER_OF_SHARDS, 1)
        .put(SETTING_NUMBER_OF_REPLICAS, 1)
        .put(SETTING_CREATION_DATE, System.currentTimeMillis())
        .build();
    primary = newStartedShard(p -> newShard(shardRouting, indexSettings, new InternalEngineFactory()), true);
    for (int i = 0; i < 10; i++) {
        final String id = Integer.toString(i);
        indexDoc(primary, "_doc", id, "{\"value\":" + id + "}");
    }
    IndexMetadata indexMetadata = IndexMetadata.builder(shardId.getIndexName())
        .settings(indexSettings)
        .primaryTerm(shardId.id(), primary.getOperationPrimaryTerm())
        .putMapping("_doc", "{ \"properties\": { \"value\": { \"type\": \"short\"}}}")
        .build();
    state.metadata(Metadata.builder().put(indexMetadata, false).generateClusterUuidIfNeeded());
    replica = newShard(primary.shardId(), false, node2.getId(), indexMetadata, null);
    recoverReplica(replica, primary, true);
    IndexRoutingTable.Builder routing = IndexRoutingTable.builder(indexMetadata.getIndex());
    routing.addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(primary.routingEntry()).build());
    state.routingTable(RoutingTable.builder().add(routing.build()).build());
    setState(clusterService, state.build());
    final Settings transportSettings = Settings.builder().put("node.name", node1.getId()).build();
    MockTransport transport = new MockTransport() {
        @Override
        protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
            assertThat(action, allOf(startsWith("cluster:admin/test/"), endsWith("[r]")));
            assertThat(node, equalTo(node2));
            // node2 doesn't really exist, but we are performing some trickery in mockIndicesService() to pretend
            // that node1 holds both the primary and the replica, so redirect the request back to node1.
            transportService.sendRequest(
                transportService.getLocalNode(),
                action,
                request,
                new TransportResponseHandler<TransportReplicationAction.ReplicaResponse>() {
                    @Override
                    public TransportReplicationAction.ReplicaResponse read(StreamInput in) throws IOException {
                        return new TransportReplicationAction.ReplicaResponse(in);
                    }

                    @SuppressWarnings("unchecked")
                    private TransportResponseHandler<TransportReplicationAction.ReplicaResponse> getResponseHandler() {
                        return (TransportResponseHandler<TransportReplicationAction.ReplicaResponse>) getResponseHandlers()
                            .onResponseReceived(requestId, TransportMessageListener.NOOP_LISTENER);
                    }

                    @Override
                    public void handleResponse(TransportReplicationAction.ReplicaResponse response) {
                        getResponseHandler().handleResponse(response);
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        getResponseHandler().handleException(exp);
                    }

                    @Override
                    public String executor() {
                        return ThreadPool.Names.SAME;
                    }
                }
            );
        }
    };
    transportService = transport.createTransportService(
        transportSettings, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, bta -> node1, null, emptySet());
    transportService.start();
    transportService.acceptIncomingRequests();
    shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool);
}
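The same settings constants appear again when index metadata is built directly, as in the indexMetadata construction above. A minimal standalone sketch follows, using a hypothetical index name "example" and only constants already used in the snippet; the shard and replica counts are mandatory before IndexMetadata.Builder#build() succeeds.
// Standalone sketch: builds bare index metadata with one shard and one replica.
// SETTING_NUMBER_OF_SHARDS and SETTING_NUMBER_OF_REPLICAS must be present;
// SETTING_VERSION_CREATED records which version created the index.
IndexMetadata exampleMetadata = IndexMetadata.builder("example")
    .settings(Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))
    .build();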