Use of org.elasticsearch.cluster.routing.IndexRoutingTable in project elasticsearch by elastic.
From the class ClusterIndexHealthTests, the method testClusterIndexHealth:
public void testClusterIndexHealth() {
    RoutingTableGenerator routingTableGenerator = new RoutingTableGenerator();
    int numberOfShards = randomInt(3) + 1;
    int numberOfReplicas = randomInt(4);
    IndexMetaData indexMetaData = IndexMetaData.builder("test1")
        .settings(settings(Version.CURRENT))
        .numberOfShards(numberOfShards)
        .numberOfReplicas(numberOfReplicas)
        .build();
    RoutingTableGenerator.ShardCounter counter = new RoutingTableGenerator.ShardCounter();
    IndexRoutingTable indexRoutingTable = routingTableGenerator.genIndexRoutingTable(indexMetaData, counter);
    ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
    logger.info("index status: {}, expected {}", indexHealth.getStatus(), counter.status());
    assertIndexHealth(indexHealth, counter, indexMetaData);
}
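A by-hand version of the status computation can make the assertion above easier to follow. The sketch below is not part of the project; it assumes IndexShardRoutingTable.primaryShard(), activeShards() and size(), which are standard accessors but are not shown verbatim in this snippet.

static ClusterHealthStatus roughIndexStatus(IndexRoutingTable indexRoutingTable) {
    ClusterHealthStatus status = ClusterHealthStatus.GREEN;
    for (IndexShardRoutingTable shardTable : indexRoutingTable) {
        ShardRouting primary = shardTable.primaryShard();
        if (primary == null || primary.active() == false) {
            // an unassigned or inactive primary makes the whole index red
            return ClusterHealthStatus.RED;
        }
        if (shardTable.activeShards().size() < shardTable.size()) {
            // missing replica copies only degrade the index to yellow
            status = ClusterHealthStatus.YELLOW;
        }
    }
    return status;
}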
Use of org.elasticsearch.cluster.routing.IndexRoutingTable in project elasticsearch by elastic.
From the class ClusterStateHealthTests, the method testClusterHealth:
public void testClusterHealth() throws IOException {
    RoutingTableGenerator routingTableGenerator = new RoutingTableGenerator();
    RoutingTableGenerator.ShardCounter counter = new RoutingTableGenerator.ShardCounter();
    RoutingTable.Builder routingTable = RoutingTable.builder();
    MetaData.Builder metaData = MetaData.builder();
    for (int i = randomInt(4); i >= 0; i--) {
        int numberOfShards = randomInt(3) + 1;
        int numberOfReplicas = randomInt(4);
        IndexMetaData indexMetaData = IndexMetaData.builder("test_" + Integer.toString(i))
            .settings(settings(Version.CURRENT))
            .numberOfShards(numberOfShards)
            .numberOfReplicas(numberOfReplicas)
            .build();
        IndexRoutingTable indexRoutingTable = routingTableGenerator.genIndexRoutingTable(indexMetaData, counter);
        metaData.put(indexMetaData, true);
        routingTable.add(indexRoutingTable);
    }
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData)
        .routingTable(routingTable.build())
        .build();
    String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), (String[]) null);
    ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices);
    logger.info("cluster status: {}, expected {}", clusterStateHealth.getStatus(), counter.status());
    clusterStateHealth = maybeSerialize(clusterStateHealth);
    assertClusterHealth(clusterStateHealth, counter);
}
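Once a ClusterState like the one above is assembled, the per-index routing table can be pulled back out and inspected in isolation. The following is only an illustrative sketch built from accessors that appear elsewhere in these examples (metaData().index(...), routingTable().index(...), and the ClusterIndexHealth constructor); the index name "test_0" is just one of the names the loop above generates.

IndexMetaData meta = clusterState.metaData().index("test_0");
IndexRoutingTable routing = clusterState.routingTable().index("test_0");
if (meta != null && routing != null) {
    // per-index drill-down of the cluster-wide health computed above
    ClusterIndexHealth indexHealth = new ClusterIndexHealth(meta, routing);
    logger.info("index [test_0] status: {}", indexHealth.getStatus());
}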
Use of org.elasticsearch.cluster.routing.IndexRoutingTable in project elasticsearch by elastic.
From the class TransportIndicesShardStoresAction, the method masterOperation:
@Override
protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener<IndicesShardStoresResponse> listener) {
    final RoutingTable routingTables = state.routingTable();
    final RoutingNodes routingNodes = state.getRoutingNodes();
    final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
    final Set<ShardId> shardIdsToFetch = new HashSet<>();
    logger.trace("using cluster state version [{}] to determine shards", state.version());
    // collect relevant shard ids of the requested indices for fetching store infos
    for (String index : concreteIndices) {
        IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
        if (indexShardRoutingTables == null) {
            continue;
        }
        for (IndexShardRoutingTable routing : indexShardRoutingTables) {
            final int shardId = routing.shardId().id();
            ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing);
            if (request.shardStatuses().contains(shardHealth.getStatus())) {
                shardIdsToFetch.add(routing.shardId());
            }
        }
    }
    // async fetch store infos from all the nodes
    // NOTE: instead of fetching shard store info one by one from every node (nShards * nNodes requests)
    // we could fetch all shard store info from every node once (nNodes requests)
    // we have to implement a TransportNodesAction instead of using TransportNodesListGatewayStartedShards
    // for fetching shard stores info, that operates on a list of shards instead of a single shard
    new AsyncShardStoresInfoFetches(state.nodes(), routingNodes, shardIdsToFetch, listener).start();
}
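The filter in the loop above is driven by the shard statuses the caller asked for. As a hedged, stand-alone sketch of the same idea, the snippet below collects the ids of every shard that is not green for a single index; it reuses only the iteration, the ClusterShardHealth constructor, and getStatus() as shown above, and the index name "my_index" is purely illustrative.

Set<ShardId> problemShards = new HashSet<>();
IndexRoutingTable indexTable = state.routingTable().index("my_index");
if (indexTable != null) {
    for (IndexShardRoutingTable shardTable : indexTable) {
        ClusterShardHealth health = new ClusterShardHealth(shardTable.shardId().id(), shardTable);
        if (health.getStatus() != ClusterHealthStatus.GREEN) {
            // anything yellow or red is a candidate for a store-info fetch
            problemShards.add(shardTable.shardId());
        }
    }
}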
Use of org.elasticsearch.cluster.routing.IndexRoutingTable in project elasticsearch by elastic.
From the class MetaDataCreateIndexService, the method validateShrinkIndex:
/**
 * Validates the settings and mappings for shrinking an index.
 * @return the list of nodes that hold at least one copy of every shard of the source index
 */
static List<String> validateShrinkIndex(ClusterState state, String sourceIndex, Set<String> targetIndexMappingsTypes, String targetIndexName, Settings targetIndexSettings) {
    if (state.metaData().hasIndex(targetIndexName)) {
        throw new ResourceAlreadyExistsException(state.metaData().index(targetIndexName).getIndex());
    }
    final IndexMetaData sourceMetaData = state.metaData().index(sourceIndex);
    if (sourceMetaData == null) {
        throw new IndexNotFoundException(sourceIndex);
    }
    // ensure index is read-only
    if (state.blocks().indexBlocked(ClusterBlockLevel.WRITE, sourceIndex) == false) {
        throw new IllegalStateException("index " + sourceIndex + " must be read-only to shrink index. use \"index.blocks.write=true\"");
    }
    if (sourceMetaData.getNumberOfShards() == 1) {
        throw new IllegalArgumentException("can't shrink an index with only one shard");
    }
    if ((targetIndexMappingsTypes.size() > 1 || (targetIndexMappingsTypes.isEmpty() || targetIndexMappingsTypes.contains(MapperService.DEFAULT_MAPPING)) == false)) {
        throw new IllegalArgumentException("mappings are not allowed when shrinking indices" + ", all mappings are copied from the source index");
    }
    if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
        // this method applies all necessary checks, i.e. that the target has fewer shards than the source
        // and that the source shard count is divisible by the number of target shards
        IndexMetaData.getRoutingFactor(sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));
    }
    // now check that the index is all on one node
    final IndexRoutingTable table = state.routingTable().index(sourceIndex);
    Map<String, AtomicInteger> nodesToNumRouting = new HashMap<>();
    int numShards = sourceMetaData.getNumberOfShards();
    for (ShardRouting routing : table.shardsWithState(ShardRoutingState.STARTED)) {
        nodesToNumRouting.computeIfAbsent(routing.currentNodeId(), (s) -> new AtomicInteger(0)).incrementAndGet();
    }
    List<String> nodesToAllocateOn = new ArrayList<>();
    for (Map.Entry<String, AtomicInteger> entries : nodesToNumRouting.entrySet()) {
        int numAllocations = entries.getValue().get();
        assert numAllocations <= numShards : "wait what? " + numAllocations + " is > than num shards " + numShards;
        if (numAllocations == numShards) {
            nodesToAllocateOn.add(entries.getKey());
        }
    }
    if (nodesToAllocateOn.isEmpty()) {
        throw new IllegalStateException("index " + sourceIndex + " must have all shards allocated on the same node to shrink index");
    }
    return nodesToAllocateOn;
}
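The getRoutingFactor call is where the shard-count arithmetic is enforced. A small illustration (the numbers and the eight-shard source index are assumptions, not taken from the snippet): shrinking an 8-shard source to 2 target shards gives a routing factor of 8 / 2 = 4, meaning each target shard absorbs four source shards, while a request such as 8 to 3 is rejected because the counts do not divide evenly.

// illustration only: assumes sourceMetaData describes an index with 8 shards
int routingFactor = IndexMetaData.getRoutingFactor(sourceMetaData, 2);  // 8 / 2 = 4
assert routingFactor * 2 == sourceMetaData.getNumberOfShards();
// getRoutingFactor(sourceMetaData, 3) would be rejected here, since 8 is not divisible by 3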
Use of org.elasticsearch.cluster.routing.IndexRoutingTable in project elasticsearch by elastic.
From the class NodeJoinControllerTests, the method testElectionBasedOnConflictingNodes:
/**
 * Tests that a node can become master even though the last cluster state it knows about
 * contains nodes that conflict with the joins it has received.
 */
public void testElectionBasedOnConflictingNodes() throws InterruptedException, ExecutionException {
    final DiscoveryNode masterNode = clusterService.localNode();
    final DiscoveryNode otherNode = new DiscoveryNode("other_node", buildNewFakeTransportAddress(), emptyMap(),
        EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT);
    // simulate the master going down with stale nodes in its cluster state (for example when minimum master nodes is set to 2)
    // also add some shards to that node
    DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
    discoBuilder.masterNodeId(null);
    discoBuilder.add(otherNode);
    ClusterState.Builder stateBuilder = ClusterState.builder(clusterService.state()).nodes(discoBuilder);
    if (randomBoolean()) {
        IndexMetaData indexMetaData = IndexMetaData.builder("test")
            .settings(Settings.builder()
                .put(SETTING_VERSION_CREATED, Version.CURRENT)
                .put(SETTING_NUMBER_OF_SHARDS, 1)
                .put(SETTING_NUMBER_OF_REPLICAS, 1)
                .put(SETTING_CREATION_DATE, System.currentTimeMillis()))
            .build();
        IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetaData.getIndex());
        RoutingTable.Builder routing = new RoutingTable.Builder();
        routing.addAsNew(indexMetaData);
        final ShardId shardId = new ShardId("test", "_na_", 0);
        IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
        final DiscoveryNode primaryNode = randomBoolean() ? masterNode : otherNode;
        final DiscoveryNode replicaNode = primaryNode.equals(masterNode) ? otherNode : masterNode;
        final boolean primaryStarted = randomBoolean();
        indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, primaryNode.getId(), null, true,
            primaryStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
            primaryStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "getting there")));
        if (primaryStarted) {
            boolean replicaStarted = randomBoolean();
            indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, replicaNode.getId(), null, false,
                replicaStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
                replicaStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "getting there")));
        } else {
            indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, null, null, false,
                ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "life sucks")));
        }
        indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
        IndexRoutingTable indexRoutingTable = indexRoutingTableBuilder.build();
        IndexMetaData updatedIndexMetaData = updateActiveAllocations(indexRoutingTable, indexMetaData);
        stateBuilder.metaData(MetaData.builder().put(updatedIndexMetaData, false).generateClusterUuidIfNeeded())
            .routingTable(RoutingTable.builder().add(indexRoutingTable).build());
    }
    setState(clusterService, stateBuilder.build());
    // conflict on node id or address
    final DiscoveryNode conflictingNode = randomBoolean()
        ? new DiscoveryNode(otherNode.getId(), randomBoolean() ? otherNode.getAddress() : buildNewFakeTransportAddress(),
            otherNode.getAttributes(), otherNode.getRoles(), Version.CURRENT)
        : new DiscoveryNode("conflicting_address_node", otherNode.getAddress(), otherNode.getAttributes(),
            otherNode.getRoles(), Version.CURRENT);
    nodeJoinController.startElectionContext();
    final SimpleFuture joinFuture = joinNodeAsync(conflictingNode);
    final CountDownLatch elected = new CountDownLatch(1);
    nodeJoinController.waitToBeElectedAsMaster(1, TimeValue.timeValueHours(5), new NodeJoinController.ElectionCallback() {

        @Override
        public void onElectedAsMaster(ClusterState state) {
            elected.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            logger.error("failed to be elected as master", t);
            throw new AssertionError("failed to be elected as master", t);
        }
    });
    elected.await();
    // rethrow any exception from the join
    joinFuture.get();
    final ClusterState finalState = clusterService.state();
    final DiscoveryNodes finalNodes = finalState.nodes();
    assertTrue(finalNodes.isLocalNodeElectedMaster());
    assertThat(finalNodes.getLocalNode(), equalTo(masterNode));
    assertThat(finalNodes.getSize(), equalTo(2));
    assertThat(finalNodes.get(conflictingNode.getId()), equalTo(conflictingNode));
    List<ShardRouting> activeShardsOnRestartedNode = StreamSupport
        .stream(finalState.getRoutingNodes().node(conflictingNode.getId()).spliterator(), false)
        .filter(ShardRouting::active)
        .collect(Collectors.toList());
    assertThat(activeShardsOnRestartedNode, empty());
}
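The hand-built routing table in the randomBoolean() branch is a pattern that recurs across these examples. Stripped of the test's randomization, a hedged minimal version looks like the sketch below; every builder call is taken from the snippet above, and only the node id "node_1" and the fixed shard counts are illustrative.

// distilled sketch: one index, a single primary shard, already started on a known node
IndexMetaData indexMetaData = IndexMetaData.builder("test")
    .settings(settings(Version.CURRENT))
    .numberOfShards(1)
    .numberOfReplicas(0)
    .build();
ShardId shardId = new ShardId("test", "_na_", 0);
IndexShardRoutingTable.Builder shardBuilder = new IndexShardRoutingTable.Builder(shardId);
shardBuilder.addShard(TestShardRouting.newShardRouting("test", 0, "node_1", null, true, ShardRoutingState.STARTED, null));
IndexRoutingTable.Builder indexBuilder = IndexRoutingTable.builder(indexMetaData.getIndex());
indexBuilder.addIndexShard(shardBuilder.build());
IndexRoutingTable indexRoutingTable = indexBuilder.build();
// wrap it in a cluster-wide RoutingTable, as the test does before calling setState(...)
RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTable).build();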