Use of org.elasticsearch.cluster.routing.RoutingNodes in project elasticsearch by elastic: the class BaseGatewayShardAllocator, method allocateUnassigned.
/**
 * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist.
 * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)}
 * to make decisions on assigning shards to nodes.
 *
 * @param allocation the allocation state container object
 */
public void allocateUnassigned(RoutingAllocation allocation) {
    final RoutingNodes routingNodes = allocation.routingNodes();
    final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
    while (unassignedIterator.hasNext()) {
        final ShardRouting shard = unassignedIterator.next();
        final AllocateUnassignedDecision allocateUnassignedDecision = makeAllocationDecision(shard, allocation, logger);
        if (allocateUnassignedDecision.isDecisionTaken() == false) {
            // no decision was taken by this allocator
            continue;
        }
        if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) {
            unassignedIterator.initialize(allocateUnassignedDecision.getTargetNode().getId(),
                allocateUnassignedDecision.getAllocationId(),
                shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE : allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
                allocation.changes());
        } else {
            unassignedIterator.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes());
        }
    }
}
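For orientation, here is a minimal sketch of what a concrete subclass has to supply: the only abstract piece is makeAllocationDecision, and returning a decision whose isDecisionTaken() is false causes allocateUnassigned to skip the shard. The class name is hypothetical and imports are omitted, as in the other excerpts on this page; it assumes the AllocateUnassignedDecision.NOT_TAKEN constant used by the allocators in this codebase.

public class NoOpGatewayShardAllocator extends BaseGatewayShardAllocator {

    @Override
    public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard, RoutingAllocation allocation, Logger logger) {
        // NOT_TAKEN reports isDecisionTaken() == false, so allocateUnassigned() above simply moves on to the next shard
        return AllocateUnassignedDecision.NOT_TAKEN;
    }
}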
Use of org.elasticsearch.cluster.routing.RoutingNodes in project elasticsearch by elastic: the class SingleShardOneReplicaRoutingTests, method testSingleIndexFirstStartPrimaryThenBackups.
public void testSingleIndexFirstStartPrimaryThenBackups() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)).build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
assertThat(clusterState.routingTable().index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
ClusterState newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, equalTo(clusterState));
logger.info("Start the primary shard (on node1)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
// backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
logger.info("Reroute, nothing should change");
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, equalTo(clusterState));
logger.info("Start the backup shard");
routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
logger.info("Kill node1, backup shard should become primary");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
newState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
// the replica stays unassigned: the only remaining node (node2) already holds the primary, so there is nowhere to allocate it
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
logger.info("Start another node, backup shard should start initializing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
// backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
}
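The test repeats one idiom: collect a node's INITIALIZING shards through RoutingNodes and hand them to AllocationService.applyStartedShards, which returns a new ClusterState rather than mutating the old one. A hypothetical helper (not part of the test class, name invented here) that captures that step using only the calls shown above:

private ClusterState startInitializingShardsOnNode(AllocationService strategy, ClusterState clusterState, String nodeId) {
    // RoutingNodes gives per-node access to the shard routings of the current cluster state
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    return strategy.applyStartedShards(clusterState, routingNodes.node(nodeId).shardsWithState(INITIALIZING));
}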
Use of org.elasticsearch.cluster.routing.RoutingNodes in project elasticsearch by elastic: the class UpdateNumberOfReplicasTests, method testUpdateNumberOfReplicas.
public void testUpdateNumberOfReplicas() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)).build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
assertThat(initialRoutingTable.index("test").shards().size(), equalTo(1));
assertThat(initialRoutingTable.index("test").shard(0).size(), equalTo(2));
assertThat(initialRoutingTable.index("test").shard(0).shards().size(), equalTo(2));
assertThat(initialRoutingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(initialRoutingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(initialRoutingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
assertThat(initialRoutingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start all the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
logger.info("Start all the replica shards");
routingNodes = clusterState.getRoutingNodes();
ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
final String nodeHoldingPrimary = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
final String nodeHoldingReplica = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
logger.info("add another replica");
routingNodes = clusterState.getRoutingNodes();
RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(2).build();
metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(2).build();
clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metaData(metaData).build();
assertThat(clusterState.metaData().index("test").getNumberOfReplicas(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
logger.info("Add another node and start the added replica");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
logger.info("now remove a replica");
routingNodes = clusterState.getRoutingNodes();
updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(1).build();
metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metaData(metaData).build();
assertThat(clusterState.metaData().index("test").getNumberOfReplicas(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
logger.info("do a reroute, should remain the same");
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, equalTo(clusterState));
}
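Note the pattern the test relies on when changing the replica count: the RoutingTable and the MetaData both have to be rebuilt with the new value before the ClusterState is rebuilt, otherwise the routing table and the index metadata would disagree. A hypothetical helper (name invented here) condensing the three steps used above:

private static ClusterState withNumberOfReplicas(ClusterState state, int numberOfReplicas) {
    // same three-step update as in the test: routing table, metadata, then the cluster state itself
    RoutingTable routingTable = RoutingTable.builder(state.routingTable()).updateNumberOfReplicas(numberOfReplicas).build();
    MetaData metaData = MetaData.builder(state.metaData()).updateNumberOfReplicas(numberOfReplicas).build();
    return ClusterState.builder(state).routingTable(routingTable).metaData(metaData).build();
}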
Use of org.elasticsearch.cluster.routing.RoutingNodes in project elasticsearch by elastic: the class DiskThresholdDeciderTests, method testCanRemainWithShardRelocatingAway.
public void testCanRemainWithShardRelocatingAway() {
Settings diskSettings = Settings.builder()
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%")
    .build();
// Index "test" has 2 primary shards of 40 bytes each (index "foo" adds one 10-byte shard); each node has 100 bytes of disk
ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
// 80% used
usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 20));
// 0% used
usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 100));
ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
shardSizesBuilder.put("[test][0][p]", 40L);
shardSizesBuilder.put("[test][1][p]", 40L);
shardSizesBuilder.put("[foo][0][p]", 10L);
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings);
MetaData metaData = MetaData.builder()
    .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
    .put(IndexMetaData.builder("foo").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
    .build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("foo")).build();
DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).nodes(discoveryNodes).build();
// node1 holds two 40-byte shards and is at 80% disk usage while the high watermark allows only 70%, so shard 0 isn't allowed to remain here
ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED);
RoutingNode firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
RoutingTable.Builder builder = RoutingTable.builder()
    .add(IndexRoutingTable.builder(firstRouting.index())
        .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
        .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), false);
routingAllocation.debugDecision(true);
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.NO));
assertThat(((Decision.Single) decision).getExplanation(), containsString("the shard cannot remain on this node because it is above the high watermark cluster setting " + "[cluster.routing.allocation.disk.watermark.high=70%] and there is less than the required [30.0%] free disk on node, " + "actual free: [20.0%]"));
// node1 is still at 80% disk usage, but shard 1 is relocating away; once its size is subtracted node1 is below the high watermark, so shard 0 can stay
firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", "node2", true, ShardRoutingState.RELOCATING);
ShardRouting fooRouting = TestShardRouting.newShardRouting("foo", 0, "node1", null, true, ShardRoutingState.UNASSIGNED);
firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
builder = RoutingTable.builder()
    .add(IndexRoutingTable.builder(firstRouting.index())
        .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
        .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), false);
routingAllocation.debugDecision(true);
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));
assertEquals("there is enough disk on this node for the shard to remain, free: [60b]", ((Decision.Single) decision).getExplanation());
decision = diskThresholdDecider.canAllocate(fooRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.NO));
if (fooRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE) {
    assertThat(((Decision.Single) decision).getExplanation(), containsString("the node is above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=70%], using " + "more disk space than the maximum allowed [70.0%], actual free: [20.0%]"));
} else {
    assertThat(((Decision.Single) decision).getExplanation(), containsString("the node is above the low watermark cluster setting [cluster.routing.allocation.disk.watermark.low=60%], using more " + "disk space than the maximum allowed [60.0%], actual free: [20.0%]"));
}
// Creating AllocationService instance and the services it depends on...
ClusterInfoService cis = new ClusterInfoService() {

    @Override
    public ClusterInfo getClusterInfo() {
        logger.info("--> calling fake getClusterInfo");
        return clusterInfo;
    }

    @Override
    public void addListener(Listener listener) {
        // noop
    }
};
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
    new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
    diskThresholdDecider)));
AllocationService strategy = new AllocationService(Settings.builder()
    .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
    .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
    .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
    .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
// Ensure that the reroute call doesn't alter the routing table: shard [test][1] is already relocating away
// and therefore node1 will have sufficient disk space.
ClusterState result = strategy.reroute(clusterState, "reroute");
assertThat(result, equalTo(clusterState));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node1"));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(RELOCATING));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node1"));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node2"));
}
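The numbers behind the two canRemain decisions, spelled out. This is illustrative arithmetic only, not the decider's actual implementation:

// node1: 100 bytes total, 20 bytes free -> 80% used, above the 70% high watermark -> canRemain is NO
// once [test][1][p] (40 bytes) is RELOCATING away, its size no longer counts against node1:
// effective free = 20 + 40 = 60 bytes -> 40% used, below the watermark -> canRemain is YES ("free: [60b]")
long totalBytes = 100, freeBytes = 20, relocatingShardBytes = 40;
double usedPercent = 100.0 * (totalBytes - freeBytes) / totalBytes;                                       // 80.0
double usedPercentAfterRelocation = 100.0 * (totalBytes - freeBytes - relocatingShardBytes) / totalBytes; // 40.0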
Use of org.elasticsearch.cluster.routing.RoutingNodes in project elasticsearch by elastic: the class FailedShardsRoutingTests, method testRebalanceFailure.
public void testRebalanceFailure() {
AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1)).build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
logger.info("Start the shards (primaries)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
    assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
    assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
    assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Start the shards (backups)");
routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
    assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
    assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Adding third node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
logger.info("Fail the shards on node 3");
ShardRouting shardToFail = routingNodes.node("node3").iterator().next();
newState = strategy.applyFailedShard(clusterState, shardToFail);
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
// make sure the failedShard is not INITIALIZING again on node3
assertThat(routingNodes.node("node3").iterator().next().shardId(), not(equalTo(shardToFail.shardId())));
}
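The closing fail-and-verify step is what this test is really about; a hypothetical helper (name invented here) extracting it with the same calls used above, assuming, as in the test, that the node still has a shard assigned after the failure:

private ClusterState failOneShardOnNode(AllocationService strategy, ClusterState clusterState, String nodeId) {
    // pick a shard currently assigned to the node and fail it; applyFailedShard returns a new state
    ShardRouting shardToFail = clusterState.getRoutingNodes().node(nodeId).iterator().next();
    ClusterState newState = strategy.applyFailedShard(clusterState, shardToFail);
    // the failed copy must not be INITIALIZING on the same node again
    assertThat(newState.getRoutingNodes().node(nodeId).iterator().next().shardId(), not(equalTo(shardToFail.shardId())));
    return newState;
}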