Use of org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand in project elasticsearch by elastic.
Class AckIT, method testClusterRerouteAcknowledgement:
public void testClusterRerouteAcknowledgement() throws InterruptedException {
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(indexSettings())
        .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
        .put(SETTING_NUMBER_OF_REPLICAS, 0)));
    ensureGreen();

    MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
    final Index index = client().admin().cluster().prepareState().get().getState().metaData().index("test").getIndex();
    final ShardId commandShard = new ShardId(index, moveAllocationCommand.shardId());

    assertAcked(client().admin().cluster().prepareReroute().add(moveAllocationCommand));

    for (Client client : clients()) {
        ClusterState clusterState = getLocalClusterState(client);
        for (ShardRouting shardRouting : clusterState.getRoutingNodes().node(moveAllocationCommand.fromNode())) {
            // if the shard that we wanted to move is still on the same node, it must be relocating
            if (shardRouting.shardId().equals(commandShard)) {
                assertThat(shardRouting.relocating(), equalTo(true));
            }
        }
        boolean found = false;
        for (ShardRouting shardRouting : clusterState.getRoutingNodes().node(moveAllocationCommand.toNode())) {
            if (shardRouting.shardId().equals(commandShard)) {
                assertThat(shardRouting.state(), anyOf(equalTo(ShardRoutingState.INITIALIZING), equalTo(ShardRoutingState.STARTED)));
                found = true;
                break;
            }
        }
        assertThat(found, equalTo(true));
    }
}
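The getAllocationCommand() helper used by these AckIT tests is not part of this listing; presumably it picks a source and target data node from the current cluster state. Below is a minimal, hypothetical sketch of the command it would build, using only the MoveAllocationCommand constructor that appears further down in DiskThresholdDeciderTests (index, shardId, fromNode, toNode). The method name and the fromNode/toNode parameters are illustrative, not the real helper.

    // Hypothetical sketch, not the real AckIT helper: build a command that moves
    // shard 0 of the "test" index from one named data node to another.
    private MoveAllocationCommand buildMoveCommand(String fromNode, String toNode) {
        return new MoveAllocationCommand("test", 0, fromNode, toNode);
    }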
Use of org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand in project elasticsearch by elastic.
Class AckIT, method testClusterRerouteNoAcknowledgement:
public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
    client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
        .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
        .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
    ensureGreen();

    MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
    ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute()
        .setTimeout("0s").add(moveAllocationCommand).get();
    assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(false));
}
Use of org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand in project elasticsearch by elastic.
Class AckIT, method testClusterRerouteAcknowledgementDryRun:
public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
    client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
        .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
        .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
    ensureGreen();

    MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
    final Index index = client().admin().cluster().prepareState().get().getState().metaData().index("test").getIndex();
    final ShardId commandShard = new ShardId(index, moveAllocationCommand.shardId());

    assertAcked(client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand));

    // test only on the master with the latest cluster state: since we didn't make any change, we cannot
    // guarantee that all nodes hold the same cluster state version. We only know there was no need to
    // change anything, thus no need for an ack on this update.
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    boolean found = false;
    for (ShardRouting shardRouting : clusterStateResponse.getState().getRoutingNodes().node(moveAllocationCommand.fromNode())) {
        // the shard that we wanted to move is still on the same node, as we set the dryRun flag
        if (shardRouting.shardId().equals(commandShard)) {
            assertThat(shardRouting.started(), equalTo(true));
            found = true;
            break;
        }
    }
    assertThat(found, equalTo(true));
    for (ShardRouting shardRouting : clusterStateResponse.getState().getRoutingNodes().node(moveAllocationCommand.toNode())) {
        if (shardRouting.shardId().equals(commandShard)) {
            fail("shard [" + shardRouting + "] shouldn't be on node [" + moveAllocationCommand.toString() + "]");
        }
    }
}
Use of org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand in project elasticsearch by elastic.
Class AckIT, method testClusterRerouteNoAcknowledgementDryRun:
public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
    client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
        .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
        .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
    ensureGreen();

    MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
    ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute()
        .setTimeout("0s").setDryRun(true).add(moveAllocationCommand).get();
    // acknowledged anyway, as no changes were made
    assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(true));
}
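Taken together, the four AckIT tests above cover acknowledgement for a real reroute, a reroute with a zero timeout, and both dry-run variants. As a rough sketch (not part of AckIT), the shared pattern could be factored into one helper that issues the reroute with a given timeout and dry-run flag and reports whether it was acknowledged; it uses only the builder calls already shown above, and the helper name and parameters are illustrative.

    // Rough sketch, not from AckIT: run a reroute for the given command with an explicit
    // acknowledgement timeout and dry-run flag, and return whether the cluster acknowledged it.
    private boolean rerouteAcknowledged(MoveAllocationCommand command, String timeout, boolean dryRun) {
        ClusterRerouteResponse response = client().admin().cluster().prepareReroute()
            .setTimeout(timeout)
            .setDryRun(dryRun)
            .add(command)
            .get();
        return response.isAcknowledged();
    }

With a "0s" timeout the real reroute is reported as not acknowledged, while the dry-run variant is acknowledged anyway because no cluster state change is needed.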
Use of org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand in project elasticsearch by elastic.
Class DiskThresholdDeciderTests, method testShardRelocationsTakenIntoAccount:
public void testShardRelocationsTakenIntoAccount() {
    Settings diskSettings = Settings.builder()
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7)
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8)
        .build();

    ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
    usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used
    usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40)); // 60% used
    usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 40)); // 60% used
    ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();

    ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
    shardSizesBuilder.put("[test][0][p]", 14L);  // 14 bytes
    shardSizesBuilder.put("[test][0][r]", 14L);
    shardSizesBuilder.put("[test2][0][p]", 1L);  // 1 byte
    shardSizesBuilder.put("[test2][0][r]", 1L);
    ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();

    final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);

    DiskThresholdDecider decider = makeDecider(diskSettings);
    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
        new HashSet<>(Arrays.asList(
            new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
            decider)));

    ClusterInfoService cis = new ClusterInfoService() {
        @Override
        public ClusterInfo getClusterInfo() {
            logger.info("--> calling fake getClusterInfo");
            return clusterInfo;
        }

        @Override
        public void addListener(Listener listener) {
            // noop
        }
    };

    AllocationService strategy = new AllocationService(Settings.builder()
        .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
        .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
        .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
        .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);

    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
        .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
        .build();

    RoutingTable initialRoutingTable = RoutingTable.builder()
        .addAsNew(metaData.index("test"))
        .addAsNew(metaData.index("test2"))
        .build();

    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData).routingTable(initialRoutingTable).build();

    logger.info("--> adding two nodes");
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);

    // shards should be initializing
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));

    logger.info("--> start the shards");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);

    // assert that we're able to start the primaries and replicas
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));

    logger.info("--> adding node3");
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();

    AllocationCommand relocate1 = new MoveAllocationCommand("test", 0, "node2", "node3");
    AllocationCommands cmds = new AllocationCommands(relocate1);
    clusterState = strategy.reroute(clusterState, cmds, false, false).getClusterState();
    logShardStates(clusterState);

    AllocationCommand relocate2 = new MoveAllocationCommand("test2", 0, "node2", "node3");
    cmds = new AllocationCommands(relocate2);
    try {
        // The shard of the "test" index is already being relocated to node3, which will put the node
        // over the low watermark once the relocation completes. With shard relocations taken into
        // account, this second move should therefore throw an exception.
        strategy.reroute(clusterState, cmds, false, false);
        fail("should not have been able to reroute the shard");
    } catch (IllegalArgumentException e) {
        assertThat("can't be allocated because there isn't enough room: " + e.getMessage(),
            e.getMessage(),
            containsString("the node is above the low watermark cluster setting "
                + "[cluster.routing.allocation.disk.watermark.low=0.7], using more disk space than the maximum "
                + "allowed [70.0%], actual free: [26.0%]"));
    }
}
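The numbers in the expected error message follow directly from the fake disk usages and shard sizes set up at the top of the test: node3 starts with 40 of 100 bytes free, the in-flight 14-byte relocation of [test][0] is counted against it (include_relocations is enabled), leaving 26% free, below the 30% free implied by the 0.7 low watermark. The standalone sketch below (plain Java, no Elasticsearch types) just reproduces that arithmetic.

    // Standalone sketch of the watermark arithmetic behind the expected failure above.
    public class WatermarkMathSketch {
        public static void main(String[] args) {
            long totalBytes = 100;          // node3 capacity in the fake DiskUsage
            long freeBytes = 40;            // node3 free bytes, i.e. 60% used initially
            long relocatingShardBytes = 14; // [test][0] is already relocating onto node3

            // with include_relocations=true, incoming relocations are subtracted from free space
            long expectedFreeBytes = freeBytes - relocatingShardBytes;           // 26 bytes
            double expectedFreePercent = 100.0 * expectedFreeBytes / totalBytes; // 26.0%

            // the 0.7 low watermark allows at most 70% used, i.e. requires at least 30% free,
            // so also moving [test2][0] onto node3 is rejected
            System.out.printf("expected free on node3: %.1f%% (low watermark requires >= 30.0%%)%n",
                    expectedFreePercent);
        }
    }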