Use of org.elasticsearch.cluster.routing.RoutingTable in project elasticsearch by elastic.
From the class IndexBalanceTests, method testBalanceAllNodesStartedAddIndex.
public void testBalanceAllNodesStartedAddIndex() {
AllocationService strategy = createAllocationService(Settings.builder()
    .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
    .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
    .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
    .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
    .build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
ClusterState newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
}
logger.info("Another round of rebalancing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, equalTo(clusterState));
RoutingNodes routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
// replica shards are initializing as well; we make sure that they
// recover from *started* primary shards in the
// IndicesClusterStateService
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("Reroute, nothing should change");
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, equalTo(clusterState));
logger.info("Start the more shards");
routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
}
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
logger.info("Add new index 3 shards 1 replica");
MetaData updatedMetaData = MetaData.builder(clusterState.metaData())
    .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)))
    .build();
RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).addAsNew(updatedMetaData.index("test1")).build();
clusterState = ClusterState.builder(clusterState).metaData(updatedMetaData).routingTable(updatedRoutingTable).build();
assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
}
logger.info("Another round of rebalancing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, equalTo(clusterState));
routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
// replica shards are initializing as well; we make sure that they
// recover from *started* primary shards in the
// IndicesClusterStateService
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("Reroute, nothing should change");
newState = strategy.reroute(clusterState, "reroute");
assertThat(newState, equalTo(clusterState));
logger.info("Start the more shards");
routingNodes = clusterState.getRoutingNodes();
newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingNodes = clusterState.getRoutingNodes();
assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
}
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
}
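These tests lean on helpers inherited from the ESAllocationTestCase base class (createAllocationService, newNode, settings) and on a handful of static imports that the snippets do not show. A hedged sketch of what they appear to assume is in scope; exact packages can differ between Elasticsearch versions:
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
// createAllocationService(Settings), newNode(String) and settings(Version) are test helpers
// provided by the Elasticsearch test framework (ESAllocationTestCase), not shown in the snippet.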
Use of org.elasticsearch.cluster.routing.RoutingTable in project elasticsearch by elastic.
From the class MaxRetryAllocationDeciderTests, method createInitialClusterState.
private ClusterState createInitialClusterState() {
MetaData.Builder metaBuilder = MetaData.builder();
metaBuilder.put(IndexMetaData.builder("idx").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0));
MetaData metaData = metaBuilder.build();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
routingTableBuilder.addAsNew(metaData.index("idx"));
RoutingTable routingTable = routingTableBuilder.build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute", false).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertEquals(prevRoutingTable.index("idx").shards().size(), 1);
assertEquals(prevRoutingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
assertEquals(routingTable.index("idx").shards().size(), 1);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
return clusterState;
}
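This helper also isolates the core RoutingTable construction pattern used throughout this page: build MetaData for an index, seed the routing table from it with addAsNew (which leaves every shard UNASSIGNED), and let an AllocationService reroute assign the shards. A condensed sketch, with the index name and shard counts purely illustrative:
MetaData metaData = MetaData.builder()
    .put(IndexMetaData.builder("idx").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
    .build();
RoutingTable routingTable = RoutingTable.builder()
    .addAsNew(metaData.index("idx"))   // every shard is UNASSIGNED until a reroute assigns it
    .build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
    .metaData(metaData).routingTable(routingTable).build();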
Use of org.elasticsearch.cluster.routing.RoutingTable in project elasticsearch by elastic.
From the class MaxRetryAllocationDeciderTests, method testFailedAllocation.
public void testFailedAllocation() {
ClusterState clusterState = createInitialClusterState();
RoutingTable routingTable = clusterState.routingTable();
final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);
// now fail the shard retries - 1 times
for (int i = 0; i < retries - 1; i++) {
List<FailedShard> failedShards = Collections.singletonList(new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i, new UnsupportedOperationException()));
ClusterState newState = strategy.applyFailedShards(clusterState, failedShards);
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
ShardRouting unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
assertEquals(unassignedPrimary.state(), INITIALIZING);
assertEquals(unassignedPrimary.unassignedInfo().getNumFailedAllocations(), i + 1);
assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "boom" + i);
// MaxRetryAllocationDecider#canForceAllocatePrimary should return YES decisions because canAllocate returns YES here
assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary(unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0, false)));
}
// now check that the shard actually sticks to UNASSIGNED on the next failure
{
List<FailedShard> failedShards = Collections.singletonList(new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom", new UnsupportedOperationException()));
ClusterState newState = strategy.applyFailedShards(clusterState, failedShards);
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
ShardRouting unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
assertEquals(unassignedPrimary.unassignedInfo().getNumFailedAllocations(), retries);
assertEquals(unassignedPrimary.state(), UNASSIGNED);
assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "boom");
// MaxRetryAllocationDecider#canForceAllocatePrimary should return a NO decision because canAllocate returns NO here
assertEquals(Decision.NO, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary(unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0, false)));
}
// change the settings and ensure we can do another round of allocation for that index.
clusterState = ClusterState.builder(clusterState)
    .routingTable(routingTable)
    .metaData(MetaData.builder(clusterState.metaData())
        .put(IndexMetaData.builder(clusterState.metaData().index("idx"))
            .settings(Settings.builder()
                .put(clusterState.metaData().index("idx").getSettings())
                .put("index.allocation.max_retries", retries + 1)
                .build())
            .build(), true)
        .build())
    .build();
ClusterState newState = strategy.reroute(clusterState, "settings changed", false);
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingTable = newState.routingTable();
// good, the shard is initializing again and the failure information is maintained
assertEquals(routingTable.index("idx").shards().size(), 1);
ShardRouting unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
assertEquals(unassignedPrimary.unassignedInfo().getNumFailedAllocations(), retries);
assertEquals(unassignedPrimary.state(), INITIALIZING);
assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "boom");
// bumped up the max retry count, so canForceAllocatePrimary should return a YES decision
assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary(routingTable.index("idx").shard(0).shards().get(0), null, new RoutingAllocation(null, null, clusterState, null, 0, false)));
// now we start the shard
clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList(routingTable.index("idx").shard(0).shards().get(0)));
routingTable = clusterState.routingTable();
// all counters have been reset to 0, i.e. there is no unassigned info
assertEquals(routingTable.index("idx").shards().size(), 1);
assertNull(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo());
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), STARTED);
// now fail again and see if it has a new counter
List<FailedShard> failedShards = Collections.singletonList(new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "ZOOOMG", new UnsupportedOperationException()));
newState = strategy.applyFailedShards(clusterState, failedShards);
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
routingTable = newState.routingTable();
assertEquals(routingTable.index("idx").shards().size(), 1);
unassignedPrimary = routingTable.index("idx").shard(0).shards().get(0);
assertEquals(unassignedPrimary.unassignedInfo().getNumFailedAllocations(), 1);
assertEquals(unassignedPrimary.state(), UNASSIGNED);
assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "ZOOOMG");
// Counter reset, so MaxRetryAllocationDecider#canForceAllocatePrimary should return a YES decision
assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary(unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0, false)));
}
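The retry budget exercised above is the per-index index.allocation.max_retries setting, exposed as MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY. A minimal sketch of reading the default and raising it for one index, mirroring what the test does in its settings update (the increment is illustrative):
int maxRetries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);   // built-in default
Settings raised = Settings.builder()
    .put("index.allocation.max_retries", maxRetries + 1)   // per-index override, as in the test above
    .build();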
Use of org.elasticsearch.cluster.routing.RoutingTable in project elasticsearch by elastic.
From the class NodeVersionAllocationDeciderTests, method testDoNotAllocateFromPrimary.
public void testDoNotAllocateFromPrimary() {
AllocationService strategy = createAllocationService(Settings.builder()
    .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
    .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
    .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
    .build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2)).build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2));
}
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
routingNodes = clusterState.getRoutingNodes();
clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3", VersionUtils.getPreviousVersion()))).build();
clusterState = strategy.reroute(clusterState, "reroute");
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
}
routingNodes = clusterState.getRoutingNodes();
clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2));
}
}
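The pivotal step in this test is adding a node on an older version: NodeVersionAllocationDecider will not let a replica recover from a newer-version primary onto it, so the second replica stays UNASSIGNED until the current-version "node4" joins. Isolated from the snippet above (newNode and VersionUtils are the same test utilities used there):
// an older-version node; replicas of primaries on newer-version nodes are not allocated to it
DiscoveryNode oldNode = newNode("node3", VersionUtils.getPreviousVersion());
clusterState = ClusterState.builder(clusterState)
    .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(oldNode))
    .build();
clusterState = strategy.reroute(clusterState, "reroute");   // the remaining replica stays unassigned on the old node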
Use of org.elasticsearch.cluster.routing.RoutingTable in project elasticsearch by elastic.
From the class NodeJoinControllerTests, method testElectionBasedOnConflictingNodes.
/**
* Tests that a node can become master even though the last cluster state it knows about contains
* nodes that conflict with the joins it receives while becoming master.
*/
public void testElectionBasedOnConflictingNodes() throws InterruptedException, ExecutionException {
final DiscoveryNode masterNode = clusterService.localNode();
final DiscoveryNode otherNode = new DiscoveryNode("other_node", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT);
// simulate master going down with stale nodes in its cluster state (for example when min master nodes is set to 2)
// also add some shards to that node
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
discoBuilder.masterNodeId(null);
discoBuilder.add(otherNode);
ClusterState.Builder stateBuilder = ClusterState.builder(clusterService.state()).nodes(discoBuilder);
if (randomBoolean()) {
IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(Settings.builder()
    .put(SETTING_VERSION_CREATED, Version.CURRENT)
    .put(SETTING_NUMBER_OF_SHARDS, 1)
    .put(SETTING_NUMBER_OF_REPLICAS, 1)
    .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetaData.getIndex());
RoutingTable.Builder routing = new RoutingTable.Builder();
routing.addAsNew(indexMetaData);
final ShardId shardId = new ShardId("test", "_na_", 0);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
final DiscoveryNode primaryNode = randomBoolean() ? masterNode : otherNode;
final DiscoveryNode replicaNode = primaryNode.equals(masterNode) ? otherNode : masterNode;
final boolean primaryStarted = randomBoolean();
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, primaryNode.getId(), null, true,
    primaryStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
    primaryStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "getting there")));
if (primaryStarted) {
boolean replicaStarted = randomBoolean();
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, replicaNode.getId(), null, false,
    replicaStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
    replicaStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "getting there")));
} else {
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, null, null, false, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "life sucks")));
}
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
IndexRoutingTable indexRoutingTable = indexRoutingTableBuilder.build();
IndexMetaData updatedIndexMetaData = updateActiveAllocations(indexRoutingTable, indexMetaData);
stateBuilder.metaData(MetaData.builder().put(updatedIndexMetaData, false).generateClusterUuidIfNeeded()).routingTable(RoutingTable.builder().add(indexRoutingTable).build());
}
setState(clusterService, stateBuilder.build());
// conflict on node id or address
final DiscoveryNode conflictingNode = randomBoolean()
    ? new DiscoveryNode(otherNode.getId(), randomBoolean() ? otherNode.getAddress() : buildNewFakeTransportAddress(), otherNode.getAttributes(), otherNode.getRoles(), Version.CURRENT)
    : new DiscoveryNode("conflicting_address_node", otherNode.getAddress(), otherNode.getAttributes(), otherNode.getRoles(), Version.CURRENT);
nodeJoinController.startElectionContext();
final SimpleFuture joinFuture = joinNodeAsync(conflictingNode);
final CountDownLatch elected = new CountDownLatch(1);
nodeJoinController.waitToBeElectedAsMaster(1, TimeValue.timeValueHours(5), new NodeJoinController.ElectionCallback() {
@Override
public void onElectedAsMaster(ClusterState state) {
elected.countDown();
}
@Override
public void onFailure(Throwable t) {
logger.error("failed to be elected as master", t);
throw new AssertionError("failed to be elected as master", t);
}
});
elected.await();
// this rethrows any exception that occurred during the join
joinFuture.get();
final ClusterState finalState = clusterService.state();
final DiscoveryNodes finalNodes = finalState.nodes();
assertTrue(finalNodes.isLocalNodeElectedMaster());
assertThat(finalNodes.getLocalNode(), equalTo(masterNode));
assertThat(finalNodes.getSize(), equalTo(2));
assertThat(finalNodes.get(conflictingNode.getId()), equalTo(conflictingNode));
List<ShardRouting> activeShardsOnRestartedNode = StreamSupport.stream(finalState.getRoutingNodes().node(conflictingNode.getId()).spliterator(), false).filter(ShardRouting::active).collect(Collectors.toList());
assertThat(activeShardsOnRestartedNode, empty());
}
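For reference, the manual shard-routing assembly used in this test, reduced to a skeleton (the node id and STARTED state are illustrative; indexMetaData is the test index built earlier in the snippet):
ShardId shardId = new ShardId("test", "_na_", 0);
IndexShardRoutingTable.Builder shardBuilder = new IndexShardRoutingTable.Builder(shardId);
shardBuilder.addShard(TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED, null));
RoutingTable routingTable = RoutingTable.builder()
    .add(IndexRoutingTable.builder(indexMetaData.getIndex()).addIndexShard(shardBuilder.build()).build())
    .build();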