Example usage of org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration from the CrateDB project:
class CoordinationStateTests, method testHandleCommit.
public void testHandleCommit() {
    // Bootstrap node1 as the sole member of the initial voting configuration.
    VotingConfiguration soloNode1Config = new VotingConfiguration(Collections.singleton(node1.getId()));
    ClusterState bootstrapState = clusterState(0L, 0L, node1, soloNode1Config, soloNode1Config, 42L);
    cs1.setInitialState(bootstrapState);
    // node1 starts an election in some term in [1, 5] and wins it with its own vote.
    StartJoinRequest startJoin = new StartJoinRequest(node1, randomLongBetween(1, 5));
    Join joinFromNode1 = cs1.handleStartJoin(startJoin);
    assertTrue(cs1.handleJoin(joinFromNode1));
    assertTrue(cs1.electionWon());
    // node2 also joins node1's election.
    Join joinFromNode2 = cs2.handleStartJoin(startJoin);
    assertTrue(cs1.handleJoin(joinFromNode2));
    // Publish a state that reconfigures the voting configuration to node2 only.
    VotingConfiguration soloNode2Config = new VotingConfiguration(Collections.singleton(node2.getId()));
    ClusterState reconfiguredState = clusterState(startJoin.getTerm(), 2L, node1, soloNode1Config, soloNode2Config, 7L);
    PublishRequest publishRequest = cs1.handleClientValue(reconfiguredState);
    PublishResponse publishResponse = cs1.handlePublishRequest(publishRequest);
    // Acks from node1 and node2 together satisfy a quorum in both the old and new configs.
    cs1.handlePublishResponse(node1, publishResponse);
    Optional<ApplyCommitRequest> applyCommit = cs1.handlePublishResponse(node2, publishResponse);
    assertTrue(applyCommit.isPresent());
    // Before applying the commit the committed configuration is still the old one ...
    assertThat(cs1.getLastCommittedConfiguration(), equalTo(soloNode1Config));
    cs1.handleCommit(applyCommit.get());
    // ... and afterwards it is the freshly published configuration.
    assertThat(cs1.getLastCommittedConfiguration(), equalTo(soloNode2Config));
}
Example usage of org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration from the CrateDB project:
class CoordinationStateTests, method testHandlePublishResponseWithoutCommitedConfigQuorum.
// NOTE(review): method name has a typo ("Commited" -> "Committed"); kept as-is since
// renaming a test method changes its externally visible identifier.
public void testHandlePublishResponseWithoutCommitedConfigQuorum() {
    // Two single-node configurations: the committed one (node1) and a proposed one (node2).
    VotingConfiguration configNode1 = new VotingConfiguration(Collections.singleton(node1.getId()));
    VotingConfiguration configNode2 = new VotingConfiguration(Collections.singleton(node2.getId()));
    ClusterState bootstrapState = clusterState(0L, 0L, node1, configNode1, configNode1, 42L);
    cs1.setInitialState(bootstrapState);
    // node1 wins an election in some term in [1, 5].
    StartJoinRequest startJoin = new StartJoinRequest(node1, randomLongBetween(1, 5));
    Join joinFromNode1 = cs1.handleStartJoin(startJoin);
    assertTrue(cs1.handleJoin(joinFromNode1));
    assertTrue(cs1.electionWon());
    Join joinFromNode2 = cs2.handleStartJoin(startJoin);
    assertTrue(cs1.handleJoin(joinFromNode2));
    // Publish a state whose new configuration is node2 only.
    ClusterState reconfiguredState = clusterState(startJoin.getTerm(), 2L, node1, configNode1, configNode2, 42L);
    PublishRequest publishRequest = cs1.handleClientValue(reconfiguredState);
    PublishResponse responseFromNode2 = cs2.handlePublishRequest(publishRequest);
    // node2's ack alone satisfies the new config but NOT the committed (node1-only)
    // config, so no commit may be issued yet.
    Optional<ApplyCommitRequest> applyCommit = cs1.handlePublishResponse(node2, responseFromNode2);
    assertFalse(applyCommit.isPresent());
}
Example usage of org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration from the CrateDB project:
class CoordinatorTests, method testCanShrinkFromFiveNodesToThree.
// Verifies that a 5-node cluster keeps all nodes in the committed voting configuration
// while auto-shrink is disabled, and shrinks to a 3-node configuration (excluding the
// disconnected nodes) once auto-shrink is re-enabled.
public void testCanShrinkFromFiveNodesToThree() {
try (Cluster cluster = new Cluster(5)) {
cluster.runRandomly();
cluster.stabilise();
{
final ClusterNode leader = cluster.getAnyLeader();
logger.info("setting auto-shrink reconfiguration to false");
leader.submitSetAutoShrinkVotingConfiguration(false);
cluster.stabilise(DEFAULT_CLUSTER_STATE_UPDATE_DELAY);
// The setting change must be visible in the applied cluster state before proceeding.
assertFalse(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.get(leader.getLastAppliedClusterState().metadata().settings()));
}
// Disconnect two of the five nodes; with auto-shrink off, the voting
// configuration should still contain all five.
final ClusterNode disconnect1 = cluster.getAnyNode();
final ClusterNode disconnect2 = cluster.getAnyNodeExcept(disconnect1);
logger.info("--> disconnecting {} and {}", disconnect1, disconnect2);
disconnect1.disconnect();
disconnect2.disconnect();
cluster.stabilise();
// Re-resolve the leader: the previous leader may have been one of the disconnected nodes.
final ClusterNode leader = cluster.getAnyLeader();
{
final VotingConfiguration lastCommittedConfiguration = leader.getLastAppliedClusterState().getLastCommittedConfiguration();
assertThat(lastCommittedConfiguration + " should be all nodes", lastCommittedConfiguration.getNodeIds(), equalTo(cluster.clusterNodes.stream().map(ClusterNode::getId).collect(Collectors.toSet())));
}
logger.info("setting auto-shrink reconfiguration to true");
leader.submitSetAutoShrinkVotingConfiguration(true);
// allow for a reconfiguration
cluster.stabilise(DEFAULT_CLUSTER_STATE_UPDATE_DELAY * 2);
assertTrue(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.get(leader.getLastAppliedClusterState().metadata().settings()));
{
// After auto-shrink, the committed configuration should be exactly three
// healthy nodes, i.e. neither disconnected node remains a voting member.
final VotingConfiguration lastCommittedConfiguration = leader.getLastAppliedClusterState().getLastCommittedConfiguration();
assertThat(lastCommittedConfiguration + " should be 3 nodes", lastCommittedConfiguration.getNodeIds().size(), equalTo(3));
assertFalse(lastCommittedConfiguration.getNodeIds().contains(disconnect1.getId()));
assertFalse(lastCommittedConfiguration.getNodeIds().contains(disconnect2.getId()));
}
}
}
Example usage of org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration from the CrateDB project:
class CoordinatorTests, method testCannotSetInitialConfigurationWithoutLocalNode.
public void testCannotSetInitialConfigurationWithoutLocalNode() {
    try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) {
        final Coordinator coordinator = cluster.getAnyNode().coordinator;
        // An initial voting configuration that does not include the local node
        // must be rejected outright.
        final VotingConfiguration foreignOnlyConfiguration = new VotingConfiguration(Set.of("unknown-node"));
        final CoordinationStateRejectedException rejection = expectThrows(
            CoordinationStateRejectedException.class,
            () -> coordinator.setInitialConfiguration(foreignOnlyConfiguration));
        assertThat(rejection.getMessage(), equalTo("local node is not part of initial configuration"));
    }
}
Example usage of org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration from the CrateDB project:
class CoordinatorTests, method testExpandsConfigurationWhenGrowingFromThreeToFiveNodesAndShrinksBackToThreeOnFailure.
// Verifies that with auto-shrink enabled, a 3-node cluster grows its voting
// configuration to 5 when two nodes join, shrinks back to 3 healthy voters after two
// nodes disconnect, tolerates one further disconnect, loses quorum on a fourth, and
// only recovers once enough voting members are healed.
public void testExpandsConfigurationWhenGrowingFromThreeToFiveNodesAndShrinksBackToThreeOnFailure() {
try (Cluster cluster = new Cluster(3)) {
cluster.runRandomly();
cluster.stabilise();
final ClusterNode leader = cluster.getAnyLeader();
logger.info("setting auto-shrink reconfiguration to true");
leader.submitSetAutoShrinkVotingConfiguration(true);
cluster.stabilise(DEFAULT_CLUSTER_STATE_UPDATE_DELAY);
// Confirm the setting is applied before growing the cluster.
assertTrue(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.get(leader.getLastAppliedClusterState().metadata().settings()));
cluster.addNodesAndStabilise(2);
{
// After growing to 5 nodes, all of them should be voting members.
assertThat(leader.coordinator.getMode(), is(Mode.LEADER));
final VotingConfiguration lastCommittedConfiguration = leader.getLastAppliedClusterState().getLastCommittedConfiguration();
assertThat(lastCommittedConfiguration + " should be all nodes", lastCommittedConfiguration.getNodeIds(), equalTo(cluster.clusterNodes.stream().map(ClusterNode::getId).collect(Collectors.toSet())));
}
final ClusterNode disconnect1 = cluster.getAnyNode();
final ClusterNode disconnect2 = cluster.getAnyNodeExcept(disconnect1);
logger.info("--> disconnecting {} and {}", disconnect1, disconnect2);
disconnect1.disconnect();
disconnect2.disconnect();
cluster.stabilise();
{
// Auto-shrink should reduce the configuration to the 3 connected nodes.
final ClusterNode newLeader = cluster.getAnyLeader();
final VotingConfiguration lastCommittedConfiguration = newLeader.getLastAppliedClusterState().getLastCommittedConfiguration();
assertThat(lastCommittedConfiguration + " should be 3 nodes", lastCommittedConfiguration.getNodeIds().size(), equalTo(3));
assertFalse(lastCommittedConfiguration.getNodeIds().contains(disconnect1.getId()));
assertFalse(lastCommittedConfiguration.getNodeIds().contains(disconnect2.getId()));
}
// we still tolerate the loss of one more node here
final ClusterNode disconnect3 = cluster.getAnyNodeExcept(disconnect1, disconnect2);
logger.info("--> disconnecting {}", disconnect3);
disconnect3.disconnect();
cluster.stabilise();
{
// The configuration stays at 3 (it cannot shrink below 3), so the newly
// disconnected node remains a voting member even though it is unreachable.
final ClusterNode newLeader = cluster.getAnyLeader();
final VotingConfiguration lastCommittedConfiguration = newLeader.getLastAppliedClusterState().getLastCommittedConfiguration();
assertThat(lastCommittedConfiguration + " should be 3 nodes", lastCommittedConfiguration.getNodeIds().size(), equalTo(3));
assertFalse(lastCommittedConfiguration.getNodeIds().contains(disconnect1.getId()));
assertFalse(lastCommittedConfiguration.getNodeIds().contains(disconnect2.getId()));
assertTrue(lastCommittedConfiguration.getNodeIds().contains(disconnect3.getId()));
}
// however we do not tolerate the loss of yet another one
final ClusterNode disconnect4 = cluster.getAnyNodeExcept(disconnect1, disconnect2, disconnect3);
logger.info("--> disconnecting {}", disconnect4);
disconnect4.disconnect();
cluster.runFor(DEFAULT_STABILISATION_TIME, "allowing time for fault detection");
for (final ClusterNode clusterNode : cluster.clusterNodes) {
// Quorum of the 3-node voting config is lost: every node must drop to CANDIDATE.
assertThat(clusterNode.getId() + " should be a candidate", clusterNode.coordinator.getMode(), equalTo(Mode.CANDIDATE));
}
// moreover we are still stuck even if two other nodes heal
// (disconnect1/disconnect2 are no longer voting members, so healing them
// does not restore a quorum of the committed configuration)
logger.info("--> healing {} and {}", disconnect1, disconnect2);
disconnect1.heal();
disconnect2.heal();
cluster.runFor(DEFAULT_STABILISATION_TIME, "allowing time for fault detection");
for (final ClusterNode clusterNode : cluster.clusterNodes) {
assertThat(clusterNode.getId() + " should be a candidate", clusterNode.coordinator.getMode(), equalTo(Mode.CANDIDATE));
}
// we require another node to heal to recover
final ClusterNode toHeal = randomBoolean() ? disconnect3 : disconnect4;
logger.info("--> healing {}", toHeal);
toHeal.heal();
cluster.stabilise();
}
}
Related: Aggregations.