Use of org.apache.ratis.MiniRaftCluster.PeerChanges in the Apache incubator-ratis project: class RaftReconfigurationBaseTest, method testKillLeaderDuringReconf.
/**
 * Kill the leader before the reconfiguration finishes, and make sure the
 * client keeps retrying the same setConfiguration request until the new
 * leader commits the new configuration.
 */
@Test
public void testKillLeaderDuringReconf() throws Exception {
  LOG.info("Start testKillLeaderDuringReconf");
  // originally 3 peers
  final MiniRaftCluster cluster = getCluster(3);
  cluster.start();
  try {
    RaftTestUtil.waitForLeader(cluster);
    final RaftPeerId leaderId = cluster.getLeader().getId();
    final RaftClient client = cluster.createClient(leaderId);

    // New conf: add 2 peers (not started yet) and remove 2 of the originals.
    PeerChanges c1 = cluster.addNewPeers(2, false);
    PeerChanges c2 = cluster.removePeers(2, false, asList(c1.newPeers));
    LOG.info("Start changing the configuration: {}", asList(c2.allPeersInNewConf));

    final AtomicReference<Boolean> success = new AtomicReference<>();
    final AtomicBoolean clientRunning = new AtomicBoolean(true);
    Thread clientThread = new Thread(() -> {
      try {
        boolean r = false;
        while (clientRunning.get() && !r) {
          r = client.setConfiguration(c2.allPeersInNewConf).isSuccess();
        }
        success.set(r);
      } catch (IOException e) {
        // Expected while the old leader is being killed; log for diagnosis
        // instead of swallowing silently.
        LOG.info("setConfiguration failed in client thread", e);
      } finally {
        try {
          // Always release the client, even when setConfiguration threw.
          client.close();
        } catch (IOException ignored) {
          // best-effort close during test teardown
        }
      }
    });
    clientThread.start();

    // The leader cannot generate the (old, new) conf, and it will keep
    // bootstrapping the 2 new peers since they have not started yet.
    LOG.info(cluster.printServers());
    Assert.assertFalse(cluster.getLeader().getRaftConf().isTransitional());

    // only the first empty entry got committed
    final long committedIndex =
        cluster.getLeader().getState().getLog().getLastCommittedIndex();
    Assert.assertTrue("committedIndex is " + committedIndex, committedIndex <= 1);

    LOG.info("kill the current leader");
    final String oldLeaderId = RaftTestUtil.waitAndKillLeader(cluster, true);
    LOG.info("start the two new peers: {}", Arrays.asList(c1.newPeers));
    for (RaftPeer np : c1.newPeers) {
      cluster.startServer(np.getId());
    }
    Thread.sleep(3000);

    // The client should get the NotLeaderException from the first leader, and
    // will retry the same setConfiguration request against the new leader.
    waitAndCheckNewConf(cluster, c2.allPeersInNewConf, 2,
        Collections.singletonList(oldLeaderId));

    // Stop the retry loop and make sure the worker thread actually exits
    // instead of leaking past the end of the test.
    clientRunning.set(false);
    clientThread.interrupt();
    clientThread.join();
    // NOTE(review): success may be null if the client thread ended with an
    // exception, so it is deliberately not asserted here.
  } finally {
    cluster.shutdown();
  }
}
Use of org.apache.ratis.MiniRaftCluster.PeerChanges in the Apache incubator-ratis project: class RaftReconfigurationBaseTest, method testReconfTimeout.
/**
 * Request a reconfiguration that adds 2 peers which have not started yet, and
 * verify that each setConfiguration attempt fails with
 * {@link ReconfigurationTimeoutException} until the new peers come up, after
 * which the same request succeeds.
 */
@Test
public void testReconfTimeout() throws Exception {
  LOG.info("Start testReconfTimeout");
  // originally 3 peers
  final MiniRaftCluster cluster = getCluster(3);
  cluster.start();
  try {
    RaftTestUtil.waitForLeader(cluster);
    final RaftPeerId leaderId = cluster.getLeader().getId();
    final RaftClient client = cluster.createClient(leaderId);
    try {
      PeerChanges c1 = cluster.addNewPeers(2, false);
      LOG.info("Start changing the configuration: {}", asList(c1.allPeersInNewConf));
      Assert.assertFalse(cluster.getLeader().getRaftConf().isTransitional());

      final RaftClientRpc sender = client.getClientRpc();
      final SetConfigurationRequest request = cluster.newSetConfigurationRequest(
          client.getId(), leaderId, c1.allPeersInNewConf);

      // The two new peers have not started yet, so bootstrapping must time out.
      assertReconfTimeout(sender, request);
      LOG.info(cluster.printServers());

      // The leader should have reset its reconfiguration state, so we still
      // get a timeout instead of an in-progress exception.
      assertReconfTimeout(sender, request);

      // Start the two new peers; the same request can now succeed.
      LOG.info("Start new peers");
      for (RaftPeer np : c1.newPeers) {
        cluster.startServer(np.getId());
      }
      Assert.assertTrue(client.setConfiguration(c1.allPeersInNewConf).isSuccess());
    } finally {
      // Close the client even when an assertion above fails.
      client.close();
    }
  } finally {
    cluster.shutdown();
  }
}

/**
 * Sends {@code request} via {@code sender} and asserts that it fails with a
 * {@link ReconfigurationTimeoutException}.
 */
private static void assertReconfTimeout(RaftClientRpc sender,
    SetConfigurationRequest request) {
  try {
    sender.sendRequest(request);
    Assert.fail("did not get expected exception");
  } catch (IOException e) {
    Assert.assertTrue("Got exception " + e,
        e instanceof ReconfigurationTimeoutException);
  }
}
Aggregations