Use of org.apache.ratis.client.RaftClient in project incubator-ratis by apache.
The class MiniRaftCluster, method setConfiguration.
public void setConfiguration(RaftPeer... peers) throws IOException {
  try (RaftClient client = createClient()) {
    LOG.info("Start changing the configuration: {}", Arrays.asList(peers));
    final RaftClientReply reply = client.admin().setConfiguration(peers);
    Preconditions.assertTrue(reply.isSuccess());
  }
}
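The helper above wraps the standard pattern of building a client and issuing an admin setConfiguration call. Below is a minimal, self-contained sketch of the same call outside the test harness; the peer ids, addresses, and group UUID are placeholders, and it assumes a Ratis client RPC module (for example ratis-grpc) is on the classpath.

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.protocol.RaftClientReply;
import org.apache.ratis.protocol.RaftGroup;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeer;

import java.io.IOException;
import java.util.UUID;

public final class SetConfExample {
  public static void main(String[] args) throws IOException {
    // Placeholder peers; real ids and addresses come from the deployment.
    final RaftPeer[] peers = {
        RaftPeer.newBuilder().setId("n0").setAddress("host0:6000").build(),
        RaftPeer.newBuilder().setId("n1").setAddress("host1:6000").build(),
        RaftPeer.newBuilder().setId("n2").setAddress("host2:6000").build()
    };
    // The group id must match the one the servers were started with (placeholder UUID here).
    final RaftGroup group = RaftGroup.valueOf(
        RaftGroupId.valueOf(UUID.fromString("02511d47-d67c-49a3-9011-abb3109a44c1")), peers);

    try (RaftClient client = RaftClient.newBuilder()
        .setProperties(new RaftProperties())
        .setRaftGroup(group)
        .build()) {
      // Ask the current leader to replace the cluster configuration with `peers`.
      final RaftClientReply reply = client.admin().setConfiguration(peers);
      if (!reply.isSuccess()) {
        throw new IOException("setConfiguration failed: " + reply);
      }
    }
  }
}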
Use of org.apache.ratis.client.RaftClient in project incubator-ratis by apache.
The class RaftReconfigurationBaseTest, method runTestOverlappedSetConfRequests.
void runTestOverlappedSetConfRequests(CLUSTER cluster) throws Exception {
  try {
    RaftTestUtil.waitForLeader(cluster);
    final RaftPeerId leaderId = cluster.getLeader().getId();
    RaftPeer[] newPeers = cluster.addNewPeers(2, true).allPeersInNewConf;

    // delay every peer's logSync so that the setConf request is delayed
    cluster.getPeers().forEach(peer -> logSyncDelay.setDelayMs(peer.getId().toString(), 1000));

    final CountDownLatch latch = new CountDownLatch(1);
    final RaftPeer[] peersInRequest2 = cluster.getPeers().toArray(new RaftPeer[0]);
    AtomicBoolean caughtException = new AtomicBoolean(false);
    new Thread(() -> {
      try (final RaftClient client2 = cluster.createClient(leaderId)) {
        latch.await();
        LOG.info("client2 starts to change conf");
        final RaftClientRpc sender2 = client2.getClientRpc();
        sender2.sendRequest(cluster.newSetConfigurationRequest(client2.getId(), leaderId, peersInRequest2));
      } catch (ReconfigurationInProgressException e) {
        caughtException.set(true);
      } catch (Exception e) {
        LOG.warn("Got unexpected exception when client2 changes conf", e);
      }
    }).start();

    AtomicBoolean confChanged = new AtomicBoolean(false);
    new Thread(() -> {
      try (final RaftClient client1 = cluster.createClient(leaderId)) {
        LOG.info("client1 starts to change conf");
        confChanged.set(client1.admin().setConfiguration(newPeers).isSuccess());
      } catch (IOException e) {
        LOG.warn("Got unexpected exception when client1 changes conf", e);
      }
    }).start();

    Thread.sleep(100);
    latch.countDown();

    for (int i = 0; i < 10 && !confChanged.get(); i++) {
      Thread.sleep(1000);
    }
    Assert.assertTrue(confChanged.get());
    Assert.assertTrue(caughtException.get());
  } finally {
    logSyncDelay.clear();
  }
}
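The test relies on the leader rejecting a second configuration change while another one is still pending (client2 observes ReconfigurationInProgressException). A hedged sketch of how an application might cope with that rejection through the high-level API is shown below; the helper name and the one-second backoff are illustrative, and the pending-reconfiguration rejection is assumed to reach the caller as an IOException (ReconfigurationInProgressException extends RaftException, which is an IOException).

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftPeer;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

final class ConcurrentSetConf {
  /**
   * Keep retrying setConfiguration until it succeeds or the attempt budget is
   * exhausted.  While another reconfiguration is still pending on the leader,
   * an attempt may be rejected (e.g. as a ReconfigurationInProgressException,
   * which is an IOException), so each failure is followed by a short sleep.
   */
  static boolean setConfWithRetries(RaftClient client, RaftPeer[] newPeers, int maxAttempts)
      throws InterruptedException {
    for (int i = 0; i < maxAttempts; i++) {
      try {
        if (client.admin().setConfiguration(newPeers).isSuccess()) {
          return true;
        }
      } catch (IOException e) {
        // rejected or failed; fall through, sleep, and retry
      }
      TimeUnit.SECONDS.sleep(1);
    }
    return false;
  }
}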
Use of org.apache.ratis.client.RaftClient in project incubator-ratis by apache.
The class RaftReconfigurationBaseTest, method runTestReconfTimeout.
void runTestReconfTimeout(CLUSTER cluster) throws Exception {
  final RaftPeerId leaderId = RaftTestUtil.waitForLeader(cluster).getId();
  try (final RaftClient client = cluster.createClient(leaderId)) {
    PeerChanges c1 = cluster.addNewPeers(2, false);
    LOG.info("Start changing the configuration: {}", asList(c1.allPeersInNewConf));
    Assert.assertFalse(((RaftConfigurationImpl) cluster.getLeader().getRaftConf()).isTransitional());

    final RaftClientRpc sender = client.getClientRpc();
    final SetConfigurationRequest request = cluster.newSetConfigurationRequest(client.getId(), leaderId, c1.allPeersInNewConf);
    try {
      sender.sendRequest(request);
      Assert.fail("did not get expected exception");
    } catch (IOException e) {
      Assert.assertTrue("Got exception " + e, e instanceof ReconfigurationTimeoutException);
    }
    // the two new peers have not started yet, so the bootstrapping must time out
    LOG.info(cluster.printServers());

    // resend the same request; make sure the server has correctly reset its
    // state so that we still get timeout instead of in-progress exception
    try {
      sender.sendRequest(request);
      Assert.fail("did not get expected exception");
    } catch (IOException e) {
      Assert.assertTrue("Got exception " + e, e instanceof ReconfigurationTimeoutException);
    }

    // start the two new peers
    LOG.info("Start new peers");
    for (RaftPeer np : c1.newPeers) {
      cluster.restartServer(np.getId(), false);
    }
    Assert.assertTrue(client.admin().setConfiguration(c1.allPeersInNewConf).isSuccess());
  }
}
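The sequence above (timeout, start the new peers, then succeed) suggests a simple client-side pattern: treat the bootstrap timeout as retryable and retry only once the new peers are reachable. A hedged sketch follows; setConfWhenPeersReady and the startNewPeers callback are illustrative names, and the exception import path reflects recent Ratis versions (org.apache.ratis.protocol.exceptions).

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.exceptions.ReconfigurationTimeoutException;

import java.io.IOException;

final class ReconfAfterPeersUp {
  /**
   * Apply a new configuration, treating a bootstrap timeout as retryable:
   * bring the new peers online (startNewPeers is an illustrative hook for
   * starting the new RaftServer instances) and then retry the same change.
   */
  static void setConfWhenPeersReady(RaftClient client, RaftPeer[] newConf, Runnable startNewPeers)
      throws IOException {
    try {
      if (client.admin().setConfiguration(newConf).isSuccess()) {
        return;
      }
    } catch (ReconfigurationTimeoutException e) {
      // the new peers could not be bootstrapped in time, most likely
      // because they are not running yet
    }
    startNewPeers.run();
    if (!client.admin().setConfiguration(newConf).isSuccess()) {
      throw new IOException("setConfiguration still failing after starting the new peers");
    }
  }
}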
Use of org.apache.ratis.client.RaftClient in project incubator-ratis by apache.
The class RaftReconfigurationBaseTest, method runTestKillLeaderDuringReconf.
void runTestKillLeaderDuringReconf(CLUSTER cluster) throws Exception {
  final AtomicBoolean clientRunning = new AtomicBoolean(true);
  Thread clientThread = null;
  try {
    final RaftPeerId leaderId = RaftTestUtil.waitForLeader(cluster).getId();

    PeerChanges c1 = cluster.addNewPeers(1, false);
    PeerChanges c2 = cluster.removePeers(1, false, asList(c1.newPeers));
    LOG.info("Start setConf: {}", asList(c2.allPeersInNewConf));
    LOG.info(cluster.printServers());

    final CompletableFuture<Void> setConf = new CompletableFuture<>();
    clientThread = new Thread(() -> {
      try (final RaftClient client = cluster.createClient(leaderId)) {
        for (int i = 0; clientRunning.get() && !setConf.isDone(); i++) {
          final RaftClientReply reply = client.admin().setConfiguration(c2.allPeersInNewConf);
          if (reply.isSuccess()) {
            setConf.complete(null);
          }
          LOG.info("setConf attempt #{} failed, {}", i, cluster.printServers());
        }
      } catch (Exception e) {
        LOG.error("Failed to setConf", e);
        setConf.completeExceptionally(e);
      }
    });
    clientThread.start();

    TimeUnit.SECONDS.sleep(1);
    // the leader cannot generate the (old, new) conf, and it will keep
    // bootstrapping the new peer since it has not started yet
    Assert.assertFalse(((RaftConfigurationImpl) cluster.getLeader().getRaftConf()).isTransitional());

    // the log contains at most (0) the initial conf entry, (1) the first setConf entry
    // and (2) a metadata entry, so the commit index is at most 2
    {
      final RaftLog leaderLog = cluster.getLeader().getRaftLog();
      for (LogEntryProto e : RaftTestUtil.getLogEntryProtos(leaderLog)) {
        LOG.info("{}", LogProtoUtils.toLogEntryString(e));
      }
      final long commitIndex = leaderLog.getLastCommittedIndex();
      Assert.assertTrue("commitIndex = " + commitIndex + " > 2", commitIndex <= 2);
    }

    final RaftPeerId killed = RaftTestUtil.waitAndKillLeader(cluster);
    Assert.assertEquals(leaderId, killed);
    final RaftPeerId newLeaderId = RaftTestUtil.waitForLeader(cluster).getId();
    LOG.info("newLeaderId: {}", newLeaderId);

    LOG.info("start new peers: {}", Arrays.asList(c1.newPeers));
    for (RaftPeer np : c1.newPeers) {
      cluster.restartServer(np.getId(), false);
    }

    try {
      setConf.get(10, TimeUnit.SECONDS);
    } catch (TimeoutException ignored) {
    }
    // the request to the killed leader fails; the client then retries the same
    // setConfiguration request with the new leader
    waitAndCheckNewConf(cluster, c2.allPeersInNewConf, 2, Collections.singletonList(leaderId));
    setConf.get(1, TimeUnit.SECONDS);
  } finally {
    if (clientThread != null) {
      clientRunning.set(false);
      clientThread.interrupt();
    }
  }
}
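The test's client thread loops on setConfiguration until a new leader accepts the change. In application code a similar effect can be approximated by giving the client a bounded retry policy when it is built; the sketch below is only an illustration (the attempt count and sleep are arbitrary, and the RaftGroup is assumed to be constructed elsewhere), not a guarantee that every failover case is retried automatically.

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.protocol.RaftGroup;
import org.apache.ratis.retry.RetryPolicies;
import org.apache.ratis.util.TimeDuration;

import java.util.concurrent.TimeUnit;

final class FailoverTolerantClient {
  /**
   * Build a client whose requests (including setConfiguration) are retried a
   * bounded number of times, intended to ride over a leader crash in the
   * middle of a reconfiguration by retrying the request.
   */
  static RaftClient newClient(RaftGroup group) {
    return RaftClient.newBuilder()
        .setProperties(new RaftProperties())
        .setRaftGroup(group)
        .setRetryPolicy(RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            20, TimeDuration.valueOf(500, TimeUnit.MILLISECONDS)))
        .build();
  }
}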
Use of org.apache.ratis.client.RaftClient in project incubator-ratis by apache.
The class RaftReconfigurationBaseTest, method runTestRevertConfigurationChange.
void runTestRevertConfigurationChange(CLUSTER cluster) throws Exception {
  RaftLogBase log2 = null;
  try {
    RaftTestUtil.waitForLeader(cluster);
    final RaftServer.Division leader = cluster.getLeader();
    final RaftPeerId leaderId = leader.getId();

    final RaftLog log = leader.getRaftLog();
    log2 = (RaftLogBase) log;
    Thread.sleep(1000);
    // block the leader's incoming messages and its requests to the followers,
    // so that a leader change is forced without the old leader knowing about it
LOG.info("start blocking the leader");
BlockRequestHandlingInjection.getInstance().blockReplier(leaderId.toString());
cluster.setBlockRequestsFrom(leaderId.toString(), true);
PeerChanges change = cluster.removePeers(1, false, new ArrayList<>());
AtomicBoolean gotNotLeader = new AtomicBoolean(false);
final Thread clientThread = new Thread(() -> {
try (final RaftClient client = cluster.createClient(leaderId)) {
LOG.info("client starts to change conf");
final RaftClientRpc sender = client.getClientRpc();
RaftClientReply reply = sender.sendRequest(cluster.newSetConfigurationRequest(client.getId(), leaderId, change.allPeersInNewConf));
if (reply.getNotLeaderException() != null) {
gotNotLeader.set(true);
}
} catch (IOException e) {
LOG.warn("Got unexpected exception when client1 changes conf", e);
}
});
clientThread.start();
// find ConfigurationEntry
final TimeDuration sleepTime = TimeDuration.valueOf(500, TimeUnit.MILLISECONDS);
final long confIndex = JavaUtils.attemptRepeatedly(() -> {
final long last = log.getLastEntryTermIndex().getIndex();
for (long i = last; i >= 1; i--) {
if (log.get(i).hasConfigurationEntry()) {
return i;
}
}
throw new Exception("ConfigurationEntry not found: last=" + last);
}, 10, sleepTime, "confIndex", LOG);
    // wait till the old leader persists the new conf
    JavaUtils.attemptRepeatedly(() -> {
      Assert.assertTrue(log.getFlushIndex() >= confIndex);
      return null;
    }, 10, sleepTime, "FLUSH", LOG);
    final long committed = log.getLastCommittedIndex();
    Assert.assertTrue(committed < confIndex);

    // unblock the old leader
    BlockRequestHandlingInjection.getInstance().unblockReplier(leaderId.toString());
    cluster.setBlockRequestsFrom(leaderId.toString(), false);

    // the client should get NotLeaderException
    clientThread.join(5000);
    Assert.assertTrue(gotNotLeader.get());

    // the old leader should have truncated the setConf from the log
    JavaUtils.attemptRepeatedly(() -> {
      Assert.assertTrue(log.getLastCommittedIndex() >= confIndex);
      return null;
    }, 10, ONE_SECOND, "COMMIT", LOG);
    Assert.assertTrue(log.get(confIndex).hasConfigurationEntry());
    log2 = null;
  } finally {
    RaftStorageTestUtils.printLog(log2, s -> LOG.info(s));
  }
}
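When the raw RaftClientRpc is used as in this test, leader changes are not handled automatically; the reply itself reports them. The sketch below illustrates how a caller might inspect the reply, assuming recent import paths (org.apache.ratis.protocol.exceptions) and that NotLeaderException exposes the suggested leader; the high-level RaftClient APIs perform this failover internally.

import org.apache.ratis.client.RaftClientRpc;
import org.apache.ratis.protocol.RaftClientReply;
import org.apache.ratis.protocol.RaftClientRequest;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.exceptions.NotLeaderException;

import java.io.IOException;

final class NotLeaderHandling {
  /**
   * Send a request through the low-level client RPC and surface a leader
   * change to the caller, including the leader suggested by the reply.
   */
  static RaftClientReply sendAndCheckLeader(RaftClientRpc rpc, RaftClientRequest request)
      throws IOException {
    final RaftClientReply reply = rpc.sendRequest(request);
    final NotLeaderException nle = reply.getNotLeaderException();
    if (nle != null) {
      final RaftPeer suggested = nle.getSuggestedLeader(); // may be null
      throw new IOException("Server is not the leader; suggested leader = " + suggested, nle);
    }
    return reply;
  }
}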