use of org.apache.ratis.protocol.RaftClientReply in project incubator-ratis by apache.
the class PauseCommand method run.
@Override
public int run(CommandLine cl) throws IOException {
  super.run(cl);
  final String strAddr = cl.getOptionValue(ADDRESS_OPTION_NAME);
  final RaftPeerId peerId = getRaftGroup().getPeers().stream()
      .filter(p -> p.getAddress().equals(strAddr))
      .findAny()
      .map(RaftPeer::getId)
      .orElse(null);
  if (peerId == null) {
    printf("Peer not found: %s", strAddr);
    return -1;
  }
  try (final RaftClient raftClient = RaftUtils.createClient(getRaftGroup())) {
    final RaftClientReply reply = raftClient.getLeaderElectionManagementApi(peerId).pause();
    processReply(reply, () -> String.format("Failed to pause leader election on peer %s", strAddr));
    printf("Successfully paused leader election on peer %s", strAddr);
  }
  return 0;
}
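The command leans on helpers from the Ratis shell framework (getRaftGroup, processReply, printf). For the same pause call outside the shell, here is a minimal standalone sketch; the group UUID and the peer id/address are made-up placeholders, and the default client RPC transport is assumed to be on the classpath:

import java.util.UUID;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.protocol.RaftClientReply;
import org.apache.ratis.protocol.RaftGroup;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeer;

public final class PauseElectionExample {
  public static void main(String[] args) throws Exception {
    // Placeholder peer and group; replace with your cluster's real values.
    final RaftPeer peer = RaftPeer.newBuilder()
        .setId("s1").setAddress("127.0.0.1:9872").build();
    final RaftGroup group = RaftGroup.valueOf(
        RaftGroupId.valueOf(UUID.fromString("02511d47-d67c-49a3-9011-abb3109a44c1")), peer);
    try (RaftClient client = RaftClient.newBuilder()
        .setProperties(new RaftProperties())
        .setRaftGroup(group)
        .build()) {
      // Pause leader election on the chosen peer and check the reply.
      final RaftClientReply reply = client.getLeaderElectionManagementApi(peer.getId()).pause();
      System.out.println("pause succeeded? " + reply.isSuccess());
    }
  }
}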
use of org.apache.ratis.protocol.RaftClientReply in project incubator-ratis by apache.
the class StepDownCommand method run.
@Override
public int run(CommandLine cl) throws IOException {
  super.run(cl);
  try (RaftClient client = RaftUtils.createClient(getRaftGroup())) {
    final RaftClientReply transferLeadershipReply = client.admin().transferLeadership(null, 60_000);
    processReply(transferLeadershipReply, () -> "Failed to step down leader");
  } catch (Throwable t) {
    printf("Caught an error when stepping down the leader: %s%n", t.getMessage());
    return -1;
  }
  println("Leader stepped down successfully");
  return 0;
}
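Passing null as the new leader is what turns transferLeadership into a step-down: the current leader yields without a named successor, and 60_000 is the request timeout in milliseconds. A sketch of that call in isolation, assuming a RaftClient built as in the pause example above (the wrapper class and method names are illustrative):

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftClientReply;

final class StepDownExample {
  // A null target asks the current leader to yield without naming a
  // successor; the second argument is the timeout in milliseconds.
  static boolean stepDown(RaftClient client) throws Exception {
    final RaftClientReply reply = client.admin().transferLeadership(null, 60_000);
    return reply.isSuccess();
  }
}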
use of org.apache.ratis.protocol.RaftClientReply in project incubator-ratis by apache.
the class TransferCommand method run.
@Override
public int run(CommandLine cl) throws IOException {
  super.run(cl);
  final String strAddr = cl.getOptionValue(ADDRESS_OPTION_NAME);
  RaftPeerId newLeaderId = null;
  // Raise the target peer's priority above the others to enable the transfer.
  final List<RaftPeer> peersWithNewPriorities = new ArrayList<>();
  for (RaftPeer peer : getRaftGroup().getPeers()) {
    final boolean isTarget = peer.getAddress().equals(strAddr);
    peersWithNewPriorities.add(RaftPeer.newBuilder(peer).setPriority(isTarget ? 2 : 1).build());
    if (isTarget) {
      newLeaderId = peer.getId();
    }
  }
  if (newLeaderId == null) {
    // The given address does not match any peer in the group.
    return -2;
  }
  try (RaftClient client = RaftUtils.createClient(getRaftGroup())) {
    final String stringPeers = "[" + peersWithNewPriorities.stream()
        .map(RaftPeer::toString).collect(Collectors.joining(", ")) + "]";
    printf("Applying new peer state before transferring leadership: %n%s%n", stringPeers);
    final RaftClientReply setConfigurationReply = client.admin().setConfiguration(peersWithNewPriorities);
    processReply(setConfigurationReply, () -> "Failed to set priorities before initiating election");
    // Transfer leadership to the re-prioritized peer.
    printf("Transferring leadership to server with address <%s>%n", strAddr);
    try {
      // Give the new configuration a moment to propagate before the transfer.
      Thread.sleep(3_000);
      final RaftClientReply transferLeadershipReply = client.admin().transferLeadership(newLeaderId, 60_000);
      processReply(transferLeadershipReply, () -> "Election failed");
    } catch (Throwable t) {
      printf("Caught an error when executing transfer: %s%n", t.getMessage());
      return -1;
    }
    println("Transferring leadership initiated");
  }
  return 0;
}
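The priority rewrite is the key step here: as the "update priorities" comment indicates, the command first makes the target the highest-priority peer via setConfiguration so that the transfer is allowed, then issues the transfer itself. Once it completes, the outcome can be confirmed by asking a peer for its role. A sketch of that check, assuming a RaftClient built as in the pause example; the GroupInfoReply/RoleInfoProto accessor chain follows recent Ratis releases:

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
import org.apache.ratis.protocol.GroupInfoReply;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeerId;

final class TransferCheckExample {
  // Ask one peer for its role to confirm the transfer landed where expected.
  static boolean isLeader(RaftClient client, RaftPeerId peerId, RaftGroupId groupId)
      throws Exception {
    final GroupInfoReply info = client.getGroupManagementApi(peerId).info(groupId);
    return info.getRoleInfoProto().getRole() == RaftPeerRole.LEADER;
  }
}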
use of org.apache.ratis.protocol.RaftClientReply in project incubator-ratis by apache.
the class ResumeCommand method run.
@Override
public int run(CommandLine cl) throws IOException {
  super.run(cl);
  final String strAddr = cl.getOptionValue(ADDRESS_OPTION_NAME);
  final RaftPeerId peerId = getRaftGroup().getPeers().stream()
      .filter(p -> p.getAddress().equals(strAddr))
      .findAny()
      .map(RaftPeer::getId)
      .orElse(null);
  if (peerId == null) {
    printf("Can't find a server with the address %s", strAddr);
    return -1;
  }
  try (final RaftClient raftClient = RaftUtils.createClient(getRaftGroup())) {
    final RaftClientReply reply = raftClient.getLeaderElectionManagementApi(peerId).resume();
    processReply(reply, () -> String.format("Failed to resume leader election on peer %s", strAddr));
    printf("Successfully resumed leader election on peer %s", strAddr);
  }
  return 0;
}
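pause() and resume() are the two halves of LeaderElectionManagementApi, so they pair naturally around maintenance on a single peer. A sketch of that pairing, again assuming a RaftClient built as in the first example (the helper class and method are illustrative):

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.client.api.LeaderElectionManagementApi;
import org.apache.ratis.protocol.RaftPeerId;

final class MaintenanceExample {
  // Fence one peer out of leader elections while maintenance runs on it,
  // then always re-enable elections, even if the maintenance work fails.
  static void withElectionPaused(RaftClient client, RaftPeerId peerId, Runnable maintenance)
      throws Exception {
    final LeaderElectionManagementApi api = client.getLeaderElectionManagementApi(peerId);
    api.pause();
    try {
      maintenance.run();
    } finally {
      api.resume();
    }
  }
}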
use of org.apache.ratis.protocol.RaftClientReply in project incubator-ratis by apache.
the class RaftSnapshotBaseTest method testBasicInstallSnapshot.
/**
 * Basic test for install snapshot: start a one-node cluster and let it
 * generate a snapshot. Then delete the log, restart the node, and add more
 * nodes as followers.
 */
@Test
public void testBasicInstallSnapshot() throws Exception {
  final List<LogSegmentPath> logs;
  int i = 0;
  try {
    RaftTestUtil.waitForLeader(cluster);
    final RaftPeerId leaderId = cluster.getLeader().getId();
    try (final RaftClient client = cluster.createClient(leaderId)) {
      for (; i < SNAPSHOT_TRIGGER_THRESHOLD * 2 - 1; i++) {
        RaftClientReply reply = client.io().send(new SimpleMessage("m" + i));
        Assert.assertTrue(reply.isSuccess());
      }
    }
    // wait for the snapshot to be done
    final long nextIndex = cluster.getLeader().getRaftLog().getNextIndex();
    LOG.info("nextIndex = {}", nextIndex);
    final List<File> snapshotFiles = getSnapshotFiles(cluster, nextIndex - SNAPSHOT_TRIGGER_THRESHOLD, nextIndex);
    JavaUtils.attemptRepeatedly(() -> {
      Assert.assertTrue(snapshotFiles.stream().anyMatch(RaftSnapshotBaseTest::exists));
      return null;
    }, 10, ONE_SECOND, "snapshotFile.exist", LOG);
    verifyTakeSnapshotMetric(cluster.getLeader());
    logs = LogSegmentPath.getLogSegmentPaths(cluster.getLeader().getRaftStorage());
  } finally {
    cluster.shutdown();
  }
  // delete the log segments from the leader
  for (LogSegmentPath path : logs) {
    FileUtils.delete(path.getPath());
  }
  // restart the peer
  LOG.info("Restarting the cluster");
  cluster.restart(false);
  try {
    assertLeaderContent(cluster);
    // generate some more traffic
    try (final RaftClient client = cluster.createClient(cluster.getLeader().getId())) {
      Assert.assertTrue(client.io().send(new SimpleMessage("m" + i)).isSuccess());
    }
    // add two more peers
    String[] newPeers = new String[] { "s3", "s4" };
    MiniRaftCluster.PeerChanges change = cluster.addNewPeers(newPeers, true, false);
    // trigger setConfiguration
    cluster.setConfiguration(change.allPeersInNewConf);
    for (String newPeer : newPeers) {
      final RaftServer.Division s = cluster.getDivision(RaftPeerId.valueOf(newPeer));
      SimpleStateMachine4Testing simpleStateMachine = SimpleStateMachine4Testing.get(s);
      Assert.assertSame(LifeCycle.State.RUNNING, simpleStateMachine.getLifeCycleState());
    }
    // verify the installSnapshot counter on the leader before restart
    verifyInstallSnapshotMetric(cluster.getLeader());
    RaftServerTestUtil.waitAndCheckNewConf(cluster, change.allPeersInNewConf, 0, null);
    Timer timer = getTakeSnapshotTimer(cluster.getLeader());
    long count = timer.getCount();
    // restart the peer and check if it can correctly handle conf change
    cluster.restartServer(cluster.getLeader().getId(), false);
    assertLeaderContent(cluster);
    // verify that a snapshot was taken when stopping the server
    Assert.assertTrue(count < timer.getCount());
  } finally {
    cluster.shutdown();
  }
}
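The test waits for the state machine to cross SNAPSHOT_TRIGGER_THRESHOLD so that a snapshot is taken automatically. Recent Ratis releases also let a client request one directly through the SnapshotManagementApi; a minimal sketch, assuming that API and a RaftClient built as in the pause example (the wrapper class and method are illustrative):

import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftClientReply;
import org.apache.ratis.protocol.RaftPeerId;

final class SnapshotExample {
  // create(timeoutMs) asks the state machine on peerId to take a snapshot;
  // the reply's log index is the last index the snapshot covers.
  static long takeSnapshotNow(RaftClient client, RaftPeerId peerId) throws Exception {
    final RaftClientReply reply = client.getSnapshotManagementApi(peerId).create(3_000);
    return reply.getLogIndex();
  }
}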