Use of com.alipay.sofa.jraft.Node in project sofa-jraft by sofastack.
The class NodeTest, method readCommittedUserLog.
@Test
public void readCommittedUserLog() throws Exception {
    // setup cluster
    final List<PeerId> peers = TestUtils.generatePeers(3);
    final TestCluster cluster = new TestCluster("unitest", this.dataPath, peers, 1000);
    for (final PeerId peer : peers) {
        assertTrue(cluster.start(peer.getEndpoint()));
    }
    cluster.waitLeader();

    final Node leader = cluster.getLeader();
    assertNotNull(leader);
    this.sendTestTaskAndWait(leader);

    // index == 1 is a CONFIGURATION log, so real_index will be 2 when returned.
    UserLog userLog = leader.readCommittedUserLog(1);
    assertNotNull(userLog);
    assertEquals(2, userLog.getIndex());
    assertEquals("hello0", new String(userLog.getData().array()));

    // index == 5 is a DATA log (a user log)
    userLog = leader.readCommittedUserLog(5);
    assertNotNull(userLog);
    assertEquals(5, userLog.getIndex());
    assertEquals("hello3", new String(userLog.getData().array()));

    // index == 15 is greater than last_committed_index
    try {
        assertNull(leader.readCommittedUserLog(15));
        fail();
    } catch (final LogIndexOutOfBoundsException e) {
        assertEquals(e.getMessage(), "Request index 15 is greater than lastAppliedIndex: 11");
    }

    // index == 0 is an invalid request
    try {
        assertNull(leader.readCommittedUserLog(0));
        fail();
    } catch (final LogIndexOutOfBoundsException e) {
        assertEquals(e.getMessage(), "Request index is invalid: 0");
    }

    LOG.info("Trigger leader snapshot");
    CountDownLatch latch = new CountDownLatch(1);
    leader.snapshot(new ExpectClosure(latch));
    waitLatch(latch);

    // remove and add a peer to add two CONFIGURATION logs
    final List<Node> followers = cluster.getFollowers();
    assertEquals(2, followers.size());
    final Node testFollower = followers.get(0);
    latch = new CountDownLatch(1);
    leader.removePeer(testFollower.getNodeId().getPeerId(), new ExpectClosure(latch));
    waitLatch(latch);
    latch = new CountDownLatch(1);
    leader.addPeer(testFollower.getNodeId().getPeerId(), new ExpectClosure(latch));
    waitLatch(latch);

    this.sendTestTaskAndWait(leader, 10, RaftError.SUCCESS);

    // trigger leader snapshot for the second time; after this the logs at index 1~11 will be deleted.
    LOG.info("Trigger leader snapshot");
    latch = new CountDownLatch(1);
    leader.snapshot(new ExpectClosure(latch));
    waitLatch(latch);
    Thread.sleep(100);

    // the log at index == 5 has been deleted in log_storage.
    try {
        leader.readCommittedUserLog(5);
        fail();
    } catch (final LogNotFoundException e) {
        assertEquals("User log is deleted at index: 5", e.getMessage());
    }

    // index == 12, 13, 14 and 15 are 4 CONFIGURATION logs (joint consensus), so real_index will be 16 when returned.
    userLog = leader.readCommittedUserLog(12);
    assertNotNull(userLog);
    assertEquals(16, userLog.getIndex());
    assertEquals("hello10", new String(userLog.getData().array()));

    // now index == 17 is a user log
    userLog = leader.readCommittedUserLog(17);
    assertNotNull(userLog);
    assertEquals(17, userLog.getIndex());
    assertEquals("hello11", new String(userLog.getData().array()));

    cluster.ensureSame();
    assertEquals(3, cluster.getFsms().size());
    for (final MockStateMachine fsm : cluster.getFsms()) {
        assertEquals(20, fsm.getLogs().size());
        for (int i = 0; i < 20; i++) {
            assertEquals("hello" + i, new String(fsm.getLogs().get(i).array()));
        }
    }
    cluster.stopAll();
}
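For context, here is a minimal sketch of how application code might call Node#readCommittedUserLog and handle the two failure modes exercised above. The helper class, the node reference and the requested index are illustrative assumptions; only Node, UserLog and the two exception types come from sofa-jraft (import paths assumed from the project layout).

import com.alipay.sofa.jraft.Node;
import com.alipay.sofa.jraft.entity.UserLog;
import com.alipay.sofa.jraft.error.LogIndexOutOfBoundsException;
import com.alipay.sofa.jraft.error.LogNotFoundException;

public class ReadUserLogExample {

    // `node` is assumed to be an already started node of the group.
    public static void printUserLog(final Node node, final long requestIndex) {
        try {
            // The returned index can be larger than the requested one when the
            // requested position holds CONFIGURATION entries, as the test shows.
            final UserLog userLog = node.readCommittedUserLog(requestIndex);
            System.out.println("index=" + userLog.getIndex() + ", data="
                + new String(userLog.getData().array()));
        } catch (final LogIndexOutOfBoundsException e) {
            // Requested index is 0 or greater than lastAppliedIndex.
            System.err.println("Out of bounds: " + e.getMessage());
        } catch (final LogNotFoundException e) {
            // The entry has already been compacted away by a snapshot.
            System.err.println("Deleted: " + e.getMessage());
        }
    }
}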
Use of com.alipay.sofa.jraft.Node in project sofa-jraft by sofastack.
The class NodeTest, method testTripleNodesWithLearners.
@Test
public void testTripleNodesWithLearners() throws Exception {
    final List<PeerId> peers = TestUtils.generatePeers(3);
    final TestCluster cluster = new TestCluster("unittest", this.dataPath, peers);
    for (final PeerId peer : peers) {
        assertTrue(cluster.start(peer.getEndpoint()));
    }
    // elect leader
    cluster.waitLeader();
    // get leader
    final Node leader = cluster.getLeader();
    assertNotNull(leader);
    assertEquals(3, leader.listPeers().size());
    assertTrue(leader.listLearners().isEmpty());
    assertTrue(leader.listAliveLearners().isEmpty());
    {
        // Adds a learner
        SynchronizedClosure done = new SynchronizedClosure();
        PeerId learnerPeer = new PeerId(TestUtils.getMyIp(), TestUtils.INIT_PORT + 3);
        // Start learner
        assertTrue(cluster.startLearner(learnerPeer));
        leader.addLearners(Arrays.asList(learnerPeer), done);
        assertTrue(done.await().isOk());
        assertEquals(1, leader.listAliveLearners().size());
        assertEquals(1, leader.listLearners().size());
    }
    // apply tasks to leader
    this.sendTestTaskAndWait(leader);
    {
        final ByteBuffer data = ByteBuffer.wrap("no closure".getBytes());
        final Task task = new Task(data, null);
        leader.apply(task);
    }
    {
        // task with TaskClosure
        final ByteBuffer data = ByteBuffer.wrap("task closure".getBytes());
        final Vector<String> cbs = new Vector<>();
        final CountDownLatch latch = new CountDownLatch(1);
        final Task task = new Task(data, new TaskClosure() {

            @Override
            public void run(final Status status) {
                cbs.add("apply");
                latch.countDown();
            }

            @Override
            public void onCommitted() {
                cbs.add("commit");
            }
        });
        leader.apply(task);
        latch.await();
        assertEquals(2, cbs.size());
        assertEquals("commit", cbs.get(0));
        assertEquals("apply", cbs.get(1));
    }
    assertEquals(4, cluster.getFsms().size());
    assertEquals(2, cluster.getFollowers().size());
    assertEquals(1, cluster.getLearners().size());
    cluster.ensureSame(-1);
    {
        // Adds another learner
        SynchronizedClosure done = new SynchronizedClosure();
        PeerId learnerPeer = new PeerId(TestUtils.getMyIp(), TestUtils.INIT_PORT + 4);
        // Start learner
        assertTrue(cluster.startLearner(learnerPeer));
        leader.addLearners(Arrays.asList(learnerPeer), done);
        assertTrue(done.await().isOk());
        assertEquals(2, leader.listAliveLearners().size());
        assertEquals(2, leader.listLearners().size());
    }
    {
        // stop two followers
        for (Node follower : cluster.getFollowers()) {
            assertTrue(cluster.stop(follower.getNodeId().getPeerId().getEndpoint()));
        }
        // send a new task
        final ByteBuffer data = ByteBuffer.wrap("task closure".getBytes());
        SynchronizedClosure done = new SynchronizedClosure();
        leader.apply(new Task(data, done));
        // should fail
        assertFalse(done.await().isOk());
        assertEquals(RaftError.EPERM, done.getStatus().getRaftError());
        // One peer with two learners.
        assertEquals(3, cluster.getFsms().size());
        cluster.ensureSame(-1);
    }
    cluster.stopAll();
}
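As a usage note, here is a minimal sketch of adding a learner to a running group through the same Node#addLearners API, using a plain Closure plus CountDownLatch instead of the test-only SynchronizedClosure. The helper class and the learner endpoint are illustrative assumptions.

import java.util.Collections;
import java.util.concurrent.CountDownLatch;

import com.alipay.sofa.jraft.Node;
import com.alipay.sofa.jraft.Status;
import com.alipay.sofa.jraft.entity.PeerId;

public class AddLearnerExample {

    // `leader` is assumed to be the current leader; the learner endpoint is a placeholder.
    public static void addLearner(final Node leader) throws InterruptedException {
        final PeerId learner = new PeerId("127.0.0.1", 8084);
        final CountDownLatch latch = new CountDownLatch(1);
        // Learners replicate the log but do not vote and do not count towards the quorum.
        leader.addLearners(Collections.singletonList(learner), (final Status status) -> {
            if (!status.isOk()) {
                System.err.println("Failed to add learner: " + status);
            }
            latch.countDown();
        });
        latch.await();
        System.out.println("Learners now: " + leader.listLearners());
    }
}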
Use of com.alipay.sofa.jraft.Node in project sofa-jraft by sofastack.
The class NodeTest, method testSetPeer1.
@Test
public void testSetPeer1() throws Exception {
    final TestCluster cluster = new TestCluster("testSetPeer1", this.dataPath, new ArrayList<>());
    final PeerId bootPeer = new PeerId(TestUtils.getMyIp(), TestUtils.INIT_PORT);
    assertTrue(cluster.start(bootPeer.getEndpoint()));
    final List<Node> nodes = cluster.getFollowers();
    assertEquals(1, nodes.size());
    final List<PeerId> peers = new ArrayList<>();
    peers.add(bootPeer);
    // reset peers from an empty configuration
    assertTrue(nodes.get(0).resetPeers(new Configuration(peers)).isOk());
    cluster.waitLeader();
    assertNotNull(cluster.getLeader());
    cluster.stopAll();
}
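A minimal sketch of the same Node#resetPeers call outside a test follows; the helper class and the surviving peer address are assumptions. resetPeers sets the configuration on the local node directly rather than going through a replicated membership change, so it is generally reserved for recovery or bootstrap scenarios like the one shown above.

import com.alipay.sofa.jraft.Node;
import com.alipay.sofa.jraft.Status;
import com.alipay.sofa.jraft.conf.Configuration;
import com.alipay.sofa.jraft.entity.PeerId;

public class ResetPeersExample {

    // `node` is assumed to be a started node; the peer below is a placeholder
    // for the member(s) the group should be reset to.
    public static boolean resetToSinglePeer(final Node node) {
        final Configuration newConf = new Configuration();
        newConf.addPeer(new PeerId("127.0.0.1", 8081));
        final Status status = node.resetPeers(newConf);
        return status.isOk();
    }
}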
Use of com.alipay.sofa.jraft.Node in project sofa-jraft by sofastack.
The class NodeTest, method testAppendEntriesWhenFollowerIsInErrorState.
@Test
public void testAppendEntriesWhenFollowerIsInErrorState() throws Exception {
    // start five nodes
    final List<PeerId> peers = TestUtils.generatePeers(5);
    final TestCluster cluster = new TestCluster("unitest", this.dataPath, peers, 1000);
    for (final PeerId peer : peers) {
        assertTrue(cluster.start(peer.getEndpoint()));
    }
    cluster.waitLeader();
    final Node oldLeader = cluster.getLeader();
    assertNotNull(oldLeader);
    // apply something
    this.sendTestTaskAndWait(oldLeader);

    // set one follower into error state
    final List<Node> followers = cluster.getFollowers();
    assertEquals(4, followers.size());
    final Node errorNode = followers.get(0);
    final PeerId errorPeer = errorNode.getNodeId().getPeerId().copy();
    final Endpoint errorFollowerAddr = errorPeer.getEndpoint();
    LOG.info("Set follower {} into error state", errorNode);
    ((NodeImpl) errorNode).onError(new RaftException(EnumOutter.ErrorType.ERROR_TYPE_STATE_MACHINE,
        new Status(-1, "Follower has something wrong.")));

    // increase term by stopping leader and electing a new leader again
    final Endpoint oldLeaderAddr = oldLeader.getNodeId().getPeerId().getEndpoint().copy();
    assertTrue(cluster.stop(oldLeaderAddr));
    cluster.waitLeader();
    final Node leader = cluster.getLeader();
    assertNotNull(leader);
    LOG.info("Elect a new leader {}", leader);
    // apply something again
    this.sendTestTaskAndWait(leader, 10, RaftError.SUCCESS);

    // stop error follower
    Thread.sleep(20);
    LOG.info("Stop error follower {}", errorNode);
    assertTrue(cluster.stop(errorFollowerAddr));
    // restart error follower and old leader
    LOG.info("Restart error follower {} and old leader {}", errorFollowerAddr, oldLeaderAddr);
    assertTrue(cluster.start(errorFollowerAddr));
    assertTrue(cluster.start(oldLeaderAddr));
    cluster.ensureSame();
    assertEquals(5, cluster.getFsms().size());
    for (final MockStateMachine fsm : cluster.getFsms()) {
        assertEquals(20, fsm.getLogs().size());
    }
    cluster.stopAll();
}
Use of com.alipay.sofa.jraft.Node in project sofa-jraft by sofastack.
The class NodeTest, method testRollbackStateMachineWithReadIndex_Issue317.
/**
 * Test rollback stateMachine with readIndex for issue 317:
 * https://github.com/sofastack/sofa-jraft/issues/317
 */
@Test
public void testRollbackStateMachineWithReadIndex_Issue317() throws Exception {
    final Endpoint addr = new Endpoint(TestUtils.getMyIp(), TestUtils.INIT_PORT);
    final PeerId peer = new PeerId(addr, 0);
    NodeManager.getInstance().addAddress(addr);
    final NodeOptions nodeOptions = createNodeOptionsWithSharedTimer();
    final CountDownLatch applyCompleteLatch = new CountDownLatch(1);
    final CountDownLatch applyLatch = new CountDownLatch(1);
    final CountDownLatch readIndexLatch = new CountDownLatch(1);
    final AtomicInteger currentValue = new AtomicInteger(-1);
    final String errorMsg = this.testName.getMethodName();
    final StateMachine fsm = new StateMachineAdapter() {

        @Override
        public void onApply(final Iterator iter) {
            // Notify that #onApply is preparing to go.
            readIndexLatch.countDown();
            // Wait for a read-index request to be submitted.
            try {
                applyLatch.await();
            } catch (InterruptedException e) {
                fail();
            }
            int i = 0;
            while (iter.hasNext()) {
                byte[] data = iter.next().array();
                int v = Bits.getInt(data, 0);
                assertEquals(i++, v);
                currentValue.set(v);
            }
            if (i > 0) {
                // rollback
                currentValue.set(i - 1);
                iter.setErrorAndRollback(1, new Status(-1, errorMsg));
                applyCompleteLatch.countDown();
            }
        }
    };
    nodeOptions.setFsm(fsm);
    nodeOptions.setLogUri(this.dataPath + File.separator + "log");
    nodeOptions.setRaftMetaUri(this.dataPath + File.separator + "meta");
    nodeOptions.setSnapshotUri(this.dataPath + File.separator + "snapshot");
    nodeOptions.setInitialConf(new Configuration(Collections.singletonList(peer)));

    final Node node = new NodeImpl("unittest", peer);
    assertTrue(node.init(nodeOptions));
    assertEquals(1, node.listPeers().size());
    assertTrue(node.listPeers().contains(peer));
    while (!node.isLeader()) {
        ;
    }

    int n = 5;
    {
        // apply tasks
        for (int i = 0; i < n; i++) {
            byte[] b = new byte[4];
            Bits.putInt(b, 0, i);
            node.apply(new Task(ByteBuffer.wrap(b), null));
        }
    }

    final AtomicInteger readIndexSuccesses = new AtomicInteger(0);
    {
        // Submit a read-index request, waiting for #onApply first.
        readIndexLatch.await();
        final CountDownLatch latch = new CountDownLatch(1);
        node.readIndex(null, new ReadIndexClosure() {

            @Override
            public void run(final Status status, final long index, final byte[] reqCtx) {
                try {
                    if (status.isOk()) {
                        readIndexSuccesses.incrementAndGet();
                    } else {
                        assertTrue("Unexpected status: " + status,
                            status.getErrorMsg().contains(errorMsg)
                                    || status.getRaftError() == RaftError.ETIMEDOUT
                                    || status.getErrorMsg().contains("Invalid state for readIndex: STATE_ERROR"));
                    }
                } finally {
                    latch.countDown();
                }
            }
        });
        // We have already submitted a read-index request,
        // notify that #onApply can go right now.
        applyLatch.countDown();
        // The state machine is in error state, the node should step down.
        while (node.isLeader()) {
            Thread.sleep(10);
        }
        latch.await();
        applyCompleteLatch.await();
    }
    // No read-index request succeeded.
    assertEquals(0, readIndexSuccesses.get());
    assertTrue(n - 1 >= currentValue.get());
    node.shutdown();
    node.join();
}
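Finally, a minimal sketch of the Node#readIndex pattern the test above exercises, used for linearizable reads without appending to the log. The helper class is an assumption, and the actual read of application state is left as a comment.

import com.alipay.sofa.jraft.Node;
import com.alipay.sofa.jraft.Status;
import com.alipay.sofa.jraft.closure.ReadIndexClosure;

public class ReadIndexExample {

    // `node` is assumed to be a started node of the group.
    public static void linearizableRead(final Node node) {
        // A request context (here null) is echoed back to the closure via reqCtx.
        node.readIndex(null, new ReadIndexClosure() {

            @Override
            public void run(final Status status, final long index, final byte[] reqCtx) {
                if (status.isOk()) {
                    // The commit index has been confirmed; serve the read from the
                    // local state machine here.
                    System.out.println("safe to read at index " + index);
                } else {
                    // e.g. leadership changed or the request timed out:
                    // retry, or fall back to reading on the leader.
                    System.err.println("readIndex failed: " + status);
                }
            }
        });
    }
}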