Use of com.alipay.sofa.jraft.closure.SynchronizedClosure in project sofa-jraft by sofastack.
The class SnapshotExecutorTest, method testDoSnapshotWithIntervalDist:
@Test
public void testDoSnapshotWithIntervalDist() throws Exception {
    final NodeOptions nodeOptions = new NodeOptions();
    nodeOptions.setSnapshotLogIndexMargin(5);
    Mockito.when(this.node.getOptions()).thenReturn(nodeOptions);
    Mockito.when(this.fSMCaller.getLastAppliedIndex()).thenReturn(6L);
    final ArgumentCaptor<SaveSnapshotClosure> saveSnapshotClosureArg = ArgumentCaptor.forClass(SaveSnapshotClosure.class);
    Mockito.when(this.fSMCaller.onSnapshotSave(saveSnapshotClosureArg.capture())).thenReturn(true);
    final SynchronizedClosure done = new SynchronizedClosure();
    this.executor.doSnapshot(done);
    final SaveSnapshotClosure closure = saveSnapshotClosureArg.getValue();
    assertNotNull(closure);
    closure.start(RaftOutter.SnapshotMeta.newBuilder().setLastIncludedIndex(6).setLastIncludedTerm(1).build());
    closure.run(Status.OK());
    done.await();
    this.executor.join();
    assertEquals(1, this.executor.getLastSnapshotTerm());
    assertEquals(6, this.executor.getLastSnapshotIndex());
}
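The test above drives a mocked SnapshotExecutor, but the same blocking pattern applies to a live node: create a SynchronizedClosure, pass it to an asynchronous call, then block on await() until the callback fires. A minimal sketch, assuming an already started com.alipay.sofa.jraft.Node instance named node (not part of the snippet above):

    // Minimal sketch: trigger a snapshot on a live node and block until it completes.
    // `node` is assumed to be a started com.alipay.sofa.jraft.Node.
    final SynchronizedClosure done = new SynchronizedClosure();
    node.snapshot(done);                 // asynchronous call; `done` is invoked when the snapshot finishes
    final Status status = done.await();  // block the calling thread until run(Status) is called
    if (!status.isOk()) {
        System.err.println("Snapshot failed: " + status);
    }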
Use of com.alipay.sofa.jraft.closure.SynchronizedClosure in project sofa-jraft by sofastack.
The class NodeTest, method testReadIndexFromLearner:
@Test
public void testReadIndexFromLearner() throws Exception {
    final List<PeerId> peers = TestUtils.generatePeers(3);
    final TestCluster cluster = new TestCluster("unittest", this.dataPath, peers);
    for (final PeerId peer : peers) {
        assertTrue(cluster.start(peer.getEndpoint(), false, 300, true));
    }
    // elect leader
    cluster.waitLeader();
    // get leader
    final Node leader = cluster.getLeader();
    assertNotNull(leader);
    assertEquals(3, leader.listPeers().size());
    // apply tasks to leader
    this.sendTestTaskAndWait(leader);
    {
        // Adds a learner
        SynchronizedClosure done = new SynchronizedClosure();
        PeerId learnerPeer = new PeerId(TestUtils.getMyIp(), TestUtils.INIT_PORT + 3);
        // Start learner
        assertTrue(cluster.startLearner(learnerPeer));
        leader.addLearners(Arrays.asList(learnerPeer), done);
        assertTrue(done.await().isOk());
        assertEquals(1, leader.listAliveLearners().size());
        assertEquals(1, leader.listLearners().size());
    }
    Thread.sleep(100);
    // read from learner
    Node learner = cluster.getNodes().get(3);
    assertNotNull(learner);
    assertReadIndex(learner, 12);
    assertReadIndex(learner, 12);
    cluster.stopAll();
}
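The assertReadIndex helper called above is defined elsewhere in NodeTest and is not shown on this page. A plausible sketch of what it does, assuming the Node#readIndex(byte[], ReadIndexClosure) API; the helper name matches the calls above, but the request-context bytes and internal details are illustrative guesses:

    // Hypothetical reconstruction of the assertReadIndex helper referenced above.
    // It issues a linearizable read on the given node and checks the returned read index.
    private void assertReadIndex(final Node node, final int expectedIndex) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final byte[] requestContext = new byte[] { 1, 2, 3 }; // arbitrary context echoed back by readIndex
        node.readIndex(requestContext, new ReadIndexClosure() {
            @Override
            public void run(final Status status, final long index, final byte[] reqCtx) {
                assertTrue(status.getErrorMsg(), status.isOk());
                assertEquals(expectedIndex, index);
                assertArrayEquals(requestContext, reqCtx);
                latch.countDown();
            }
        });
        latch.await();
    }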
Use of com.alipay.sofa.jraft.closure.SynchronizedClosure in project sofa-jraft by sofastack.
The class NodeTest, method testChangePeersChaosWithSnapshot:
@Test
public void testChangePeersChaosWithSnapshot() throws Exception {
    // start cluster
    final List<PeerId> peers = new ArrayList<>();
    peers.add(new PeerId("127.0.0.1", TestUtils.INIT_PORT));
    final TestCluster cluster = new TestCluster("unittest", this.dataPath, peers, 1000);
    assertTrue(cluster.start(peers.get(0).getEndpoint(), false, 2));
    // start other peers
    for (int i = 1; i < 10; i++) {
        final PeerId peer = new PeerId("127.0.0.1", TestUtils.INIT_PORT + i);
        peers.add(peer);
        assertTrue(cluster.start(peer.getEndpoint()));
    }
    final ChangeArg arg = new ChangeArg(cluster, peers, false, false);
    final Future<?> future = startChangePeersThread(arg);
    for (int i = 0; i < 5000; ) {
        cluster.waitLeader();
        final Node leader = cluster.getLeader();
        if (leader == null) {
            continue;
        }
        final SynchronizedClosure done = new SynchronizedClosure();
        final Task task = new Task(ByteBuffer.wrap(("hello" + i).getBytes()), done);
        leader.apply(task);
        final Status status = done.await();
        if (status.isOk()) {
            if (++i % 100 == 0) {
                System.out.println("Progress:" + i);
            }
        } else {
            assertEquals(RaftError.EPERM, status.getRaftError());
        }
    }
    arg.stop = true;
    future.get();
    cluster.waitLeader();
    final SynchronizedClosure done = new SynchronizedClosure();
    final Node leader = cluster.getLeader();
    leader.changePeers(new Configuration(peers), done);
    final Status st = done.await();
    assertTrue(st.getErrorMsg(), st.isOk());
    cluster.ensureSame();
    assertEquals(10, cluster.getFsms().size());
    for (final MockStateMachine fsm : cluster.getFsms()) {
        assertTrue(fsm.getLogs().size() >= 5000);
    }
    cluster.stopAll();
}
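ChangeArg and startChangePeersThread are test helpers defined elsewhere in NodeTest and are not shown on this page. A rough sketch of the pattern they implement, assuming the field names visible in the usage above; the constructor flag names, randomization strategy, and error handling are illustrative guesses, not the project's actual implementation:

    // Hypothetical sketch of the change-peers chaos helpers: a background task that
    // repeatedly asks the current leader to move to a random subset of peers until stopped.
    static class ChangeArg {
        final TestCluster  cluster;
        final List<PeerId> peers;
        volatile boolean   stop;                // set to true by the test to end the chaos loop
        final boolean      dontRemoveFirstPeer; // assumed meaning of the fourth constructor argument

        ChangeArg(TestCluster cluster, List<PeerId> peers, boolean stop, boolean dontRemoveFirstPeer) {
            this.cluster = cluster;
            this.peers = peers;
            this.stop = stop;
            this.dontRemoveFirstPeer = dontRemoveFirstPeer;
        }
    }

    private Future<?> startChangePeersThread(final ChangeArg arg) {
        return Utils.runInThread(() -> {
            try {
                final Random random = new Random();
                while (!arg.stop) {
                    arg.cluster.waitLeader();
                    final Node leader = arg.cluster.getLeader();
                    if (leader == null) {
                        continue;
                    }
                    // pick a random, non-empty subset of the known peers
                    final Configuration conf = new Configuration();
                    for (final PeerId peer : arg.peers) {
                        if (random.nextBoolean()) {
                            conf.addPeer(peer);
                        }
                    }
                    if (conf.isEmpty()) {
                        continue;
                    }
                    final SynchronizedClosure done = new SynchronizedClosure();
                    leader.changePeers(conf, done);
                    done.await(); // per-round failures are ignored; the loop just keeps churning membership
                }
            } catch (final InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
    }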
Use of com.alipay.sofa.jraft.closure.SynchronizedClosure in project sofa-jraft by sofastack.
The class NodeTest, method testChangePeersChaosApplyTasks:
@Test
public void testChangePeersChaosApplyTasks() throws Exception {
    // start cluster
    final List<PeerId> peers = new ArrayList<>();
    peers.add(new PeerId("127.0.0.1", TestUtils.INIT_PORT));
    final TestCluster cluster = new TestCluster("unittest", this.dataPath, peers, 1000);
    assertTrue(cluster.start(peers.get(0).getEndpoint(), false, 100000));
    // start other peers
    for (int i = 1; i < 10; i++) {
        final PeerId peer = new PeerId("127.0.0.1", TestUtils.INIT_PORT + i);
        peers.add(peer);
        assertTrue(cluster.start(peer.getEndpoint(), true, 100000));
    }
    final int threads = 3;
    final List<ChangeArg> args = new ArrayList<>();
    final List<Future<?>> futures = new ArrayList<>();
    final CountDownLatch latch = new CountDownLatch(threads);
    for (int t = 0; t < threads; t++) {
        final ChangeArg arg = new ChangeArg(cluster, peers, false, true);
        args.add(arg);
        futures.add(startChangePeersThread(arg));
        Utils.runInThread(() -> {
            try {
                for (int i = 0; i < 5000; ) {
                    cluster.waitLeader();
                    final Node leader = cluster.getLeader();
                    if (leader == null) {
                        continue;
                    }
                    final SynchronizedClosure done = new SynchronizedClosure();
                    final Task task = new Task(ByteBuffer.wrap(("hello" + i).getBytes()), done);
                    leader.apply(task);
                    final Status status = done.await();
                    if (status.isOk()) {
                        if (++i % 100 == 0) {
                            System.out.println("Progress:" + i);
                        }
                    } else {
                        assertEquals(RaftError.EPERM, status.getRaftError());
                    }
                }
            } catch (final Exception e) {
                e.printStackTrace();
            } finally {
                latch.countDown();
            }
        });
    }
    latch.await();
    for (final ChangeArg arg : args) {
        arg.stop = true;
    }
    for (final Future<?> future : futures) {
        future.get();
    }
    cluster.waitLeader();
    final SynchronizedClosure done = new SynchronizedClosure();
    final Node leader = cluster.getLeader();
    leader.changePeers(new Configuration(peers), done);
    assertTrue(done.await().isOk());
    cluster.ensureSame();
    assertEquals(10, cluster.getFsms().size());
    try {
        for (final MockStateMachine fsm : cluster.getFsms()) {
            final int logSize = fsm.getLogs().size();
            assertTrue("logSize= " + logSize, logSize >= 5000 * threads);
            assertTrue("logSize= " + logSize, logSize - 5000 * threads < 100);
        }
    } finally {
        cluster.stopAll();
    }
}
Use of com.alipay.sofa.jraft.closure.SynchronizedClosure in project sofa-jraft by sofastack.
The class NodeTest, method testChangePeersChaosWithoutSnapshot:
@Test
public void testChangePeersChaosWithoutSnapshot() throws Exception {
    // start cluster
    final List<PeerId> peers = new ArrayList<>();
    peers.add(new PeerId("127.0.0.1", TestUtils.INIT_PORT));
    final TestCluster cluster = new TestCluster("unittest", this.dataPath, peers, 1000);
    assertTrue(cluster.start(peers.get(0).getEndpoint(), false, 100000));
    // start other peers
    for (int i = 1; i < 10; i++) {
        final PeerId peer = new PeerId("127.0.0.1", TestUtils.INIT_PORT + i);
        peers.add(peer);
        assertTrue(cluster.start(peer.getEndpoint(), true, 10000));
    }
    final ChangeArg arg = new ChangeArg(cluster, peers, false, true);
    final Future<?> future = startChangePeersThread(arg);
    final int tasks = 5000;
    for (int i = 0; i < tasks; ) {
        cluster.waitLeader();
        final Node leader = cluster.getLeader();
        if (leader == null) {
            continue;
        }
        final SynchronizedClosure done = new SynchronizedClosure();
        final Task task = new Task(ByteBuffer.wrap(("hello" + i).getBytes()), done);
        leader.apply(task);
        final Status status = done.await();
        if (status.isOk()) {
            if (++i % 100 == 0) {
                System.out.println("Progress:" + i);
            }
        } else {
            assertEquals(RaftError.EPERM, status.getRaftError());
        }
    }
    arg.stop = true;
    future.get();
    cluster.waitLeader();
    final SynchronizedClosure done = new SynchronizedClosure();
    final Node leader = cluster.getLeader();
    leader.changePeers(new Configuration(peers), done);
    assertTrue(done.await().isOk());
    cluster.ensureSame();
    assertEquals(10, cluster.getFsms().size());
    for (final MockStateMachine fsm : cluster.getFsms()) {
        assertTrue(fsm.getLogs().size() >= tasks);
        assertTrue(fsm.getLogs().size() - tasks < 100);
    }
    cluster.stopAll();
}