use of org.opendaylight.controller.cluster.raft.persisted.Snapshot in project controller by opendaylight.
the class RaftActorTest method testGetSnapshot.
@Test
public void testGetSnapshot() throws Exception {
TEST_LOG.info("testGetSnapshot starting");
final TestKit kit = new TestKit(getSystem());
String persistenceId = factory.generateActorId("test-actor-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
long term = 3;
long seqN = 1;
InMemoryJournal.addEntry(persistenceId, seqN++, new UpdateElectionTerm(term, "member-1"));
InMemoryJournal.addEntry(persistenceId, seqN++, new SimpleReplicatedLogEntry(0, term, new MockRaftActorContext.MockPayload("A")));
InMemoryJournal.addEntry(persistenceId, seqN++, new SimpleReplicatedLogEntry(1, term, new MockRaftActorContext.MockPayload("B")));
InMemoryJournal.addEntry(persistenceId, seqN++, new ApplyJournalEntries(1));
InMemoryJournal.addEntry(persistenceId, seqN++, new SimpleReplicatedLogEntry(2, term, new MockRaftActorContext.MockPayload("C")));
TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId, ImmutableMap.<String, String>builder().put("member1", "address").build(), config).withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
MockRaftActor mockRaftActor = raftActorRef.underlyingActor();
mockRaftActor.waitForRecoveryComplete();
mockRaftActor.snapshotCohortDelegate = mock(RaftActorSnapshotCohort.class);
raftActorRef.tell(GetSnapshot.INSTANCE, kit.getRef());
ArgumentCaptor<ActorRef> replyActor = ArgumentCaptor.forClass(ActorRef.class);
verify(mockRaftActor.snapshotCohortDelegate, timeout(5000)).createSnapshot(replyActor.capture(), eq(java.util.Optional.empty()));
byte[] stateSnapshot = new byte[] { 1, 2, 3 };
replyActor.getValue().tell(new CaptureSnapshotReply(ByteState.of(stateSnapshot), java.util.Optional.empty()), ActorRef.noSender());
GetSnapshotReply reply = kit.expectMsgClass(GetSnapshotReply.class);
assertEquals("getId", persistenceId, reply.getId());
Snapshot replySnapshot = reply.getSnapshot();
assertEquals("getElectionTerm", term, replySnapshot.getElectionTerm());
assertEquals("getElectionVotedFor", "member-1", replySnapshot.getElectionVotedFor());
assertEquals("getLastAppliedIndex", 1L, replySnapshot.getLastAppliedIndex());
assertEquals("getLastAppliedTerm", term, replySnapshot.getLastAppliedTerm());
assertEquals("getLastIndex", 2L, replySnapshot.getLastIndex());
assertEquals("getLastTerm", term, replySnapshot.getLastTerm());
assertEquals("getState", ByteState.of(stateSnapshot), replySnapshot.getState());
assertEquals("getUnAppliedEntries size", 1, replySnapshot.getUnAppliedEntries().size());
assertEquals("UnApplied entry index ", 2L, replySnapshot.getUnAppliedEntries().get(0).getIndex());
// Test with timeout
mockRaftActor.getSnapshotMessageSupport().setSnapshotReplyActorTimeout(Duration.create(200, TimeUnit.MILLISECONDS));
reset(mockRaftActor.snapshotCohortDelegate);
raftActorRef.tell(GetSnapshot.INSTANCE, kit.getRef());
Failure failure = kit.expectMsgClass(akka.actor.Status.Failure.class);
assertEquals("Failure cause type", TimeoutException.class, failure.cause().getClass());
mockRaftActor.getSnapshotMessageSupport().setSnapshotReplyActorTimeout(Duration.create(30, TimeUnit.SECONDS));
// Test with persistence disabled.
mockRaftActor.setPersistence(false);
reset(mockRaftActor.snapshotCohortDelegate);
raftActorRef.tell(GetSnapshot.INSTANCE, kit.getRef());
reply = kit.expectMsgClass(GetSnapshotReply.class);
verify(mockRaftActor.snapshotCohortDelegate, never()).createSnapshot(anyObject(), anyObject());
assertEquals("getId", persistenceId, reply.getId());
replySnapshot = reply.getSnapshot();
assertEquals("getElectionTerm", term, replySnapshot.getElectionTerm());
assertEquals("getElectionVotedFor", "member-1", replySnapshot.getElectionVotedFor());
assertEquals("getLastAppliedIndex", -1L, replySnapshot.getLastAppliedIndex());
assertEquals("getLastAppliedTerm", -1L, replySnapshot.getLastAppliedTerm());
assertEquals("getLastIndex", -1L, replySnapshot.getLastIndex());
assertEquals("getLastTerm", -1L, replySnapshot.getLastTerm());
assertEquals("getState type", EmptyState.INSTANCE, replySnapshot.getState());
assertEquals("getUnAppliedEntries size", 0, replySnapshot.getUnAppliedEntries().size());
TEST_LOG.info("testGetSnapshot ending");
}
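
The exchange above (GetSnapshot in, CaptureSnapshotReply from the cohort, GetSnapshotReply out) is driven through a TestKit probe. Below is a minimal sketch of how a non-test caller might drive the same request with the Akka ask pattern; the helper class and method name are made up for illustration, and the message classes are assumed to live under org.opendaylight.controller.cluster.raft.client.messages.

import java.util.concurrent.TimeUnit;
import akka.actor.ActorRef;
import akka.pattern.Patterns;
import akka.util.Timeout;
import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshotReply;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
import scala.concurrent.Await;

final class SnapshotFetcher {
    private SnapshotFetcher() {
    }

    // Ask the RaftActor for its current Snapshot and block until the reply (or a timeout) arrives.
    static Snapshot fetchSnapshot(final ActorRef raftActor) throws Exception {
        final Timeout timeout = new Timeout(30, TimeUnit.SECONDS);
        final Object reply = Await.result(Patterns.ask(raftActor, GetSnapshot.INSTANCE, timeout), timeout.duration());
        return ((GetSnapshotReply) reply).getSnapshot();
    }
}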
use of org.opendaylight.controller.cluster.raft.persisted.Snapshot in project controller by opendaylight.
the class ReplicationAndSnapshotsIntegrationTest method testFirstSnapshot.
/**
* Send a payload to the TestRaftActor to persist and replicate. Since snapshotBatchCount is set to
* 4 and we already have 3 entries in the journal log, this should initiate a snapshot. In this
* scenario, follower consensus and application of state are delayed until after the snapshot
* completes.
*/
private void testFirstSnapshot() throws Exception {
testLog.info("testFirstSnapshot starting");
expSnapshotState.add(recoveredPayload0);
expSnapshotState.add(recoveredPayload1);
expSnapshotState.add(recoveredPayload2);
// Delay the consensus by temporarily dropping the AppendEntries to both followers.
follower1Actor.underlyingActor().startDropMessages(AppendEntries.class);
follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);
// Send the payload.
payload3 = sendPayloadData(leaderActor, "three");
// Wait for snapshot complete.
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
// The snapshot index should not be advanced nor the log trimmed because replicatedToAllIndex
// is behind, the followers not yet having been replicated to via AppendEntries.
assertEquals("Leader snapshot term", initialTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
assertEquals("Leader journal log size", 2, leaderContext.getReplicatedLog().size());
assertEquals("Leader journal last index", 3, leaderContext.getReplicatedLog().lastIndex());
// Verify the persisted snapshot in the leader. This should reflect the advanced snapshot index as
// the last applied log entry (2) even though the leader hasn't yet advanced its cached snapshot index.
List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
verifySnapshot("Persisted", persistedSnapshots.get(0), initialTerm, 2, currentTerm, 3);
List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 3, payload3);
// The leader's persisted journal log should be cleared since we snapshotted.
List<SimpleReplicatedLogEntry> persistedLeaderJournal = InMemoryJournal.get(leaderId, SimpleReplicatedLogEntry.class);
assertEquals("Persisted journal log size", 0, persistedLeaderJournal.size());
// Allow AppendEntries to both followers to proceed. This should catch up the followers and cause a
// "fake" snapshot in the leader to advance the snapshot index to 2. Also the state should be applied
// in all members (via ApplyState).
follower1Actor.underlyingActor().stopDropMessages(AppendEntries.class);
follower2Actor.underlyingActor().stopDropMessages(AppendEntries.class);
ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
verifyApplyState(applyState, leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3);
verifyApplyJournalEntries(leaderCollectorActor, 3);
assertEquals("Leader commit index", 3, leaderContext.getCommitIndex());
applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
verifyApplyState(applyState, null, null, currentTerm, 3, payload3);
verifyApplyJournalEntries(follower1CollectorActor, 3);
applyState = MessageCollectorActor.expectFirstMatching(follower2CollectorActor, ApplyState.class);
verifyApplyState(applyState, null, null, currentTerm, 3, payload3);
verifyApplyJournalEntries(follower2CollectorActor, 3);
assertEquals("Leader snapshot term", initialTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
assertEquals("Leader snapshot index", 2, leaderContext.getReplicatedLog().getSnapshotIndex());
assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
assertEquals("Leader commit index", 3, leaderContext.getCommitIndex());
assertEquals("Leader last applied", 3, leaderContext.getLastApplied());
assertEquals("Leader replicatedToAllIndex", 2, leader.getReplicatedToAllIndex());
// The followers should also snapshot so verify.
MessageCollectorActor.expectFirstMatching(follower1CollectorActor, SaveSnapshotSuccess.class);
persistedSnapshots = InMemorySnapshotStore.getSnapshots(follower1Id, Snapshot.class);
assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
// The last applied index in the snapshot may or may not be the last log entry depending on
// timing, so to avoid intermittent test failures we'll just verify the snapshot's last term/index.
assertEquals("Follower1 Snapshot getLastTerm", currentTerm, persistedSnapshots.get(0).getLastTerm());
assertEquals("Follower1 Snapshot getLastIndex", 3, persistedSnapshots.get(0).getLastIndex());
MessageCollectorActor.expectFirstMatching(follower2CollectorActor, SaveSnapshotSuccess.class);
MessageCollectorActor.clearMessages(leaderCollectorActor);
MessageCollectorActor.clearMessages(follower1CollectorActor);
MessageCollectorActor.clearMessages(follower2CollectorActor);
testLog.info("testFirstSnapshot ending");
}
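
The snapshotBatchCount of 4 mentioned in the javadoc above is supplied through the test's config params, which are set up in the base class and not shown on this page. Here is a minimal sketch of how that knob could be set on DefaultConfigParamsImpl, assuming the setter names below (setSnapshotBatchCount, setHeartBeatInterval), which do not appear in the snippets here.

import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import scala.concurrent.duration.FiniteDuration;

final class SnapshotBatchConfigSketch {
    static DefaultConfigParamsImpl newConfigParams() {
        final DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
        // Trigger a snapshot once the in-memory journal grows past 4 entries, matching the javadoc above.
        config.setSnapshotBatchCount(4);
        // A short heartbeat keeps the "fake" snapshot (snapshot-index advance on AppendEntriesReply)
        // happening quickly in tests.
        config.setHeartBeatInterval(new FiniteDuration(100, TimeUnit.MILLISECONDS));
        return config;
    }
}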
use of org.opendaylight.controller.cluster.raft.persisted.Snapshot in project controller by opendaylight.
the class ReplicationAndSnapshotsIntegrationTest method testSecondSnapshot.
/**
* Send one more payload to trigger another snapshot. In this scenario, we delay the snapshot until
* consensus occurs and the leader applies the state.
*/
private void testSecondSnapshot() throws Exception {
testLog.info("testSecondSnapshot starting");
expSnapshotState.add(payload3);
expSnapshotState.add(payload4);
expSnapshotState.add(payload5);
expSnapshotState.add(payload6);
// Delay the CaptureSnapshot message to the leader actor.
leaderActor.underlyingActor().startDropMessages(CaptureSnapshotReply.class);
// Send the payload.
payload7 = sendPayloadData(leaderActor, "seven");
// Capture the CaptureSnapshotReply message so we can send it later.
final CaptureSnapshotReply captureSnapshotReply = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, CaptureSnapshotReply.class);
// Wait for the state to be applied in the leader.
ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
verifyApplyState(applyState, leaderCollectorActor, payload7.toString(), currentTerm, 7, payload7);
// At this point the leader has applied the new state but the cached snapshot index should not be
// advanced by a "fake" snapshot because we're in the middle of a snapshot. We'll wait for at least
// one more heartbeat AppendEntriesReply to ensure this does not occur.
MessageCollectorActor.clearMessages(leaderCollectorActor);
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
assertEquals("Leader snapshot index", 5, leaderContext.getReplicatedLog().getSnapshotIndex());
assertEquals("Leader journal log size", 2, leaderContext.getReplicatedLog().size());
assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
assertEquals("Leader commit index", 7, leaderContext.getCommitIndex());
assertEquals("Leader last applied", 7, leaderContext.getLastApplied());
assertEquals("Leader replicatedToAllIndex", 5, leader.getReplicatedToAllIndex());
// Now deliver the CaptureSnapshotReply.
leaderActor.underlyingActor().stopDropMessages(CaptureSnapshotReply.class);
leaderActor.tell(captureSnapshotReply, leaderActor);
// Wait for snapshot complete.
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
// Wait for another heartbeat AppendEntriesReply. This should cause a "fake" snapshot to advance the
// snapshot index and trim the log since we're no longer in the middle of a snapshot.
MessageCollectorActor.clearMessages(leaderCollectorActor);
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
assertEquals("Leader snapshot index", 6, leaderContext.getReplicatedLog().getSnapshotIndex());
assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
assertEquals("Leader commit index", 7, leaderContext.getCommitIndex());
// Verify the persisted snapshot. The snapshot metadata was captured when the CaptureSnapshot was
// processed, before entry 7 was applied, so the snapshot should reflect last applied index 6 and
// carry entry 7 as an unapplied entry.
List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 6, currentTerm, 7);
List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 7, payload7);
// The leader's persisted journal log should be cleared since we did a snapshot.
List<SimpleReplicatedLogEntry> persistedLeaderJournal = InMemoryJournal.get(leaderId, SimpleReplicatedLogEntry.class);
assertEquals("Persisted journal log size", 0, persistedLeaderJournal.size());
// Verify the followers apply all 4 new log entries.
List<ApplyState> applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 4);
verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);
verifyApplyState(applyStates.get(3), null, null, currentTerm, 7, payload7);
applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 4);
verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);
verifyApplyState(applyStates.get(3), null, null, currentTerm, 7, payload7);
// Verify the followers' snapshot indexes have also advanced (after another AppendEntries heartbeat
// to be safe).
MessageCollectorActor.clearMessages(follower1CollectorActor);
MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
follower1Context = follower1Actor.underlyingActor().getRaftActorContext();
assertEquals("Follower 1 snapshot term", currentTerm, follower1Context.getReplicatedLog().getSnapshotTerm());
assertEquals("Follower 1 snapshot index", 6, follower1Context.getReplicatedLog().getSnapshotIndex());
assertEquals("Follower 1 journal log size", 1, follower1Context.getReplicatedLog().size());
assertEquals("Follower 1 journal last index", 7, follower1Context.getReplicatedLog().lastIndex());
assertEquals("Follower 1 commit index", 7, follower1Context.getCommitIndex());
MessageCollectorActor.clearMessages(follower2CollectorActor);
MessageCollectorActor.expectFirstMatching(follower2CollectorActor, AppendEntries.class);
follower2Context = follower2Actor.underlyingActor().getRaftActorContext();
assertEquals("Follower 2 snapshot term", currentTerm, follower2Context.getReplicatedLog().getSnapshotTerm());
assertEquals("Follower 2 snapshot index", 6, follower2Context.getReplicatedLog().getSnapshotIndex());
assertEquals("Follower 2 journal log size", 1, follower2Context.getReplicatedLog().size());
assertEquals("Follower 2 journal last index", 7, follower2Context.getReplicatedLog().lastIndex());
assertEquals("Follower 2 commit index", 7, follower2Context.getCommitIndex());
expSnapshotState.add(payload7);
testLog.info("testSecondSnapshot ending");
}
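
The verifySnapshot helper referenced above comes from the integration test's common base class and isn't shown on this page. The following is a plausible sketch of what it asserts, inferred only from its call sites here and from the Snapshot getters exercised in testGetSnapshot; the real helper may check more, such as the serialized state.

import static org.junit.Assert.assertEquals;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;

// Sketch of the base-class helper, not the actual implementation.
static void verifySnapshot(final String prefix, final Snapshot snapshot, final long lastAppliedTerm,
        final long lastAppliedIndex, final long lastTerm, final long lastIndex) {
    assertEquals(prefix + " Snapshot getLastAppliedTerm", lastAppliedTerm, snapshot.getLastAppliedTerm());
    assertEquals(prefix + " Snapshot getLastAppliedIndex", lastAppliedIndex, snapshot.getLastAppliedIndex());
    assertEquals(prefix + " Snapshot getLastTerm", lastTerm, snapshot.getLastTerm());
    assertEquals(prefix + " Snapshot getLastIndex", lastIndex, snapshot.getLastIndex());
}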
use of org.opendaylight.controller.cluster.raft.persisted.Snapshot in project controller by opendaylight.
the class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest method testLeaderSnapshotWithLaggingFollowerCaughtUpViaInstallSnapshot.
/**
* Send payloads to trigger a leader snapshot, due to snapshotBatchCount being reached, with follower 2
* lagging so that the leader trims its log from the last applied index. Follower 2's log
* will be behind by several entries and, when it is resumed, it should be caught up via a snapshot
* installed by the leader.
*/
@Test
public void testLeaderSnapshotWithLaggingFollowerCaughtUpViaInstallSnapshot() throws Exception {
testLog.info("testLeaderSnapshotWithLaggingFollowerCaughtUpViaInstallSnapshot starting");
setup();
sendInitialPayloadsReplicatedToAllFollowers("zero", "one");
// Configure follower 2 to drop messages and lag.
follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);
// Sleep for at least the election timeout interval so follower 2 is deemed inactive by the leader.
Uninterruptibles.sleepUninterruptibly(leaderConfigParams.getElectionTimeOutInterval().toMillis() + 5, TimeUnit.MILLISECONDS);
// Send 5 payloads - the second should cause a leader snapshot.
final MockPayload payload2 = sendPayloadData(leaderActor, "two");
final MockPayload payload3 = sendPayloadData(leaderActor, "three");
final MockPayload payload4 = sendPayloadData(leaderActor, "four");
final MockPayload payload5 = sendPayloadData(leaderActor, "five");
final MockPayload payload6 = sendPayloadData(leaderActor, "six");
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
// Verify the leader got consensus and applies each log entry even though follower 2 didn't respond.
List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 5);
verifyApplyState(applyStates.get(0), leaderCollectorActor, payload2.toString(), currentTerm, 2, payload2);
verifyApplyState(applyStates.get(2), leaderCollectorActor, payload4.toString(), currentTerm, 4, payload4);
verifyApplyState(applyStates.get(4), leaderCollectorActor, payload6.toString(), currentTerm, 6, payload6);
MessageCollectorActor.clearMessages(leaderCollectorActor);
testLog.info("testLeaderSnapshotWithLaggingFollowerCaughtUpViaInstallSnapshot: " + "sending 1 more payload to trigger second snapshot");
// Send another payload to trigger a second leader snapshot.
MockPayload payload7 = sendPayloadData(leaderActor, "seven");
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
verifyApplyState(applyState, leaderCollectorActor, payload7.toString(), currentTerm, 7, payload7);
// Verify follower 1 applies each log entry.
applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 6);
verifyApplyState(applyStates.get(0), null, null, currentTerm, 2, payload2);
verifyApplyState(applyStates.get(2), null, null, currentTerm, 4, payload4);
verifyApplyState(applyStates.get(5), null, null, currentTerm, 7, payload7);
// The snapshot should have caused the leader to advance the snapshot index to the leader's last
// applied index (6) since the log size should have exceeded the snapshot batch count (4).
// replicatedToAllIndex should remain at 1 since follower 2 is lagging.
verifyLeadersTrimmedLog(7, 1);
expSnapshotState.add(payload2);
expSnapshotState.add(payload3);
expSnapshotState.add(payload4);
expSnapshotState.add(payload5);
expSnapshotState.add(payload6);
MessageCollectorActor.clearMessages(leaderCollectorActor);
MessageCollectorActor.clearMessages(follower1CollectorActor);
// Send a server config change to test that the install snapshot includes the server config.
ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false), new ServerInfo(follower2Id, false)));
leaderContext.updatePeerIds(serverConfig);
((AbstractLeader) leader).updateMinReplicaCount();
leaderActor.tell(serverConfig, ActorRef.noSender());
applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
verifyApplyState(applyState, leaderCollectorActor, "serverConfig", currentTerm, 8, serverConfig);
applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
verifyApplyState(applyState, null, null, currentTerm, 8, serverConfig);
// Verify the leader's persisted snapshot.
List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 6, currentTerm, 7);
List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 7, payload7);
expSnapshotState.add(payload7);
verifyInstallSnapshotToLaggingFollower(8, serverConfig);
testLog.info("testLeaderSnapshotWithLaggingFollowerCaughtUpViaInstallSnapshot complete");
}
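
verifyLeadersTrimmedLog is likewise a base-class helper not shown on this page. Below is a rough sketch of the checks it plausibly performs, built only from the context getters that appear elsewhere in these snippets; the actual helper may also verify the snapshot term/index and the trimmed journal size.

// Sketch only: leaderContext and leader are the test fields used throughout this page.
void verifyLeadersTrimmedLog(final long lastIndex, final long replicatedToAllIndex) {
    assertEquals("Leader journal last index", lastIndex, leaderContext.getReplicatedLog().lastIndex());
    assertEquals("Leader commit index", lastIndex, leaderContext.getCommitIndex());
    assertEquals("Leader last applied", lastIndex, leaderContext.getLastApplied());
    assertEquals("Leader replicatedToAllIndex", replicatedToAllIndex, leader.getReplicatedToAllIndex());
}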
use of org.opendaylight.controller.cluster.raft.persisted.Snapshot in project controller by opendaylight.
the class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest method testLeaderSnapshotTriggeredByMemoryThresholdExceededWithLaggingFollower.
/**
* Send payloads with follower 2 lagging with the last payload having a large enough size to trigger a
* leader snapshot such that the leader trims its log from the last applied index. Follower 2's log will
* be behind by several entries and, when it is resumed, it should be caught up via a snapshot installed
* by the leader.
*/
@Test
public void testLeaderSnapshotTriggeredByMemoryThresholdExceededWithLaggingFollower() throws Exception {
testLog.info("testLeaderSnapshotTriggeredByMemoryThresholdExceededWithLaggingFollower starting");
snapshotBatchCount = 5;
setup();
sendInitialPayloadsReplicatedToAllFollowers("zero");
leaderActor.underlyingActor().setMockTotalMemory(1000);
// We'll expect a SimpleReplicatedLogEntry message and an ApplyJournalEntries message to be added to the journal.
InMemoryJournal.addWriteMessagesCompleteLatch(leaderId, 2);
follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);
// Sleep for at least the election timeout interval so follower 2 is deemed inactive by the leader.
Uninterruptibles.sleepUninterruptibly(leaderConfigParams.getElectionTimeOutInterval().toMillis() + 5, TimeUnit.MILLISECONDS);
// Send a payload with a large relative size but not enough to trigger a snapshot.
MockPayload payload1 = sendPayloadData(leaderActor, "one", 500);
// Verify the leader got consensus and applies the first log entry even though follower 2 didn't respond.
List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 1);
verifyApplyState(applyStates.get(0), leaderCollectorActor, payload1.toString(), currentTerm, 1, payload1);
// Wait for all the SimpleReplicatedLogEntry and ApplyJournalEntries messages to be added to the journal
// before the snapshot so the snapshot sequence # will be higher, ensuring the snapshot gets
// purged from the snapshot store after subsequent snapshots.
InMemoryJournal.waitForWriteMessagesComplete(leaderId);
// Verify a snapshot is not triggered.
CaptureSnapshot captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class);
Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
expSnapshotState.add(payload1);
// Sleep for at least the election timeout interval so follower 2 is deemed inactive by the leader.
Uninterruptibles.sleepUninterruptibly(leaderConfigParams.getElectionTimeOutInterval().toMillis() + 5, TimeUnit.MILLISECONDS);
// Send another payload whose size, combined with the previous payload's, exceeds the memory
// threshold (70% * 1000 = 700) - this should trigger a snapshot.
MockPayload payload2 = sendPayloadData(leaderActor, "two", 201);
// Verify the leader applies the last log entry.
applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 2);
verifyApplyState(applyStates.get(1), leaderCollectorActor, payload2.toString(), currentTerm, 2, payload2);
// Verify follower 1 applies each log entry.
applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 2);
verifyApplyState(applyStates.get(0), null, null, currentTerm, 1, payload1);
verifyApplyState(applyStates.get(1), null, null, currentTerm, 2, payload2);
// A snapshot should've occurred - wait for it to complete.
MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
// Because the snapshot was triggered by exceeding the memory threshold the leader should've advanced
// the snapshot index to the last applied index and trimmed the log even though the entries weren't
// replicated to all followers.
verifyLeadersTrimmedLog(2, 0);
// Verify the leader's persisted snapshot.
List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 1, currentTerm, 2);
List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 2, payload2);
expSnapshotState.add(payload2);
verifyInstallSnapshotToLaggingFollower(2L, null);
// Sends a payload with index 3.
verifyNoSubsequentSnapshotAfterMemoryThresholdExceededSnapshot();
// Sends 3 payloads with indexes 4, 5 and 6.
long leadersSnapshotIndexOnRecovery = verifyReplicationsAndSnapshotWithNoLaggingAfterInstallSnapshot();
// Recover the leader from persistence and verify.
long leadersLastIndexOnRecovery = 6;
long leadersFirstJournalEntryIndexOnRecovery = leadersSnapshotIndexOnRecovery + 1;
verifyLeaderRecoveryAfterReinstatement(leadersLastIndexOnRecovery, leadersSnapshotIndexOnRecovery, leadersFirstJournalEntryIndexOnRecovery);
testLog.info("testLeaderSnapshotTriggeredByMemoryThresholdExceeded ending");
}
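
The 70% figure in the comments above is the snapshot data threshold: once the leader's in-memory log data size exceeds that percentage of (mocked) total memory, a snapshot is triggered regardless of snapshotBatchCount. Here is a sketch of tuning that threshold, assuming DefaultConfigParamsImpl exposes setSnapshotDataThresholdPercentage (a setter not shown on this page) and that 70 matches this test's configuration rather than the production default.

import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;

final class MemoryThresholdConfigSketch {
    static DefaultConfigParamsImpl newConfigParams() {
        final DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
        // Snapshot when the log data size exceeds 70% of total memory (mocked to 1000 bytes in the test,
        // hence the 70% * 1000 = 700 arithmetic in the comment above).
        config.setSnapshotDataThresholdPercentage(70);
        // Mirrors snapshotBatchCount = 5, which this test sets before setup().
        config.setSnapshotBatchCount(5);
        return config;
    }
}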