Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class LeaderTest, method testHandleAppendEntriesReplyFailureWithFollowersLogEmpty.
@Test
public void testHandleAppendEntriesReplyFailureWithFollowersLogEmpty() {
    logStart("testHandleAppendEntriesReplyFailureWithFollowersLogEmpty");

    MockRaftActorContext leaderActorContext = createActorContextWithFollower();
    ((DefaultConfigParamsImpl) leaderActorContext.getConfigParams())
            .setHeartBeatInterval(new FiniteDuration(1000, TimeUnit.SECONDS));

    leaderActorContext.setReplicatedLog(
            new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1).build());
    long leaderCommitIndex = 1;
    leaderActorContext.setCommitIndex(leaderCommitIndex);
    leaderActorContext.setLastApplied(leaderCommitIndex);

    final ReplicatedLogEntry leadersFirstLogEntry = leaderActorContext.getReplicatedLog().get(0);
    final ReplicatedLogEntry leadersSecondLogEntry = leaderActorContext.getReplicatedLog().get(1);

    MockRaftActorContext followerActorContext = createFollowerActorContextWithLeader();
    followerActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
    followerActorContext.setCommitIndex(-1);
    followerActorContext.setLastApplied(-1);

    Follower follower = new Follower(followerActorContext);
    followerActor.underlyingActor().setBehavior(follower);
    followerActorContext.setCurrentBehavior(follower);

    leader = new Leader(leaderActorContext);

    AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
    final AppendEntriesReply appendEntriesReply =
            MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);

    MessageCollectorActor.clearMessages(followerActor);
    MessageCollectorActor.clearMessages(leaderActor);

    // Verify the initial AppendEntries heartbeat. The leader hasn't yet confirmed the
    // follower's log state, so no commit index (-1) and no entries are conveyed.
    assertEquals("getLeaderCommit", -1, appendEntries.getLeaderCommit());
    assertEquals("Log entries size", 0, appendEntries.getEntries().size());
    assertEquals("getPrevLogIndex", 0, appendEntries.getPrevLogIndex());

    leaderActor.underlyingActor().setBehavior(leader);
    leaderActorContext.setCurrentBehavior(leader);

    leader.handleMessage(followerActor, appendEntriesReply);

    MessageCollectorActor.expectMatching(leaderActor, AppendEntriesReply.class, 1);
    appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);

    assertEquals("getLeaderCommit", leaderCommitIndex, appendEntries.getLeaderCommit());
    assertEquals("getPrevLogIndex", -1, appendEntries.getPrevLogIndex());
    assertEquals("Log entries size", 2, appendEntries.getEntries().size());
    assertEquals("First entry index", 0, appendEntries.getEntries().get(0).getIndex());
    assertEquals("First entry data", leadersFirstLogEntry.getData(),
            appendEntries.getEntries().get(0).getData());
    assertEquals("Second entry index", 1, appendEntries.getEntries().get(1).getIndex());
    assertEquals("Second entry data", leadersSecondLogEntry.getData(),
            appendEntries.getEntries().get(1).getData());

    FollowerLogInformation followerInfo = leader.getFollower(FOLLOWER_ID);
    assertEquals("getNextIndex", 2, followerInfo.getNextIndex());

    List<ApplyState> applyStateList = MessageCollectorActor.expectMatching(followerActor, ApplyState.class, 2);

    ApplyState applyState = applyStateList.get(0);
    assertEquals("Follower's first ApplyState index", 0, applyState.getReplicatedLogEntry().getIndex());
    assertEquals("Follower's first ApplyState term", 1, applyState.getReplicatedLogEntry().getTerm());
    assertEquals("Follower's first ApplyState data", leadersFirstLogEntry.getData(),
            applyState.getReplicatedLogEntry().getData());

    applyState = applyStateList.get(1);
    assertEquals("Follower's second ApplyState index", 1, applyState.getReplicatedLogEntry().getIndex());
    assertEquals("Follower's second ApplyState term", 1, applyState.getReplicatedLogEntry().getTerm());
    assertEquals("Follower's second ApplyState data", leadersSecondLogEntry.getData(),
            applyState.getReplicatedLogEntry().getData());

    assertEquals("Follower's commit index", 1, followerActorContext.getCommitIndex());
    assertEquals("Follower's lastIndex", 1, followerActorContext.getReplicatedLog().lastIndex());
}
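For reference, the shape of the ApplyState message that these assertions exercise can be reconstructed from its call sites: the getters used above and the constructor calls in applyLogToStateMachine further down. The following is a minimal sketch, not the actual controller source; the import paths are best guesses and the real class may carry additional behavior.

import akka.actor.ActorRef;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.yangtools.concepts.Identifier;

// Minimal sketch of the ApplyState message, inferred from its usages in this
// section; field names match the getters exercised by the assertions above.
public class ApplyState {
    private final ActorRef clientActor;          // null when no client awaits a reply
    private final Identifier identifier;         // correlates the reply with the client request
    private final ReplicatedLogEntry replicatedLogEntry; // the committed entry to apply

    public ApplyState(final ActorRef clientActor, final Identifier identifier,
            final ReplicatedLogEntry replicatedLogEntry) {
        this.clientActor = clientActor;
        this.identifier = identifier;
        this.replicatedLogEntry = replicatedLogEntry;
    }

    public ActorRef getClientActor() {
        return clientActor;
    }

    public Identifier getIdentifier() {
        return identifier;
    }

    public ReplicatedLogEntry getReplicatedLogEntry() {
        return replicatedLogEntry;
    }
}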
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest, method testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries.
/**
 * Sends 2 payload instances with follower 2 lagging, then resumes the follower and verifies
 * that it gets caught up via AppendEntries.
 */
@Test
public void testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries() throws Exception {
    testLog.info("testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries starting: sending 2 new payloads");

    setup();

    // Simulate lagging by dropping AppendEntries messages in follower 2.
    follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);

    // Send the payloads.
    MockPayload payload0 = sendPayloadData(leaderActor, "zero");
    MockPayload payload1 = sendPayloadData(leaderActor, "one");

    // Verify the leader obtained consensus and applies each log entry even though follower 2
    // didn't respond.
    List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 2);
    verifyApplyState(applyStates.get(0), leaderCollectorActor, payload0.toString(), currentTerm, 0, payload0);
    verifyApplyState(applyStates.get(1), leaderCollectorActor, payload1.toString(), currentTerm, 1, payload1);

    // Verify follower 1 applies each log entry.
    applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 2);
    verifyApplyState(applyStates.get(0), null, null, currentTerm, 0, payload0);
    verifyApplyState(applyStates.get(1), null, null, currentTerm, 1, payload1);

    // Ensure there's at least 1 more heartbeat.
    MessageCollectorActor.clearMessages(leaderCollectorActor);
    MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);

    // The leader should not have performed fake snapshots to trim the log because the entries
    // have not been replicated to follower 2.
    assertEquals("Leader snapshot term", -1, leaderContext.getReplicatedLog().getSnapshotTerm());
    assertEquals("Leader snapshot index", -1, leaderContext.getReplicatedLog().getSnapshotIndex());
    assertEquals("Leader journal log size", 2, leaderContext.getReplicatedLog().size());
    assertEquals("Leader journal last index", 1, leaderContext.getReplicatedLog().lastIndex());
    assertEquals("Leader commit index", 1, leaderContext.getCommitIndex());
    assertEquals("Leader last applied", 1, leaderContext.getLastApplied());
    assertEquals("Leader replicatedToAllIndex", -1, leader.getReplicatedToAllIndex());

    testLog.info("testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries: new entries applied - resuming follower {}", follower2Id);

    // Now stop dropping AppendEntries in follower 2.
    follower2Actor.underlyingActor().stopDropMessages(AppendEntries.class);

    // Verify follower 2 applies each log entry.
    applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 2);
    verifyApplyState(applyStates.get(0), null, null, currentTerm, 0, payload0);
    verifyApplyState(applyStates.get(1), null, null, currentTerm, 1, payload1);

    // Ensure there's at least 1 more heartbeat.
    MessageCollectorActor.clearMessages(leaderCollectorActor);
    MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);

    // The leader should now have performed fake snapshots to trim the log.
    verifyLeadersTrimmedLog(1);

    // Even though follower 2 lagged behind, the leader should not have tried to install a
    // snapshot to catch it up: no real snapshotting was done, so the follower's next index was
    // still present in the log.
    InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor, InstallSnapshot.class);
    Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);

    testLog.info("testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries complete");
}
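The startDropMessages/stopDropMessages hook used above simulates a lagging follower by discarding messages of a given class before they reach the actor's behavior. Below is a sketch of how such a hook might be implemented, assuming a test actor that funnels all incoming messages through a single handleReceive method; the names DroppingActorSupport, handleReceive, and delegate are illustrative, not the actual test-harness API.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative test-actor mixin: messages whose class is registered via
// startDropMessages are silently discarded, simulating a lagging peer.
public abstract class DroppingActorSupport {
    private final Set<Class<?>> dropClasses = ConcurrentHashMap.newKeySet();

    public void startDropMessages(final Class<?> msgClass) {
        dropClasses.add(msgClass);
    }

    public void stopDropMessages(final Class<?> msgClass) {
        dropClasses.remove(msgClass);
    }

    // Called for every incoming message; only non-dropped messages are delegated.
    protected final void handleReceive(final Object message) {
        if (dropClasses.contains(message.getClass())) {
            return; // drop it - the sender sees no reply, as if the network lost it
        }
        delegate(message);
    }

    protected abstract void delegate(Object message);
}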
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest, method verifyReplicationsAndSnapshotWithNoLaggingAfterInstallSnapshot.
/**
 * Does another round of payloads and a snapshot to verify that replicatedToAllIndex gets back
 * on track and snapshots work as expected after doing a follower snapshot. In this step we
 * don't lag a follower.
 */
private long verifyReplicationsAndSnapshotWithNoLaggingAfterInstallSnapshot() throws Exception {
    testLog.info("verifyReplicationsAndSnapshotWithNoLaggingAfterInstallSnapshot starting: replicatedToAllIndex: {}",
            leader.getReplicatedToAllIndex());

    // Send another payload - a snapshot should occur.
    MockPayload payload4 = sendPayloadData(leaderActor, "four");

    // Wait for the snapshot to complete.
    MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);

    ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
    verifyApplyState(applyState, leaderCollectorActor, payload4.toString(), currentTerm, 4, payload4);

    // Verify the leader's last persisted snapshot (previous ones may not be purged yet).
    List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
    Snapshot persistedSnapshot = persistedSnapshots.get(persistedSnapshots.size() - 1);

    // The last (fourth) payload may or may not have been applied when the snapshot is captured,
    // depending on when the async persistence completes.
    List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshot.getUnAppliedEntries();
    long leadersSnapshotIndex;
    if (unAppliedEntry.isEmpty()) {
        leadersSnapshotIndex = 4;
        expSnapshotState.add(payload4);
        verifySnapshot("Persisted", persistedSnapshot, currentTerm, 4, currentTerm, 4);
    } else {
        leadersSnapshotIndex = 3;
        verifySnapshot("Persisted", persistedSnapshot, currentTerm, 3, currentTerm, 4);
        assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
        verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 4, payload4);
        expSnapshotState.add(payload4);
    }

    // Send a couple more payloads.
    MockPayload payload5 = sendPayloadData(leaderActor, "five");
    MockPayload payload6 = sendPayloadData(leaderActor, "six");

    // Verify the leader applies the 2 new log entries (payload4's ApplyState is still in the
    // collector, hence 3 are expected).
    List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 3);
    verifyApplyState(applyStates.get(1), leaderCollectorActor, payload5.toString(), currentTerm, 5, payload5);
    verifyApplyState(applyStates.get(2), leaderCollectorActor, payload6.toString(), currentTerm, 6, payload6);

    // Verify the leader applies an ApplyJournalEntries for at least the last entry index.
    verifyApplyJournalEntries(leaderCollectorActor, 6);

    // Ensure there's at least 1 more heartbeat to trim the log.
    MessageCollectorActor.clearMessages(leaderCollectorActor);
    MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);

    // Verify the leader's final state.
    verifyLeadersTrimmedLog(6);

    InMemoryJournal.dumpJournal(leaderId);

    // Verify the leader's persisted journal log - it should only contain the last 2
    // ReplicatedLogEntries added after the snapshot, as the persisted journal should've been
    // purged up to the snapshot sequence number.
    verifyPersistedJournal(leaderId, Arrays.asList(new SimpleReplicatedLogEntry(5, currentTerm, payload5),
            new SimpleReplicatedLogEntry(6, currentTerm, payload6)));

    // Verify the leader's persisted journal contains an ApplyJournalEntries for at least the
    // last entry index.
    List<ApplyJournalEntries> persistedApplyJournalEntries = InMemoryJournal.get(leaderId, ApplyJournalEntries.class);
    boolean found = false;
    for (ApplyJournalEntries entry : persistedApplyJournalEntries) {
        if (entry.getToIndex() == 6) {
            found = true;
            break;
        }
    }
    Assert.assertTrue(String.format("ApplyJournalEntries with index %d not found in leader's persisted journal", 6),
            found);

    // Verify follower 1 applies the 3 log entries.
    applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
    verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
    verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
    verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);

    // Verify follower 1's log state.
    verifyFollowersTrimmedLog(1, follower1Actor, 6);

    // Verify follower 2 applies the 3 log entries.
    applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 3);
    verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
    verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
    verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);

    // Verify follower 2's log state.
    verifyFollowersTrimmedLog(2, follower2Actor, 6);

    expSnapshotState.add(payload5);
    expSnapshotState.add(payload6);

    testLog.info("verifyReplicationsAndSnapshotWithNoLaggingAfterInstallSnapshot ending");

    return leadersSnapshotIndex;
}
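The verifyApplyState helper used throughout these tests is not shown here, but its behavior can be inferred from the call sites: a null expected client actor and identifier indicate the ApplyState was generated on a follower, where no client is attached. A plausible sketch follows, assuming JUnit's assertEquals is statically imported and Payload is the controller's payload base type; the actual helper in the test base class may assert more.

// Plausible reconstruction of verifyApplyState, inferred from its call sites.
static void verifyApplyState(final ApplyState applyState, final ActorRef expClientActor,
        final String expIdentifier, final long expTerm, final long expIndex, final Payload expPayload) {
    assertEquals("ApplyState getClientActor", expClientActor, applyState.getClientActor());
    assertEquals("ApplyState getIdentifier", expIdentifier,
            applyState.getIdentifier() == null ? null : applyState.getIdentifier().toString());
    final ReplicatedLogEntry entry = applyState.getReplicatedLogEntry();
    assertEquals("ReplicatedLogEntry getTerm", expTerm, entry.getTerm());
    assertEquals("ReplicatedLogEntry getIndex", expIndex, entry.getIndex());
    assertEquals("ReplicatedLogEntry getData", expPayload, entry.getData());
}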
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest, method testLeaderSnapshotWithLaggingFollowerCaughtUpViaAppendEntries.
/**
 * Sends payloads to trigger a leader snapshot due to snapshotBatchCount being reached, with
 * follower 2 lagging but not far enough behind for the leader to trim its log from the last
 * applied index. Follower 2's log will be behind by several entries and, when it is resumed,
 * it should be caught up via AppendEntries sent by the leader.
 */
@Test
public void testLeaderSnapshotWithLaggingFollowerCaughtUpViaAppendEntries() throws Exception {
    testLog.info("testLeaderSnapshotWithLaggingFollowerCaughtUpViaAppendEntries starting");

    setup();

    sendInitialPayloadsReplicatedToAllFollowers("zero", "one");

    // Configure follower 2 to drop messages and lag.
    follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);

    // Send the first payload and verify it gets applied by the leader and follower 1.
    MockPayload payload2 = sendPayloadData(leaderActor, "two");

    ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
    verifyApplyState(applyState, leaderCollectorActor, payload2.toString(), currentTerm, 2, payload2);

    applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
    verifyApplyState(applyState, null, null, currentTerm, 2, payload2);

    expSnapshotState.add(payload2);

    MessageCollectorActor.clearMessages(leaderCollectorActor);
    MessageCollectorActor.clearMessages(follower1CollectorActor);

    // Send another payload - this should cause a snapshot due to snapshotBatchCount being reached.
    MockPayload payload3 = sendPayloadData(leaderActor, "three");

    MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);

    testLog.info("testLeaderSnapshotWithLaggingFollowerCaughtUpViaAppendEntries: sending 2 more payloads");

    // Send 2 more payloads - not enough to trigger another snapshot.
    MockPayload payload4 = sendPayloadData(leaderActor, "four");
    MockPayload payload5 = sendPayloadData(leaderActor, "five");

    // Verify the leader obtained consensus and applies each log entry even though follower 2
    // didn't respond.
    List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 3);
    verifyApplyState(applyStates.get(0), leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3);
    verifyApplyState(applyStates.get(1), leaderCollectorActor, payload4.toString(), currentTerm, 4, payload4);
    verifyApplyState(applyStates.get(2), leaderCollectorActor, payload5.toString(), currentTerm, 5, payload5);

    // Verify follower 1 applies each log entry.
    applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
    verifyApplyState(applyStates.get(0), null, null, currentTerm, 3, payload3);
    verifyApplyState(applyStates.get(1), null, null, currentTerm, 4, payload4);
    verifyApplyState(applyStates.get(2), null, null, currentTerm, 5, payload5);

    // The snapshot should have caused the leader to advance the snapshot index to the last
    // previously applied index (1) that was replicated to all followers at the time of capture.
    // Note: since the log size (3) did not exceed the snapshot batch count (4), the leader
    // should not have trimmed the log to the last index actually applied (5).
    assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
    assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
    assertEquals("Leader journal log size", 4, leaderContext.getReplicatedLog().size());
    assertEquals("Leader journal last index", 5, leaderContext.getReplicatedLog().lastIndex());
    assertEquals("Leader commit index", 5, leaderContext.getCommitIndex());
    assertEquals("Leader last applied", 5, leaderContext.getLastApplied());
    assertEquals("Leader replicatedToAllIndex", 1, leader.getReplicatedToAllIndex());

    // Now stop dropping AppendEntries in follower 2.
    follower2Actor.underlyingActor().stopDropMessages(AppendEntries.class);

    // Verify follower 2 applies each log entry. The leader should not install a snapshot
    // because follower 2's next index (3) is still present in the log.
    applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 4);
    verifyApplyState(applyStates.get(0), null, null, currentTerm, 2, payload2);
    verifyApplyState(applyStates.get(1), null, null, currentTerm, 3, payload3);
    verifyApplyState(applyStates.get(2), null, null, currentTerm, 4, payload4);
    verifyApplyState(applyStates.get(3), null, null, currentTerm, 5, payload5);

    // Verify the leader did not try to install a snapshot to catch up follower 2.
    InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor, InstallSnapshot.class);
    Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);

    // Ensure there's at least 1 more heartbeat.
    MessageCollectorActor.clearMessages(leaderCollectorActor);
    MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);

    // The leader should now have performed fake snapshots to advance the snapshot index and to
    // trim the log. In addition, replicatedToAllIndex should've advanced.
    verifyLeadersTrimmedLog(5);

    // Verify the leader's persisted snapshot.
    List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
    assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
    verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 2, currentTerm, 3);
    List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
    assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
    verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 3, payload3);

    // Verify follower 1's log and snapshot indexes.
    MessageCollectorActor.clearMessages(follower1CollectorActor);
    MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
    verifyFollowersTrimmedLog(1, follower1Actor, 5);

    // Verify follower 2's log and snapshot indexes.
    MessageCollectorActor.clearMessages(follower2CollectorActor);
    MessageCollectorActor.expectFirstMatching(follower2CollectorActor, AppendEntries.class);
    verifyFollowersTrimmedLog(2, follower2Actor, 5);

    MessageCollectorActor.clearMessages(leaderCollectorActor);
    MessageCollectorActor.clearMessages(follower1CollectorActor);
    MessageCollectorActor.clearMessages(follower2CollectorActor);

    expSnapshotState.add(payload3);
    expSnapshotState.add(payload4);
    expSnapshotState.add(payload5);

    testLog.info("testLeaderSnapshotWithLaggingFollowerCaughtUpViaAppendEntries complete");
}
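verifyLeadersTrimmedLog(lastAppliedIndex) is called after each round of "fake" snapshots. Judging from the explicit leader-state assertions earlier in this section, it most likely checks that the snapshot index has advanced to lastAppliedIndex - 1 and that only the last entry remains in the journal. A hedged reconstruction follows; the real helper may differ in its details.

// Hedged reconstruction of verifyLeadersTrimmedLog, modeled on the explicit
// leader-state assertions used earlier in this section.
private void verifyLeadersTrimmedLog(final long lastIndex) {
    final long snapshotIndex = lastIndex - 1;
    assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
    assertEquals("Leader snapshot index", snapshotIndex, leaderContext.getReplicatedLog().getSnapshotIndex());
    assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
    assertEquals("Leader journal last index", lastIndex, leaderContext.getReplicatedLog().lastIndex());
    assertEquals("Leader commit index", lastIndex, leaderContext.getCommitIndex());
    assertEquals("Leader last applied", lastIndex, leaderContext.getLastApplied());
    assertEquals("Leader replicatedToAllIndex", snapshotIndex, leader.getReplicatedToAllIndex());
}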
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class AbstractRaftActorBehavior, method applyLogToStateMachine.
/**
 * Applies to the state machine the log entries up to, and including, the specified index,
 * which is known to be committed.
 *
 * @param index the log index
 */
protected void applyLogToStateMachine(final long index) {
    // Now maybe we apply to the state machine
    for (long i = context.getLastApplied() + 1; i < index + 1; i++) {
        ReplicatedLogEntry replicatedLogEntry = context.getReplicatedLog().get(i);
        if (replicatedLogEntry != null) {
            // Send a local message to the local RaftActor (it's the derived class, to be
            // specific, that applies the log entry at its index).
            final ApplyState applyState;
            final ClientRequestTracker tracker = removeClientRequestTracker(i);
            if (tracker != null) {
                applyState = new ApplyState(tracker.getClientActor(), tracker.getIdentifier(), replicatedLogEntry);
            } else {
                applyState = new ApplyState(null, null, replicatedLogEntry);
            }

            log.debug("{}: Setting last applied to {}", logName(), i);

            context.setLastApplied(i);
            context.getApplyStateConsumer().accept(applyState);
        } else {
            // If an index is not present in the log, there is no point in looping
            // further, as the rest won't be present either.
            log.warn("{}: Missing index {} from log. Cannot apply state. Ignoring {} to {}",
                    logName(), i, i, index);
            break;
        }
    }

    // Send a message to persist an ApplyJournalEntries marker into Akka's persistent journal;
    // it will be used during recovery. If the code above throws an error and this message is
    // not sent, that is fine, as AppendEntries received later would re-add it to the journal.
    actor().tell(new ApplyJournalEntries(context.getLastApplied()), actor());
}
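On the receiving end, the Consumer<ApplyState> obtained via context.getApplyStateConsumer() is what ultimately hands the committed payload to the application's state machine. The following is a minimal sketch of what such a consumer might look like inside the RaftActor; "StateMachine" and "stateMachine" are illustrative placeholders, not controller APIs.

import java.util.function.Consumer;

// Placeholder abstraction for whatever applies committed payloads.
interface StateMachine {
    void apply(Object payloadData);
}

// Illustrative consumer for ApplyState messages, registered with the context.
final Consumer<ApplyState> applyStateConsumer = applyState -> {
    // Apply the committed entry's payload to the application state.
    stateMachine.apply(applyState.getReplicatedLogEntry().getData());

    // If a client actor originated this entry (getClientActor() != null), it
    // can now be notified, correlating the reply via getIdentifier().
};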