Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest, method testLeaderSnapshotTriggeredByMemoryThresholdExceededWithLaggingFollower.
/**
 * Send payloads with follower 2 lagging, with the last payload having a large enough size to trigger a
 * leader snapshot such that the leader trims its log from the last applied index. Follower 2's log will
 * be behind by several entries and, when it is resumed, it should be caught up via a snapshot installed
 * by the leader.
 */
@Test
public void testLeaderSnapshotTriggeredByMemoryThresholdExceededWithLaggingFollower() throws Exception {
    testLog.info("testLeaderSnapshotTriggeredByMemoryThresholdExceededWithLaggingFollower starting");

    snapshotBatchCount = 5;
    setup();

    sendInitialPayloadsReplicatedToAllFollowers("zero");

    leaderActor.underlyingActor().setMockTotalMemory(1000);

    // We'll expect a ReplicatedLogImplEntry message and an ApplyJournalEntries message added to the journal.
    InMemoryJournal.addWriteMessagesCompleteLatch(leaderId, 2);

    follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);

    // Sleep for at least the election timeout interval so follower 2 is deemed inactive by the leader.
    Uninterruptibles.sleepUninterruptibly(leaderConfigParams.getElectionTimeOutInterval().toMillis() + 5,
            TimeUnit.MILLISECONDS);

    // Send a payload with a large relative size but not enough to trigger a snapshot.
    MockPayload payload1 = sendPayloadData(leaderActor, "one", 500);

    // Verify the leader gets consensus and applies the first log entry even though follower 2 didn't respond.
    List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 1);
    verifyApplyState(applyStates.get(0), leaderCollectorActor, payload1.toString(), currentTerm, 1, payload1);

    // Wait for all the ReplicatedLogImplEntry and ApplyJournalEntries messages to be added to the journal
    // before the snapshot so the snapshot sequence # will be higher to ensure the snapshot gets
    // purged from the snapshot store after subsequent snapshots.
    InMemoryJournal.waitForWriteMessagesComplete(leaderId);

    // Verify a snapshot is not triggered.
    CaptureSnapshot captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor,
            CaptureSnapshot.class);
    Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);

    expSnapshotState.add(payload1);

    // Sleep for at least the election timeout interval so follower 2 is deemed inactive by the leader.
    Uninterruptibles.sleepUninterruptibly(leaderConfigParams.getElectionTimeOutInterval().toMillis() + 5,
            TimeUnit.MILLISECONDS);

    // Send another payload whose size, combined with the last payload, exceeds the memory threshold
    // (70% of 1000 = 700) - this should trigger a snapshot.
    MockPayload payload2 = sendPayloadData(leaderActor, "two", 201);

    // Verify the leader applies the last log entry.
    applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 2);
    verifyApplyState(applyStates.get(1), leaderCollectorActor, payload2.toString(), currentTerm, 2, payload2);

    // Verify follower 1 applies each log entry.
    applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 2);
    verifyApplyState(applyStates.get(0), null, null, currentTerm, 1, payload1);
    verifyApplyState(applyStates.get(1), null, null, currentTerm, 2, payload2);

    // A snapshot should've occurred - wait for it to complete.
    MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);

    // Because the snapshot was triggered by exceeding the memory threshold, the leader should've advanced
    // the snapshot index to the last applied index and trimmed the log even though the entries weren't
    // replicated to all followers.
    verifyLeadersTrimmedLog(2, 0);

    // Verify the leader's persisted snapshot.
    List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
    assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
    verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 1, currentTerm, 2);
    List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
    assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
    verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 2, payload2);

    expSnapshotState.add(payload2);

    verifyInstallSnapshotToLaggingFollower(2L, null);

    // Sends a payload with index 3.
    verifyNoSubsequentSnapshotAfterMemoryThresholdExceededSnapshot();

    // Sends 3 payloads with indexes 4, 5 and 6.
    long leadersSnapshotIndexOnRecovery = verifyReplicationsAndSnapshotWithNoLaggingAfterInstallSnapshot();

    // Recover the leader from persistence and verify.
    long leadersLastIndexOnRecovery = 6;
    long leadersFirstJournalEntryIndexOnRecovery = leadersSnapshotIndexOnRecovery + 1;
    verifyLeaderRecoveryAfterReinstatement(leadersLastIndexOnRecovery, leadersSnapshotIndexOnRecovery,
            leadersFirstJournalEntryIndexOnRecovery);

    testLog.info("testLeaderSnapshotTriggeredByMemoryThresholdExceededWithLaggingFollower ending");
}
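
Worth spelling out the arithmetic the test relies on: with mocked total memory of 1000 bytes and an assumed 70% threshold, the trigger point is 700 bytes of log data, so payload1 (500) alone stays below it while payload1 plus payload2 (500 + 201 = 701) crosses it. A minimal, self-contained sketch of that check; the class and method names here are hypothetical, not the controller's actual snapshot-support code:

final class SnapshotTriggerSketch {
    // Assumed from the test comments: snapshot when log data exceeds 70% of total memory.
    private static final double MEMORY_THRESHOLD = 0.70;

    static boolean shouldSnapshot(long logDataSizeInBytes, long totalMemoryInBytes) {
        return logDataSizeInBytes > totalMemoryInBytes * MEMORY_THRESHOLD;
    }

    public static void main(String[] args) {
        System.out.println(shouldSnapshot(500, 1000));       // false - payload "one" alone stays under 700
        System.out.println(shouldSnapshot(500 + 201, 1000)); // true  - adding payload "two" exceeds 700
    }
}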
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class ReplicationWithSlicedPayloadIntegrationTest, method runTest.
@Test
public void runTest() throws Exception {
    testLog.info("ReplicationWithSlicedPayloadIntegrationTest starting");

    // Create the leader and 2 follower actors.
    snapshotChunkSize = 20;

    DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
    followerConfigParams.setSnapshotBatchCount(snapshotBatchCount);
    follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
            follower2Id, testActorPath(follower2Id)), followerConfigParams);
    follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
            follower1Id, testActorPath(follower1Id)), followerConfigParams);

    peerAddresses = ImmutableMap.<String, String>builder()
            .put(follower1Id, follower1Actor.path().toString())
            .put(follower2Id, follower2Actor.path().toString()).build();

    leaderConfigParams = newLeaderConfigParams();
    leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);

    follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
    follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
    leaderCollectorActor = leaderActor.underlyingActor().collectorActor();

    leaderContext = leaderActor.underlyingActor().getRaftActorContext();

    waitUntilLeader(leaderActor);

    currentTerm = leaderContext.getTermInformation().getCurrentTerm();

    // Send a large payload that exceeds the size threshold and needs to be sliced.
    MockPayload largePayload = sendPayloadData(leaderActor, "large", snapshotChunkSize + 1);

    // Then send a small payload that does not need to be sliced.
    MockPayload smallPayload = sendPayloadData(leaderActor, "normal", snapshotChunkSize - 1);

    final List<ApplyState> leaderApplyState = expectMatching(leaderCollectorActor, ApplyState.class, 2);
    verifyApplyState(leaderApplyState.get(0), leaderCollectorActor, largePayload.toString(),
            currentTerm, 0, largePayload);
    verifyApplyState(leaderApplyState.get(1), leaderCollectorActor, smallPayload.toString(),
            currentTerm, 1, smallPayload);

    final List<ApplyState> follower1ApplyState = expectMatching(follower1CollectorActor, ApplyState.class, 2);
    verifyApplyState(follower1ApplyState.get(0), null, null, currentTerm, 0, largePayload);
    verifyApplyState(follower1ApplyState.get(1), null, null, currentTerm, 1, smallPayload);

    final List<ApplyState> follower2ApplyState = expectMatching(follower2CollectorActor, ApplyState.class, 2);
    verifyApplyState(follower2ApplyState.get(0), null, null, currentTerm, 0, largePayload);
    verifyApplyState(follower2ApplyState.get(1), null, null, currentTerm, 1, smallPayload);

    testLog.info("ReplicationWithSlicedPayloadIntegrationTest ending");
}
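
The test sets snapshotChunkSize to 20 so the 21-byte "large" payload must be sliced into two messages while the 19-byte "normal" payload fits in one. A hedged sketch of that splitting idea, assuming simple fixed-size chunking; this is an illustration only, not the controller's actual slicing implementation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class PayloadSlicingSketch {
    // Splits a payload into slices of at most chunkSize bytes; the last slice may be shorter.
    static List<byte[]> slice(byte[] payload, int chunkSize) {
        List<byte[]> slices = new ArrayList<>();
        for (int offset = 0; offset < payload.length; offset += chunkSize) {
            slices.add(Arrays.copyOfRange(payload, offset, Math.min(offset + chunkSize, payload.length)));
        }
        return slices;
    }

    public static void main(String[] args) {
        System.out.println(slice(new byte[21], 20).size()); // 2 - like the "large" payload
        System.out.println(slice(new byte[19], 20).size()); // 1 - like the "normal" payload
    }
}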
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class RaftActorServerConfigurationSupportTest, method testRemoveServer.
@Test
public void testRemoveServer() throws Exception {
    LOG.info("testRemoveServer starting");

    DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
    configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
    configParams.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());

    final String follower1ActorId = actorFactory.generateActorId(FOLLOWER_ID);
    final String follower1ActorPath = actorFactory.createTestActorPath(follower1ActorId);
    final String follower2ActorId = actorFactory.generateActorId(FOLLOWER_ID2);
    final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId);

    RaftActorContext initialActorContext = new MockRaftActorContext();

    final String downNodeId = "downNode";
    TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(MockLeaderRaftActor.props(
            ImmutableMap.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""),
            initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
            actorFactory.generateActorId(LEADER_ID));

    final ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor());

    ActorRef follower1Collector = actorFactory.createActor(MessageCollectorActor.props(),
            actorFactory.generateActorId("collector"));
    final TestActorRef<CollectingMockRaftActor> follower1Actor = actorFactory.createTestActor(
            CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
                    FOLLOWER_ID2, follower2ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
                    follower1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);

    ActorRef follower2Collector = actorFactory.createActor(MessageCollectorActor.props(),
            actorFactory.generateActorId("collector"));
    final TestActorRef<CollectingMockRaftActor> follower2Actor = actorFactory.createTestActor(
            CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
                    FOLLOWER_ID, follower1ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
                    follower2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);

    leaderActor.underlyingActor().waitForInitializeBehaviorComplete();
    follower1Actor.underlyingActor().waitForInitializeBehaviorComplete();
    follower2Actor.underlyingActor().waitForInitializeBehaviorComplete();

    leaderActor.tell(new RemoveServer(FOLLOWER_ID), testKit.getRef());
    RemoveServerReply removeServerReply = testKit.expectMsgClass(testKit.duration("5 seconds"),
            RemoveServerReply.class);
    assertEquals("getStatus", ServerChangeStatus.OK, removeServerReply.getStatus());

    // Verify the leader applied the server configuration change entry at index 0.
    ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollector, ApplyState.class);
    assertEquals(0L, applyState.getReplicatedLogEntry().getIndex());
    verifyServerConfigurationPayloadEntry(leaderActor.underlyingActor().getRaftActorContext().getReplicatedLog(),
            votingServer(LEADER_ID), votingServer(FOLLOWER_ID2), votingServer(downNodeId));

    // Verify follower 2 also applied the change to its own replicated log.
    applyState = MessageCollectorActor.expectFirstMatching(follower2Collector, ApplyState.class);
    assertEquals(0L, applyState.getReplicatedLogEntry().getIndex());
    verifyServerConfigurationPayloadEntry(follower2Actor.underlyingActor().getRaftActorContext().getReplicatedLog(),
            votingServer(LEADER_ID), votingServer(FOLLOWER_ID2), votingServer(downNodeId));

    RaftActorBehavior currentBehavior = leaderActor.underlyingActor().getCurrentBehavior();
    assertTrue("Expected Leader", currentBehavior instanceof Leader);
    assertEquals("Follower ids size", 2, ((Leader) currentBehavior).getFollowerIds().size());

    MessageCollectorActor.expectFirstMatching(follower1Collector, ServerRemoved.class);

    LOG.info("testRemoveServer ending");
}
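
Outside a TestKit, the same RemoveServer round trip could be driven with Akka's ask pattern. A sketch under the assumption that RemoveServer, RemoveServerReply and ServerChangeStatus live in the controller's raft messages package (the test's imports aren't shown here); the actor reference and timeout are illustrative:

import java.time.Duration;
import java.util.concurrent.CompletionStage;

import akka.actor.ActorRef;
import akka.pattern.Patterns;

// Assumed package paths, based on how the test references these classes.
import org.opendaylight.controller.cluster.raft.messages.RemoveServer;
import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply;
import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;

final class RemoveServerClientSketch {
    // Asks the leader to remove a follower and maps the reply to a success flag.
    static CompletionStage<Boolean> removeFollower(ActorRef leader, String followerId) {
        return Patterns.ask(leader, new RemoveServer(followerId), Duration.ofSeconds(5))
                .thenApply(reply -> ((RemoveServerReply) reply).getStatus() == ServerChangeStatus.OK);
    }
}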
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class IsolationScenarioTest, method testLeaderIsolationWithPriorUncommittedEntryAndOneConflictingEntry.
/**
 * Isolates the leader with a payload entry that's replicated to all followers and committed on the leader but
 * uncommitted on the followers. While isolated, the majority partition elects a new leader and both sides of the
 * partition attempt to commit one entry independently. After isolation is removed, the entry will conflict and
 * both sides should reconcile their logs appropriately.
 */
@Test
public void testLeaderIsolationWithPriorUncommittedEntryAndOneConflictingEntry() throws Exception {
    testLog.info("testLeaderIsolationWithPriorUncommittedEntryAndOneConflictingEntry starting");

    createRaftActors();

    // Submit an initial payload that is committed/applied on all nodes.
    final MockPayload payload0 = sendPayloadData(leaderActor, "zero");
    verifyApplyJournalEntries(leaderCollectorActor, 0);
    verifyApplyJournalEntries(follower1CollectorActor, 0);
    verifyApplyJournalEntries(follower2CollectorActor, 0);

    // Submit another payload that is replicated to all followers and committed on the leader but the leader is
    // isolated before the entry is committed on the followers. To accomplish this we drop the AppendEntries
    // with the updated leader commit index.
    follower1Actor.underlyingActor().startDropMessages(AppendEntries.class, ae -> ae.getLeaderCommit() == 1);
    follower2Actor.underlyingActor().startDropMessages(AppendEntries.class, ae -> ae.getLeaderCommit() == 1);

    MockPayload payload1 = sendPayloadData(leaderActor, "one");

    // Wait for the isolated leader to send AppendEntries to the followers with the new entry with index 1. This
    // message is forwarded to the followers.
    expectFirstMatching(follower1CollectorActor, AppendEntries.class, ae -> ae.getEntries().size() == 1
            && ae.getEntries().get(0).getIndex() == 1 && ae.getEntries().get(0).getData().equals(payload1));
    expectFirstMatching(follower2CollectorActor, AppendEntries.class, ae -> ae.getEntries().size() == 1
            && ae.getEntries().get(0).getIndex() == 1 && ae.getEntries().get(0).getData().equals(payload1));

    verifyApplyJournalEntries(leaderCollectorActor, 1);

    isolateLeader();

    // Send a payload to the isolated leader so it has an uncommitted log entry with index 2.
    testLog.info("Sending payload to isolated leader");
    final MockPayload isolatedLeaderPayload2 = sendPayloadData(leaderActor, "two");

    // Wait for the isolated leader to send AppendEntries to follower1 with the entry at index 2. Note the message
    // is collected but not forwarded to the follower RaftActor.
    AppendEntries appendEntries = expectFirstMatching(follower1CollectorActor, AppendEntries.class);
    assertEquals("getTerm", currentTerm, appendEntries.getTerm());
    assertEquals("getLeaderId", leaderId, appendEntries.getLeaderId());
    assertEquals("getEntries().size()", 1, appendEntries.getEntries().size());
    verifyReplicatedLogEntry(appendEntries.getEntries().get(0), currentTerm, 2, isolatedLeaderPayload2);

    // The leader should transition to IsolatedLeader.
    expectFirstMatching(leaderNotifierActor, RoleChanged.class,
            rc -> rc.getNewRole().equals(RaftState.IsolatedLeader.name()));

    forceElectionOnFollower1();

    // Send a payload to the new leader follower1 and verify it's replicated to follower2 and committed. Since the
    // entry with index 1 from the previous term was uncommitted, the new leader should've also committed a
    // NoopPayload entry with index 2 in the PreLeader state. Thus the new payload will have index 3.
    testLog.info("Sending payload to new leader");
    final MockPayload newLeaderPayload2 = sendPayloadData(follower1Actor, "two-new");
    verifyApplyJournalEntries(follower1CollectorActor, 3);
    verifyApplyJournalEntries(follower2CollectorActor, 3);

    assertEquals("Follower 1 journal last term", currentTerm, follower1Context.getReplicatedLog().lastTerm());
    assertEquals("Follower 1 journal last index", 3, follower1Context.getReplicatedLog().lastIndex());
    assertEquals("Follower 1 commit index", 3, follower1Context.getCommitIndex());
    verifyReplicatedLogEntry(follower1Context.getReplicatedLog().get(3), currentTerm, 3, newLeaderPayload2);
    assertEquals("Follower 1 state", Lists.newArrayList(payload0, payload1, newLeaderPayload2),
            follower1Actor.underlyingActor().getState());

    removeIsolation();

    // Previous leader should switch to follower because it will receive either an AppendEntries or
    // AppendEntriesReply with a higher term.
    expectFirstMatching(leaderNotifierActor, RoleChanged.class,
            rc -> rc.getNewRole().equals(RaftState.Follower.name()));

    // The previous leader has a conflicting log entry at index 2 with a different term which should get
    // replaced by the new leader's entry.
    verifyApplyJournalEntries(leaderCollectorActor, 3);

    verifyRaftState(leaderActor, raftState -> {
        assertEquals("Prior leader journal last term", currentTerm, leaderContext.getReplicatedLog().lastTerm());
        assertEquals("Prior leader journal last index", 3, leaderContext.getReplicatedLog().lastIndex());
        assertEquals("Prior leader commit index", 3, leaderContext.getCommitIndex());
    });

    assertEquals("Prior leader state", Lists.newArrayList(payload0, payload1, newLeaderPayload2),
            leaderActor.underlyingActor().getState());

    // Ensure the prior leader didn't apply its conflicting entry with index 2, term 1.
    List<ApplyState> applyState = getAllMatching(leaderCollectorActor, ApplyState.class);
    for (ApplyState as : applyState) {
        if (as.getReplicatedLogEntry().getIndex() == 2 && as.getReplicatedLogEntry().getTerm() == 1) {
            fail("Got unexpected ApplyState: " + as);
        }
    }

    // The prior leader should not have needed a snapshot installed in order to get it synced.
    assertNoneMatching(leaderCollectorActor, InstallSnapshot.class);

    testLog.info("testLeaderIsolationWithPriorUncommittedEntryAndOneConflictingEntry ending");
}
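
The reconciliation asserted above is standard Raft behavior (section 5.3 of the Raft paper): on AppendEntries, an existing entry that conflicts with an incoming one (same index, different term) is deleted together with everything after it, and the leader's entries are appended. A compact sketch with a hypothetical Entry type, assuming log position equals log index; this is not the controller's ReplicatedLog:

import java.util.List;

final class ConflictReconcileSketch {
    record Entry(long index, long term) { }

    // Applies the AppendEntries conflict rule to a mutable log whose
    // position equals the entry index (an assumption for this sketch).
    static void reconcile(List<Entry> log, List<Entry> leaderEntries) {
        for (Entry incoming : leaderEntries) {
            int pos = (int) incoming.index();
            if (pos < log.size() && log.get(pos).term() != incoming.term()) {
                // Conflict: truncate this entry and all that follow it.
                log.subList(pos, log.size()).clear();
            }
            if (pos >= log.size()) {
                log.add(incoming);
            }
        }
    }
}

In the scenario above, the prior leader's conflicting (index 2, term 1) entry is truncated this way and replaced by the new leader's entries, which is why its final state ends up matching follower 1's.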
Use of org.opendaylight.controller.cluster.raft.base.messages.ApplyState in project controller by opendaylight.
The class IsolationScenarioTest, method testLeaderIsolationWithPriorUncommittedEntryAndMultipleConflictingEntries.
/**
 * Isolates the leader with a payload entry that's replicated to all followers and committed on the leader but
 * uncommitted on the followers. While isolated, the majority partition elects a new leader and both sides of the
 * partition attempt to commit multiple entries independently. After isolation is removed, the entries will
 * conflict and both sides should reconcile their logs appropriately.
 */
@Test
public void testLeaderIsolationWithPriorUncommittedEntryAndMultipleConflictingEntries() throws Exception {
    testLog.info("testLeaderIsolationWithPriorUncommittedEntryAndMultipleConflictingEntries starting");

    createRaftActors();

    // Submit an initial payload that is committed/applied on all nodes.
    final MockPayload payload0 = sendPayloadData(leaderActor, "zero");
    verifyApplyJournalEntries(leaderCollectorActor, 0);
    verifyApplyJournalEntries(follower1CollectorActor, 0);
    verifyApplyJournalEntries(follower2CollectorActor, 0);

    // Submit another payload that is replicated to all followers and committed on the leader but the leader is
    // isolated before the entry is committed on the followers. To accomplish this we drop the AppendEntries
    // with the updated leader commit index.
    follower1Actor.underlyingActor().startDropMessages(AppendEntries.class, ae -> ae.getLeaderCommit() == 1);
    follower2Actor.underlyingActor().startDropMessages(AppendEntries.class, ae -> ae.getLeaderCommit() == 1);

    MockPayload payload1 = sendPayloadData(leaderActor, "one");

    // Wait for the isolated leader to send AppendEntries to the followers with the new entry with index 1. This
    // message is forwarded to the followers.
    expectFirstMatching(follower1CollectorActor, AppendEntries.class, ae -> ae.getEntries().size() == 1
            && ae.getEntries().get(0).getIndex() == 1 && ae.getEntries().get(0).getData().equals(payload1));
    expectFirstMatching(follower2CollectorActor, AppendEntries.class, ae -> ae.getEntries().size() == 1
            && ae.getEntries().get(0).getIndex() == 1 && ae.getEntries().get(0).getData().equals(payload1));

    verifyApplyJournalEntries(leaderCollectorActor, 1);

    isolateLeader();

    // Send 3 payloads to the isolated leader so it has uncommitted log entries.
    testLog.info("Sending 3 payloads to isolated leader");
    sendPayloadData(leaderActor, "two");
    sendPayloadData(leaderActor, "three");
    sendPayloadData(leaderActor, "four");

    // Wait for the isolated leader to send AppendEntries to follower1 for each new entry. Note the messages
    // are collected but not forwarded to the follower RaftActor.
    expectFirstMatching(follower1CollectorActor, AppendEntries.class, ae -> {
        for (ReplicatedLogEntry e : ae.getEntries()) {
            if (e.getIndex() == 4) {
                return true;
            }
        }
        return false;
    });

    // The leader should transition to IsolatedLeader.
    expectFirstMatching(leaderNotifierActor, RoleChanged.class,
            rc -> rc.getNewRole().equals(RaftState.IsolatedLeader.name()));

    forceElectionOnFollower1();

    // Send 3 payloads to the new leader follower1 and verify they're replicated to follower2 and committed. Since
    // the entry with index 1 from the previous term was uncommitted, the new leader should've also committed a
    // NoopPayload entry with index 2 in the PreLeader state. Thus the new payload indices will start at 3.
    testLog.info("Sending 3 payloads to new leader");
    final MockPayload newLeaderPayload2 = sendPayloadData(follower1Actor, "two-new");
    final MockPayload newLeaderPayload3 = sendPayloadData(follower1Actor, "three-new");
    final MockPayload newLeaderPayload4 = sendPayloadData(follower1Actor, "four-new");
    verifyApplyJournalEntries(follower1CollectorActor, 5);
    verifyApplyJournalEntries(follower2CollectorActor, 5);

    assertEquals("Follower 1 journal last term", currentTerm, follower1Context.getReplicatedLog().lastTerm());
    assertEquals("Follower 1 journal last index", 5, follower1Context.getReplicatedLog().lastIndex());
    assertEquals("Follower 1 commit index", 5, follower1Context.getCommitIndex());
    verifyReplicatedLogEntry(follower1Context.getReplicatedLog().get(5), currentTerm, 5, newLeaderPayload4);
    assertEquals("Follower 1 state", Lists.newArrayList(payload0, payload1, newLeaderPayload2, newLeaderPayload3,
            newLeaderPayload4), follower1Actor.underlyingActor().getState());

    removeIsolation();

    // Previous leader should switch to follower because it will receive either an AppendEntries or
    // AppendEntriesReply with a higher term.
    expectFirstMatching(leaderNotifierActor, RoleChanged.class,
            rc -> rc.getNewRole().equals(RaftState.Follower.name()));

    // The previous leader has conflicting log entries starting at index 2 with different terms which should get
    // replaced by the new leader's entries.
    verifyApplyJournalEntries(leaderCollectorActor, 5);

    verifyRaftState(leaderActor, raftState -> {
        assertEquals("Prior leader journal last term", currentTerm, leaderContext.getReplicatedLog().lastTerm());
        assertEquals("Prior leader journal last index", 5, leaderContext.getReplicatedLog().lastIndex());
        assertEquals("Prior leader commit index", 5, leaderContext.getCommitIndex());
    });

    assertEquals("Prior leader state", Lists.newArrayList(payload0, payload1, newLeaderPayload2, newLeaderPayload3,
            newLeaderPayload4), leaderActor.underlyingActor().getState());

    // Ensure the prior leader didn't apply any of its conflicting entries with term 1.
    List<ApplyState> applyState = getAllMatching(leaderCollectorActor, ApplyState.class);
    for (ApplyState as : applyState) {
        if (as.getReplicatedLogEntry().getTerm() == 1) {
            fail("Got unexpected ApplyState: " + as);
        }
    }

    // The prior leader should not have needed a snapshot installed in order to get it synced.
    assertNoneMatching(leaderCollectorActor, InstallSnapshot.class);

    testLog.info("testLeaderIsolationWithPriorUncommittedEntryAndMultipleConflictingEntries ending");
}
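
The IsolatedLeader transitions both isolation tests wait for boil down to a quorum check: a leader that can no longer reach a majority of the cluster steps down to IsolatedLeader, and rejoins as a Follower once it sees a higher term. A tiny sketch of that majority arithmetic for the 3-node cluster used here; the names are illustrative, not the controller's Leader behavior:

final class IsolationQuorumSketch {
    // True when the leader plus its reachable followers no longer form a majority.
    static boolean isIsolated(int reachableFollowers, int totalFollowers) {
        int clusterSize = totalFollowers + 1; // followers plus the leader itself
        return reachableFollowers + 1 <= clusterSize / 2;
    }

    public static void main(String[] args) {
        System.out.println(isIsolated(0, 2)); // true  - 1 of 3 nodes is a minority
        System.out.println(isIsolated(1, 2)); // false - 2 of 3 nodes is a majority
    }
}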