
Example 6 with ServerInfo

use of org.opendaylight.controller.cluster.raft.persisted.ServerInfo in project controller by opendaylight.

the class RaftActorServerConfigurationSupportTest method testChangeToVotingWithNoLeaderAndElectionTimeout.

@Test
public void testChangeToVotingWithNoLeaderAndElectionTimeout() {
    LOG.info("testChangeToVotingWithNoLeaderAndElectionTimeout starting");
    final String node1ID = "node1";
    final String node2ID = "node2";
    final PeerAddressResolver peerAddressResolver = peerId -> peerId.equals(node1ID) ? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID) ? actorFactory.createTestActorPath(node2ID) : null;
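    // Persist an initial server config (node1 non-voting, node2 voting) and the current term in both nodes' journals.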
    ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(new ServerInfo(node1ID, false), new ServerInfo(node2ID, true)));
    SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
    InMemoryJournal.addEntry(node1ID, 1, new UpdateElectionTerm(1, "node1"));
    InMemoryJournal.addEntry(node1ID, 2, persistedServerConfigEntry);
    InMemoryJournal.addEntry(node2ID, 1, new UpdateElectionTerm(1, "node1"));
    InMemoryJournal.addEntry(node2ID, 2, persistedServerConfigEntry);
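    // node1 uses a short heartbeat (100 ms) and election timeout factor of 1 so it times out and starts
    // elections quickly; node2's factor of 1,000,000 effectively disables its own election timeout.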
    DefaultConfigParamsImpl configParams1 = new DefaultConfigParamsImpl();
    configParams1.setHeartBeatInterval(new FiniteDuration(100, TimeUnit.MILLISECONDS));
    configParams1.setElectionTimeoutFactor(1);
    configParams1.setPeerAddressResolver(peerAddressResolver);
    ActorRef node1Collector = actorFactory.createActor(MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
    TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams1, PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
    final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
    DefaultConfigParamsImpl configParams2 = new DefaultConfigParamsImpl();
    configParams2.setElectionTimeoutFactor(1000000);
    configParams2.setPeerAddressResolver(peerAddressResolver);
    ActorRef node2Collector = actorFactory.createActor(MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
    TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams2, PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
    CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
    // Send a ChangeServersVotingStatus message to node1 to change node1 to voting. This should cause
    // node1 to try to elect itself as leader in order to apply the new server config. But we'll drop
    // RequestVote messages in node2, which should cause node1 to time out and revert to the previous
    // server config and fail with NO_LEADER. Note that node1 shouldn't forward the request to node2
    // because node2 was previously voting.
    node2RaftActor.setDropMessageOfType(RequestVote.class);
    ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true));
    node1RaftActorRef.tell(changeServers, testKit.getRef());
    ServerChangeReply reply = testKit.expectMsgClass(testKit.duration("5 seconds"), ServerChangeReply.class);
    assertEquals("getStatus", ServerChangeStatus.NO_LEADER, reply.getStatus());
    assertEquals("Server config", Sets.newHashSet(nonVotingServer(node1ID), votingServer(node2ID)), Sets.newHashSet(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig()));
    assertEquals("getRaftState", RaftState.Follower, node1RaftActor.getRaftState());
    LOG.info("testChangeToVotingWithNoLeaderAndElectionTimeout ending");
}
Also used : UnInitializedFollowerSnapshotReply(org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply) SimpleReplicatedLogEntry(org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry) Arrays(java.util.Arrays) AddServerReply(org.opendaylight.controller.cluster.raft.messages.AddServerReply) ForwardMessageToBehaviorActor(org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor) LoggerFactory(org.slf4j.LoggerFactory) ServerInfo(org.opendaylight.controller.cluster.raft.persisted.ServerInfo) ApplySnapshot(org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot) ActorRef(akka.actor.ActorRef) Optional(com.google.common.base.Optional) RemoveServer(org.opendaylight.controller.cluster.raft.messages.RemoveServer) RemoveServerReply(org.opendaylight.controller.cluster.raft.messages.RemoveServerReply) Map(java.util.Map) After(org.junit.After) Assert.fail(org.junit.Assert.fail) UntypedActor(akka.actor.UntypedActor) ApplyJournalEntries(org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries) RaftActorBehavior(org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior) Dispatchers(akka.dispatch.Dispatchers) AppendEntries(org.opendaylight.controller.cluster.raft.messages.AppendEntries) ServerConfigurationPayload(org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload) ImmutableMap(com.google.common.collect.ImmutableMap) AddServer(org.opendaylight.controller.cluster.raft.messages.AddServer) FiniteDuration(scala.concurrent.duration.FiniteDuration) RequestVote(org.opendaylight.controller.cluster.raft.messages.RequestVote) MessageCollectorActor.clearMessages(org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.clearMessages) Sets(com.google.common.collect.Sets) List(java.util.List) AbstractLeader(org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader) MessageCollectorActor(org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor) MessageCollectorActor.expectMatching(org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectMatching) Props(akka.actor.Props) CaptureSnapshotReply(org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply) InstallSnapshot(org.opendaylight.controller.cluster.raft.messages.InstallSnapshot) Follower(org.opendaylight.controller.cluster.raft.behaviors.Follower) Stopwatch(com.google.common.base.Stopwatch) ApplyState(org.opendaylight.controller.cluster.raft.base.messages.ApplyState) ServerChangeStatus(org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus) InMemoryJournal(org.opendaylight.controller.cluster.raft.utils.InMemoryJournal) SerializationUtils(org.apache.commons.lang3.SerializationUtils) InMemorySnapshotStore(org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore) TimeoutNow(org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow) ArrayList(java.util.ArrayList) InitiateCaptureSnapshot(org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot) MessageCollectorActor.assertNoneMatching(org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.assertNoneMatching) ChangeServersVotingStatus(org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus) ByteSource(com.google.common.io.ByteSource) Before(org.junit.Before) DisableElectionsRaftPolicy(org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy) OutputStream(java.io.OutputStream) 
MessageCollectorActor.expectFirstMatching(org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectFirstMatching) Logger(org.slf4j.Logger) Assert.assertTrue(org.junit.Assert.assertTrue) ServerChangeReply(org.opendaylight.controller.cluster.raft.messages.ServerChangeReply) Test(org.junit.Test) TestKit(akka.testkit.javadsl.TestKit) TestActorRef(akka.testkit.TestActorRef) Maps(com.google.common.collect.Maps) TimeUnit(java.util.concurrent.TimeUnit) NonPersistentDataProvider(org.opendaylight.controller.cluster.NonPersistentDataProvider) ServerRemoved(org.opendaylight.controller.cluster.raft.messages.ServerRemoved) ByteState(org.opendaylight.controller.cluster.raft.persisted.ByteState) Leader(org.opendaylight.controller.cluster.raft.behaviors.Leader) UpdateElectionTerm(org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)

Example 7 with ServerInfo

use of org.opendaylight.controller.cluster.raft.persisted.ServerInfo in project controller by opendaylight.

the class LeadershipTransferIntegrationTest method createRaftActors.

private void createRaftActors() {
    testLog.info("createRaftActors starting");
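    // Seed each member's snapshot store with the same server config: the leader and followers 1 and 2
    // are voting, follower3 is non-voting.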
    final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.emptyList(), -1, -1, -1, -1, 1, null, new org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload(Arrays.asList(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true), new ServerInfo(follower2Id, true), new ServerInfo(follower3Id, false))));
    InMemorySnapshotStore.addSnapshot(leaderId, snapshot);
    InMemorySnapshotStore.addSnapshot(follower1Id, snapshot);
    InMemorySnapshotStore.addSnapshot(follower2Id, snapshot);
    InMemorySnapshotStore.addSnapshot(follower3Id, snapshot);
    follower1NotifierActor = factory.createActor(MessageCollectorActor.props(), factory.generateActorId(follower1Id + "-notifier"));
    follower1Actor = newTestRaftActor(follower1Id, TestRaftActor.newBuilder().peerAddresses(ImmutableMap.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id), follower3Id, testActorPath(follower3Id))).config(newFollowerConfigParams()).roleChangeNotifier(follower1NotifierActor));
    follower2NotifierActor = factory.createActor(MessageCollectorActor.props(), factory.generateActorId(follower2Id + "-notifier"));
    follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder().peerAddresses(ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(), follower3Id, testActorPath(follower3Id))).config(newFollowerConfigParams()).roleChangeNotifier(follower2NotifierActor));
    follower3NotifierActor = factory.createActor(MessageCollectorActor.props(), factory.generateActorId(follower3Id + "-notifier"));
    follower3Actor = newTestRaftActor(follower3Id, TestRaftActor.newBuilder().peerAddresses(ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(), follower2Id, follower2Actor.path().toString())).config(newFollowerConfigParams()).roleChangeNotifier(follower3NotifierActor));
    peerAddresses = ImmutableMap.<String, String>builder().put(follower1Id, follower1Actor.path().toString()).put(follower2Id, follower2Actor.path().toString()).put(follower3Id, follower3Actor.path().toString()).build();
    leaderConfigParams = newLeaderConfigParams();
    leaderConfigParams.setElectionTimeoutFactor(3);
    leaderNotifierActor = factory.createActor(MessageCollectorActor.props(), factory.generateActorId(leaderId + "-notifier"));
    leaderActor = newTestRaftActor(leaderId, TestRaftActor.newBuilder().peerAddresses(peerAddresses).config(leaderConfigParams).roleChangeNotifier(leaderNotifierActor));
    follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
    follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
    follower3CollectorActor = follower3Actor.underlyingActor().collectorActor();
    leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
    leaderContext = leaderActor.underlyingActor().getRaftActorContext();
    waitUntilLeader(leaderActor);
    testLog.info("createRaftActors ending");
}
Also used : Snapshot(org.opendaylight.controller.cluster.raft.persisted.Snapshot) ServerInfo(org.opendaylight.controller.cluster.raft.persisted.ServerInfo)

Example 8 with ServerInfo

use of org.opendaylight.controller.cluster.raft.persisted.ServerInfo in project controller by opendaylight.

the class InstallSnapshotTest method testSerialization.

@Test
public void testSerialization() {
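    // Fill the snapshot data with a repeating 0..254 byte pattern.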
    byte[] data = new byte[1000];
    for (int i = 0, j = 0; i < data.length; i++) {
        data[i] = (byte) j;
        if (++j >= 255) {
            j = 0;
        }
    }
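    // First round trip: include both optional arguments, a chunk hash code and a server config payload
    // (voting "leader", non-voting "follower").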
    ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("leader", true), new ServerInfo("follower", false)));
    InstallSnapshot expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, Optional.<Integer>of(54321), Optional.of(serverConfig));
    Object serialized = expected.toSerializable(RaftVersions.CURRENT_VERSION);
    assertEquals("Serialized type", InstallSnapshot.class, serialized.getClass());
    InstallSnapshot actual = (InstallSnapshot) SerializationUtils.clone((Serializable) serialized);
    verifyInstallSnapshot(expected, actual);
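    // Second round trip: construct without the optional arguments.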
    expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6);
    actual = (InstallSnapshot) SerializationUtils.clone((Serializable) expected.toSerializable(RaftVersions.CURRENT_VERSION));
    verifyInstallSnapshot(expected, actual);
}
Also used : Serializable(java.io.Serializable) ServerConfigurationPayload(org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload) ServerInfo(org.opendaylight.controller.cluster.raft.persisted.ServerInfo) Test(org.junit.Test)

Example 9 with ServerInfo

use of org.opendaylight.controller.cluster.raft.persisted.ServerInfo in project controller by opendaylight.

the class ClusterAdminRpcServiceTest method testFlipMemberVotingStates.

@Test
public void testFlipMemberVotingStates() throws Exception {
    String name = "testFlipMemberVotingStates";
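    // Persist an initial server config (member-1 and member-2 voting, member-3 non-voting) for the
    // cars and people shards on all three members.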
    ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("member-1", true), new ServerInfo("member-2", true), new ServerInfo("member-3", false)));
    setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
    setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
    setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
    String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
    final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name).moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();
    final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).moduleShardsConfig(moduleShardsConfig).build();
    final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).moduleShardsConfig(moduleShardsConfig).build();
    leaderNode1.configDataStore().waitTillReady();
    leaderNode1.operDataStore().waitTillReady();
    replicaNode3.configDataStore().waitTillReady();
    replicaNode3.operDataStore().waitTillReady();
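    // Verify the initial voting states before flipping them.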
    verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
    ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(), null);
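    // Flip the voting state of every member across all shards via the RPC invoked on member-3.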
    RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards().get(10, TimeUnit.SECONDS);
    FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
    verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational), successShardResult("people", DataStoreType.Operational));
    verifyVotingStates(new AbstractDataStore[] { leaderNode1.configDataStore(), leaderNode1.operDataStore(), replicaNode2.configDataStore(), replicaNode2.operDataStore(), replicaNode3.configDataStore(), replicaNode3.operDataStore() }, new String[] { "cars", "people" }, new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", TRUE));
    // Leadership should have transferred to member 3 since it is the only remaining voting member.
    verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
        assertNotNull("Expected non-null leader Id", raftState.getLeader());
        assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(), raftState.getLeader().contains("member-3"));
    });
    verifyRaftState(leaderNode1.operDataStore(), "cars", raftState -> {
        assertNotNull("Expected non-null leader Id", raftState.getLeader());
        assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(), raftState.getLeader().contains("member-3"));
    });
    // Flip the voting states back to the original states.
    rpcResult = service3.flipMemberVotingStatesForAllShards().get(10, TimeUnit.SECONDS);
    result = verifySuccessfulRpcResult(rpcResult);
    verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config), successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational), successShardResult("people", DataStoreType.Operational));
    verifyVotingStates(new AbstractDataStore[] { leaderNode1.configDataStore(), leaderNode1.operDataStore(), replicaNode2.configDataStore(), replicaNode2.operDataStore(), replicaNode3.configDataStore(), replicaNode3.operDataStore() }, new String[] { "cars", "people" }, new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
    // Leadership should have transferred to member 1 or 2.
    verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
        assertNotNull("Expected non-null leader Id", raftState.getLeader());
        assertTrue("Expected leader member-1 or member-2. Actual: " + raftState.getLeader(), raftState.getLeader().contains("member-1") || raftState.getLeader().contains("member-2"));
    });
}
Also used : MemberNode(org.opendaylight.controller.cluster.datastore.MemberNode) ServerConfigurationPayload(org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload) ServerInfo(org.opendaylight.controller.cluster.raft.persisted.ServerInfo) FlipMemberVotingStatesForAllShardsOutput(org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Test(org.junit.Test)

Example 10 with ServerInfo

use of org.opendaylight.controller.cluster.raft.persisted.ServerInfo in project controller by opendaylight.

the class NonVotingFollowerIntegrationTest method testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart.

/**
 * Tests non-voting follower re-sync after the non-persistent leader restarts and commits new log
 * entries prior to re-connecting to the follower. The leader's last index will be greater than the
 * follower's last index corresponding to the previous data retained in memory. So the follower's log
 * will be behind the leader's log but the leader's log entries will have a higher term. It also adds a
 * "down" peer on restart so the leader doesn't trim its log as it's trying to resync the follower.
 * Eventually the follower should force the leader to install snapshot to re-sync its state.
 */
@Test
public void testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart() {
    testLog.info("testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart starting");
    setupLeaderAndNonVotingFollower();
    // Add log entries and verify they are committed and applied by both nodes.
    expSnapshotState.add(sendPayloadData(leaderActor, "zero"));
    expSnapshotState.add(sendPayloadData(leaderActor, "one"));
    expSnapshotState.add(sendPayloadData(leaderActor, "two"));
    MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, expSnapshotState.size());
    MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, expSnapshotState.size());
    long lastIndex = 2;
    assertEquals("Leader journal lastIndex", lastIndex, leaderContext.getReplicatedLog().lastIndex());
    assertEquals("Leader commit index", lastIndex, leaderContext.getCommitIndex());
    assertEquals("Follower journal lastIndex", lastIndex, follower1Context.getReplicatedLog().lastIndex());
    assertEquals("Follower commit index", lastIndex, follower1Context.getCommitIndex());
    assertEquals("Follower applied state", expSnapshotState, followerInstance.getState());
    MessageCollectorActor.clearMessages(follower1CollectorActor);
    MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
    assertEquals("Follower snapshot index", lastIndex - 1, follower1Context.getReplicatedLog().getSnapshotIndex());
    assertEquals("Follower journal size", 1, leaderContext.getReplicatedLog().size());
    // Restart the leader
    killActor(leaderActor);
    MessageCollectorActor.clearMessages(follower1CollectorActor);
    // Temporarily drop AppendEntries to simulate a disconnect when the leader restarts.
    followerInstance.startDropMessages(AppendEntries.class);
    // Add a "down" peer so the leader doesn't trim its log as it's trying to resync the follower. The
    // leader will keep decrementing the follower's nextIndex to try to find a matching index. Since
    // there is no matching index it will eventually hit index 0 which should cause the follower to
    // force an install snapshot upon failure to remove the conflicting indexes due to indexes 0 and 1
    // being in the prior snapshot and not the log.
    // 
    // We also add another voting follower actor into the mix even though it shouldn't affect the
    // outcome.
    ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false), new ServerInfo(follower2Id, true), new ServerInfo("downPeer", false)));
    SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, currentTerm, persistedServerConfig);
    InMemoryJournal.clear();
    InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(currentTerm, leaderId));
    InMemoryJournal.addEntry(leaderId, 2, persistedServerConfigEntry);
    InMemoryJournal.addEntry(follower2Id, 1, persistedServerConfigEntry);
    DefaultConfigParamsImpl follower2ConfigParams = newFollowerConfigParams();
    follower2ConfigParams.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
    follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder().peerAddresses(ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString())).config(follower2ConfigParams).persistent(Optional.of(false)));
    TestRaftActor follower2Instance = follower2Actor.underlyingActor();
    follower2Instance.waitForRecoveryComplete();
    follower2CollectorActor = follower2Instance.collectorActor();
    peerAddresses = ImmutableMap.of(follower1Id, follower1Actor.path().toString(), follower2Id, follower2Actor.path().toString());
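    // Start a new leader actor with the updated peer addresses; it should come up in a higher term
    // with an empty journal.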
    createNewLeaderActor();
    currentTerm++;
    assertEquals("Leader term", currentTerm, leaderContext.getTermInformation().getCurrentTerm());
    assertEquals("Leader journal lastIndex", -1, leaderContext.getReplicatedLog().lastIndex());
    assertEquals("Leader commit index", -1, leaderContext.getCommitIndex());
    // Add new log entries to the leader - several more than the prior log entries
    expSnapshotState.add(sendPayloadData(leaderActor, "zero-1"));
    expSnapshotState.add(sendPayloadData(leaderActor, "one-1"));
    expSnapshotState.add(sendPayloadData(leaderActor, "two-1"));
    expSnapshotState.add(sendPayloadData(leaderActor, "three-1"));
    expSnapshotState.add(sendPayloadData(leaderActor, "four-1"));
    MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, expSnapshotState.size());
    MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, expSnapshotState.size());
    lastIndex = 4;
    assertEquals("Leader journal lastIndex", lastIndex, leaderContext.getReplicatedLog().lastIndex());
    assertEquals("Leader commit index", lastIndex, leaderContext.getCommitIndex());
    assertEquals("Leader snapshot index", -1, leaderContext.getReplicatedLog().getSnapshotIndex());
    assertEquals("Leader replicatedToAllIndex", -1, leaderInstance.getCurrentBehavior().getReplicatedToAllIndex());
    // Re-enable AppendEntries to the follower. The follower's log will be out of sync and it should
    // force the leader to install a snapshot to re-sync the entire follower's log and state.
    followerInstance.stopDropMessages(AppendEntries.class);
    MessageCollectorActor.expectFirstMatching(follower1CollectorActor, SnapshotComplete.class);
    assertEquals("Follower term", currentTerm, follower1Context.getTermInformation().getCurrentTerm());
    assertEquals("Follower journal lastIndex", lastIndex, follower1Context.getReplicatedLog().lastIndex());
    assertEquals("Follower journal lastTerm", currentTerm, follower1Context.getReplicatedLog().lastTerm());
    assertEquals("Follower commit index", lastIndex, follower1Context.getCommitIndex());
    assertEquals("Follower applied state", expSnapshotState, followerInstance.getState());
    testLog.info("testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart ending");
}
Also used : ServerConfigurationPayload(org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload) SimpleReplicatedLogEntry(org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry) ServerInfo(org.opendaylight.controller.cluster.raft.persisted.ServerInfo) DisableElectionsRaftPolicy(org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy) UpdateElectionTerm(org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm) Test(org.junit.Test)
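
Across these examples, ServerInfo is used the same way: each entry pairs a member id with a voting flag, a list of entries is wrapped in a ServerConfigurationPayload, and that payload is persisted either as a replicated log entry or inside a snapshot before the test actors start. Below is a minimal sketch of the journal-seeding variant, using only the constructors shown in the examples above; the node names, term, and sequence numbers are illustrative.

// Two-member config: "node1" votes, "node2" does not.
ServerConfigurationPayload config = new ServerConfigurationPayload(Arrays.asList(
        new ServerInfo("node1", true), new ServerInfo("node2", false)));
// Wrap the payload in a log entry (index 0, term 1) and seed the in-memory journal
// so the actor recovers this server configuration on startup.
SimpleReplicatedLogEntry configEntry = new SimpleReplicatedLogEntry(0, 1, config);
InMemoryJournal.addEntry("node1", 1, new UpdateElectionTerm(1, "node1"));
InMemoryJournal.addEntry("node1", 2, configEntry);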

Aggregations

ServerInfo (org.opendaylight.controller.cluster.raft.persisted.ServerInfo) 23
ServerConfigurationPayload (org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload) 21
Test (org.junit.Test) 17
SimpleReplicatedLogEntry (org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry) 13
UpdateElectionTerm (org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm) 7
FiniteDuration (scala.concurrent.duration.FiniteDuration) 5
ActorRef (akka.actor.ActorRef) 4
TestActorRef (akka.testkit.TestActorRef) 4
ArrayList (java.util.ArrayList) 4
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString) 4
ApplySnapshot (org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot) 4
ApplyState (org.opendaylight.controller.cluster.raft.base.messages.ApplyState) 4
ChangeServersVotingStatus (org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus) 4
InstallSnapshot (org.opendaylight.controller.cluster.raft.messages.InstallSnapshot) 4
ServerChangeReply (org.opendaylight.controller.cluster.raft.messages.ServerChangeReply) 4
ApplyJournalEntries (org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries) 3
Snapshot (org.opendaylight.controller.cluster.raft.persisted.Snapshot) 3
Props (akka.actor.Props) 2
UntypedActor (akka.actor.UntypedActor) 2
Dispatchers (akka.dispatch.Dispatchers) 2