use of org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload in project controller by opendaylight.
The class InstallSnapshotTest, method testSerialization.
@Test
public void testSerialization() {
byte[] data = new byte[1000];
for (int i = 0, j = 0; i < data.length; i++) {
data[i] = (byte) j;
if (++j >= 255) {
j = 0;
}
}
ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
        new ServerInfo("leader", true), new ServerInfo("follower", false)));
InstallSnapshot expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6,
        Optional.<Integer>of(54321), Optional.of(serverConfig));
Object serialized = expected.toSerializable(RaftVersions.CURRENT_VERSION);
assertEquals("Serialized type", InstallSnapshot.class, serialized.getClass());
InstallSnapshot actual = (InstallSnapshot) SerializationUtils.clone((Serializable) serialized);
verifyInstallSnapshot(expected, actual);
expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6);
actual = (InstallSnapshot) SerializationUtils.clone((Serializable) expected.toSerializable(RaftVersions.CURRENT_VERSION));
verifyInstallSnapshot(expected, actual);
}
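The payload itself can be round-tripped the same way, independent of InstallSnapshot. The following is a minimal sketch, assuming the import locations used by the surrounding tests and that ServerInfo has value-based equals (the Sets comparisons further down rely on the same assumption); the test class name is illustrative.
import static org.junit.Assert.assertEquals;
import java.io.Serializable;
import java.util.Arrays;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;

public class ServerConfigurationPayloadRoundTripTest {
    @Test
    public void testPayloadRoundTrip() {
        // Build a payload with one voting and one non-voting member.
        ServerConfigurationPayload payload = new ServerConfigurationPayload(Arrays.asList(
                new ServerInfo("leader", true), new ServerInfo("follower", false)));

        // Clone via Java serialization, mirroring the SerializationUtils.clone pattern used above.
        ServerConfigurationPayload cloned =
                (ServerConfigurationPayload) SerializationUtils.clone((Serializable) payload);

        // The server list should survive the round trip; this relies on ServerInfo value equality,
        // the same assumption the Sets comparisons in the integration tests below make.
        assertEquals("Server config", payload.getServerConfig(), cloned.getServerConfig());
    }
}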
use of org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload in project controller by opendaylight.
The class ClusterAdminRpcServiceTest, method testFlipMemberVotingStates.
@Test
public void testFlipMemberVotingStates() throws Exception {
String name = "testFlipMemberVotingStates";
ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
        new ServerInfo("member-1", true), new ServerInfo("member-2", true), new ServerInfo("member-3", false)));
setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
        .moduleShardsConfig(moduleShardsConfig)
        .datastoreContextBuilder(DatastoreContext.newBuilder()
                .shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
        .build();
final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
        .moduleShardsConfig(moduleShardsConfig).build();
final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
        .moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.configDataStore().waitTillReady();
leaderNode1.operDataStore().waitTillReady();
replicaNode3.configDataStore().waitTillReady();
replicaNode3.operDataStore().waitTillReady();
verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
        new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
        replicaNode3.operDataStore(), null);
RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards()
        .get(10, TimeUnit.SECONDS);
FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
        successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational),
        successShardResult("people", DataStoreType.Operational));
verifyVotingStates(new AbstractDataStore[] { leaderNode1.configDataStore(), leaderNode1.operDataStore(),
        replicaNode2.configDataStore(), replicaNode2.operDataStore(), replicaNode3.configDataStore(),
        replicaNode3.operDataStore() }, new String[] { "cars", "people" },
        new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
        new SimpleEntry<>("member-3", TRUE));
// Leadership should have transferred to member 3 since it is the only remaining voting member.
verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
assertNotNull("Expected non-null leader Id", raftState.getLeader());
assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(), raftState.getLeader().contains("member-3"));
});
verifyRaftState(leaderNode1.operDataStore(), "cars", raftState -> {
assertNotNull("Expected non-null leader Id", raftState.getLeader());
assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(), raftState.getLeader().contains("member-3"));
});
// Flip the voting states back to the original states.
rpcResult = service3.flipMemberVotingStatesForAllShards().get(10, TimeUnit.SECONDS);
result = verifySuccessfulRpcResult(rpcResult);
verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
        successShardResult("people", DataStoreType.Config), successShardResult("cars", DataStoreType.Operational),
        successShardResult("people", DataStoreType.Operational));
verifyVotingStates(new AbstractDataStore[] { leaderNode1.configDataStore(), leaderNode1.operDataStore(),
        replicaNode2.configDataStore(), replicaNode2.operDataStore(), replicaNode3.configDataStore(),
        replicaNode3.operDataStore() }, new String[] { "cars", "people" },
        new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
        new SimpleEntry<>("member-3", FALSE));
// Leadership should have transferred to member 1 or 2.
verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
assertNotNull("Expected non-null leader Id", raftState.getLeader());
assertTrue("Expected leader member-1 or member-2. Actual: " + raftState.getLeader(), raftState.getLeader().contains("member-1") || raftState.getLeader().contains("member-2"));
});
}
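Conceptually, flipMemberVotingStatesForAllShards rewrites each shard's server configuration with every member's voting flag inverted. A minimal sketch of that transformation at the ServerConfigurationPayload level follows; the class and method names are illustrative, and it assumes ServerInfo exposes getId() and isVoting() accessors.
import java.util.ArrayList;
import java.util.List;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;

final class VotingStateFlipper {
    private VotingStateFlipper() {
    }

    // Illustrative only: returns a new payload with every member's voting flag inverted,
    // mirroring what the flip RPC applies to each shard's server configuration.
    static ServerConfigurationPayload flipVotingStates(ServerConfigurationPayload current) {
        List<ServerInfo> flipped = new ArrayList<>();
        for (ServerInfo info : current.getServerConfig()) {
            flipped.add(new ServerInfo(info.getId(), !info.isVoting()));
        }
        return new ServerConfigurationPayload(flipped);
    }
}
Applied to the persisted config above, member-1 and member-2 become non-voting and member-3 becomes the only voting member, which is why leadership moves to member-3 after the first flip and back to member-1 or member-2 after the second.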
use of org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload in project controller by opendaylight.
The class NonVotingFollowerIntegrationTest, method testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart.
/**
* Tests non-voting follower re-sync after the non-persistent leader restarts and commits new log
* entries prior to re-connecting to the follower. The leader's last index will be greater than the
* follower's last index corresponding to the previous data retained in memory. So the follower's log
* will be behind the leader's log but the leader's log entries will have a higher term. It also adds a
* "down" peer on restart so the leader doesn't trim its log as it's trying to resync the follower.
* Eventually the follower should force the leader to install a snapshot to re-sync its state.
*/
@Test
public void testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart() {
testLog.info("testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart starting");
setupLeaderAndNonVotingFollower();
// Add log entries and verify they are committed and applied by both nodes.
expSnapshotState.add(sendPayloadData(leaderActor, "zero"));
expSnapshotState.add(sendPayloadData(leaderActor, "one"));
expSnapshotState.add(sendPayloadData(leaderActor, "two"));
MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, expSnapshotState.size());
MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, expSnapshotState.size());
long lastIndex = 2;
assertEquals("Leader journal lastIndex", lastIndex, leaderContext.getReplicatedLog().lastIndex());
assertEquals("Leader commit index", lastIndex, leaderContext.getCommitIndex());
assertEquals("Follower journal lastIndex", lastIndex, follower1Context.getReplicatedLog().lastIndex());
assertEquals("Follower commit index", lastIndex, follower1Context.getCommitIndex());
assertEquals("Follower applied state", expSnapshotState, followerInstance.getState());
MessageCollectorActor.clearMessages(follower1CollectorActor);
MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
assertEquals("Follower snapshot index", lastIndex - 1, follower1Context.getReplicatedLog().getSnapshotIndex());
assertEquals("Follower journal size", 1, leaderContext.getReplicatedLog().size());
// Restart the leader
killActor(leaderActor);
MessageCollectorActor.clearMessages(follower1CollectorActor);
// Temporarily drop AppendEntries to simulate a disconnect when the leader restarts.
followerInstance.startDropMessages(AppendEntries.class);
// Add a "down" peer so the leader doesn't trim its log as it's trying to resync the follower. The
// leader will keep decrementing the follower's nextIndex to try to find a matching index. Since
// there is no matching index it will eventually hit index 0 which should cause the follower to
// force an install snapshot upon failure to remove the conflicting indexes due to indexes 0 and 1
// being in the prior snapshot and not the log.
//
// We also add another voting follower actor into the mix even though it shouldn't affect the
// outcome.
ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
        new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false),
        new ServerInfo(follower2Id, true), new ServerInfo("downPeer", false)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, currentTerm, persistedServerConfig);
InMemoryJournal.clear();
InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(currentTerm, leaderId));
InMemoryJournal.addEntry(leaderId, 2, persistedServerConfigEntry);
InMemoryJournal.addEntry(follower2Id, 1, persistedServerConfigEntry);
DefaultConfigParamsImpl follower2ConfigParams = newFollowerConfigParams();
follower2ConfigParams.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder()
        .peerAddresses(ImmutableMap.of(leaderId, testActorPath(leaderId),
                follower1Id, follower1Actor.path().toString()))
        .config(follower2ConfigParams).persistent(Optional.of(false)));
TestRaftActor follower2Instance = follower2Actor.underlyingActor();
follower2Instance.waitForRecoveryComplete();
follower2CollectorActor = follower2Instance.collectorActor();
peerAddresses = ImmutableMap.of(follower1Id, follower1Actor.path().toString(), follower2Id, follower2Actor.path().toString());
createNewLeaderActor();
currentTerm++;
assertEquals("Leader term", currentTerm, leaderContext.getTermInformation().getCurrentTerm());
assertEquals("Leader journal lastIndex", -1, leaderContext.getReplicatedLog().lastIndex());
assertEquals("Leader commit index", -1, leaderContext.getCommitIndex());
// Add new log entries to the leader - several more than the prior log entries
expSnapshotState.add(sendPayloadData(leaderActor, "zero-1"));
expSnapshotState.add(sendPayloadData(leaderActor, "one-1"));
expSnapshotState.add(sendPayloadData(leaderActor, "two-1"));
expSnapshotState.add(sendPayloadData(leaderActor, "three-1"));
expSnapshotState.add(sendPayloadData(leaderActor, "four-1"));
MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, expSnapshotState.size());
MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, expSnapshotState.size());
lastIndex = 4;
assertEquals("Leader journal lastIndex", lastIndex, leaderContext.getReplicatedLog().lastIndex());
assertEquals("Leader commit index", lastIndex, leaderContext.getCommitIndex());
assertEquals("Leader snapshot index", -1, leaderContext.getReplicatedLog().getSnapshotIndex());
assertEquals("Leader replicatedToAllIndex", -1, leaderInstance.getCurrentBehavior().getReplicatedToAllIndex());
// Re-enable AppendEntries to the follower. The follower's log will be out of sync and it should
// force the leader to install a snapshot to re-sync the entire follower's log and state.
followerInstance.stopDropMessages(AppendEntries.class);
MessageCollectorActor.expectFirstMatching(follower1CollectorActor, SnapshotComplete.class);
assertEquals("Follower term", currentTerm, follower1Context.getTermInformation().getCurrentTerm());
assertEquals("Follower journal lastIndex", lastIndex, follower1Context.getReplicatedLog().lastIndex());
assertEquals("Follower journal lastTerm", currentTerm, follower1Context.getReplicatedLog().lastTerm());
assertEquals("Follower commit index", lastIndex, follower1Context.getCommitIndex());
assertEquals("Follower applied state", expSnapshotState, followerInstance.getState());
testLog.info("testFollowerResyncWithMoreLeaderLogEntriesAndDownPeerAfterNonPersistentLeaderRestart ending");
}
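Stripped of the actor plumbing, the re-sync decision this test drives at looks like this: the restarted leader keeps probing lower and lower indexes for a matching entry, and once the probe lands at or below the follower's snapshot boundary the follower can no longer truncate the conflicting entries and must be re-synced with an InstallSnapshot. The sketch below only restates that condition; it is not the actual Follower/AbstractLeader logic, and the names are hypothetical.
final class ResyncDecision {
    private ResyncDecision() {
    }

    // Hypothetical helper restating the condition described in the test comments: once the leader
    // probes an index at or below the follower's snapshot boundary, the conflicting entries are no
    // longer individually present in the follower's log, so they cannot be truncated and the
    // follower has to force a full InstallSnapshot instead.
    static boolean mustForceInstallSnapshot(long leaderProbeIndex, long followerSnapshotIndex) {
        return followerSnapshotIndex > -1 && leaderProbeIndex <= followerSnapshotIndex;
    }
}
In this test the follower's snapshot index is 1, so the probe reaching index 1 (and heading for 0) trips the condition; the follower then observes SnapshotComplete and ends up with the leader's new term and lastIndex of 4.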
use of org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload in project controller by opendaylight.
The class NonVotingFollowerIntegrationTest, method setupLeaderAndNonVotingFollower.
private void setupLeaderAndNonVotingFollower() {
snapshotBatchCount = 100;
int persistedTerm = 1;
// Set up a persisted ServerConfigurationPayload with the leader voting and the follower non-voting.
ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
        new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false)));
SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, persistedTerm, persistedServerConfig);
InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(persistedTerm, leaderId));
InMemoryJournal.addEntry(leaderId, 2, persistedServerConfigEntry);
InMemoryJournal.addEntry(follower1Id, 1, new UpdateElectionTerm(persistedTerm, leaderId));
InMemoryJournal.addEntry(follower1Id, 2, persistedServerConfigEntry);
DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
follower1Actor = newTestRaftActor(follower1Id, follower1Builder
        .peerAddresses(ImmutableMap.of(leaderId, testActorPath(leaderId)))
        .config(followerConfigParams).persistent(Optional.of(false)));
peerAddresses = ImmutableMap.<String, String>builder().put(follower1Id, follower1Actor.path().toString()).build();
leaderConfigParams = newLeaderConfigParams();
leaderActor = newTestRaftActor(leaderId, TestRaftActor.newBuilder().peerAddresses(peerAddresses)
        .config(leaderConfigParams).persistent(Optional.of(false)));
followerInstance = follower1Actor.underlyingActor();
follower1CollectorActor = followerInstance.collectorActor();
leaderInstance = leaderActor.underlyingActor();
leaderCollectorActor = leaderInstance.collectorActor();
leaderContext = leaderInstance.getRaftActorContext();
follower1Context = followerInstance.getRaftActorContext();
waitUntilLeader(leaderActor);
// Verify leader's context after startup
currentTerm = persistedTerm + 1;
assertEquals("Leader term", currentTerm, leaderContext.getTermInformation().getCurrentTerm());
assertEquals("Leader server config", Sets.newHashSet(persistedServerConfig.getServerConfig()), Sets.newHashSet(leaderContext.getPeerServerInfo(true).getServerConfig()));
assertEquals("Leader isVotingMember", true, leaderContext.isVotingMember());
// Verify follower's context after startup
MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
assertEquals("Follower term", currentTerm, follower1Context.getTermInformation().getCurrentTerm());
assertEquals("Follower server config", Sets.newHashSet(persistedServerConfig.getServerConfig()), Sets.newHashSet(follower1Context.getPeerServerInfo(true).getServerConfig()));
assertEquals("FollowerisVotingMember", false, follower1Context.isVotingMember());
}
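The journal seeding here has a fixed shape: an UpdateElectionTerm at sequence 1 followed by a SimpleReplicatedLogEntry at sequence 2 wrapping the ServerConfigurationPayload, written once per member that should recover that configuration. A sketch of a reusable helper for the pattern follows, reusing the constructor and InMemoryJournal.addEntry signatures exactly as they appear above; the helper name and the import locations are assumptions.
import java.util.List;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;

final class JournalSeeder {
    private JournalSeeder() {
    }

    // Hypothetical helper: seed each member's in-memory journal with the same persisted term and
    // server configuration, mirroring the addEntry calls in setupLeaderAndNonVotingFollower.
    static void seedServerConfig(long term, String votedFor, ServerConfigurationPayload serverConfig,
            List<String> memberIds) {
        SimpleReplicatedLogEntry configEntry = new SimpleReplicatedLogEntry(0, term, serverConfig);
        for (String memberId : memberIds) {
            InMemoryJournal.addEntry(memberId, 1, new UpdateElectionTerm(term, votedFor));
            InMemoryJournal.addEntry(memberId, 2, configEntry);
        }
    }
}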
use of org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload in project controller by opendaylight.
The class RaftActorContextImplTest, method testUpdatePeerIds.
@Test
public void testUpdatePeerIds() {
RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(), "self",
        new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
        Maps.newHashMap(ImmutableMap.<String, String>of("peer1", "peerAddress1")),
        new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG);
context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", false),
        new ServerInfo("peer2", true), new ServerInfo("peer3", false))));
verifyPeerInfo(context, "peer1", null);
verifyPeerInfo(context, "peer2", true);
verifyPeerInfo(context, "peer3", false);
assertEquals("isVotingMember", false, context.isVotingMember());
context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", true),
        new ServerInfo("peer2", true), new ServerInfo("peer3", true))));
verifyPeerInfo(context, "peer2", true);
verifyPeerInfo(context, "peer3", true);
assertEquals("isVotingMember", true, context.isVotingMember());
context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("peer2", true),
        new ServerInfo("peer3", true))));
verifyPeerInfo(context, "peer2", true);
verifyPeerInfo(context, "peer3", true);
assertEquals("isVotingMember", false, context.isVotingMember());
}
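The assertions above pin down the updatePeerIds contract: peers absent from the new ServerConfigurationPayload are dropped (peer1 after the first update), listed peers adopt their voting flags, and the local member's own entry, or its absence, drives isVotingMember. The sketch below merely restates that contract in isolation; PeerView and deriveFromConfig are hypothetical names, not the RaftActorContextImpl implementation, and it assumes ServerInfo exposes getId() and isVoting().
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;

final class PeerView {
    final Map<String, Boolean> peerVotingStates = new HashMap<>();
    boolean votingMember;

    // Hypothetical restatement of the contract exercised above: every ServerInfo other than the
    // local id becomes a peer with its voting flag, and the local id's entry (absent means
    // non-voting) determines isVotingMember.
    static PeerView deriveFromConfig(String localId, ServerConfigurationPayload config) {
        PeerView view = new PeerView();
        view.votingMember = false;
        for (ServerInfo info : config.getServerConfig()) {
            if (localId.equals(info.getId())) {
                view.votingMember = info.isVoting();
            } else {
                view.peerVotingStates.put(info.getId(), info.isVoting());
            }
        }
        return view;
    }
}
Evaluating it for "self" against the three payloads in the test yields the same voting states the assertions check after each update.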