Use of io.atomix.cluster.MemberId in project atomix by atomix.
In class RaftTest, method createClient.
private RaftClient createClient(List<RaftMember> members) throws Throwable {
  final MemberId memberId = nextNodeId();
  final List<MemberId> memberIds = members.stream()
      .map(RaftMember::memberId)
      .collect(Collectors.toList());
  final RaftClient client = RaftClient.builder()
      .withMemberId(memberId)
      .withPartitionId(PartitionId.from("test", 1))
      .withProtocol(protocolFactory.newClientProtocol(memberId))
      .build();
  client.connect(memberIds).thenRun(this::resume);
  await(30000);
  clients.add(client);
  return client;
}
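The method above blocks on the test harness's resume()/await() rendezvous. As a point of comparison, here is a minimal sketch of the same connect step waiting on the CompletableFuture returned by connect() directly; the helper name and the 30-second cap are illustrative, and the nextNodeId(), protocolFactory and clients fixtures (plus an import of java.util.concurrent.TimeUnit) are assumed to be the same ones used above.

// Sketch only: blocks on the future instead of the resume()/await(30000) pair.
private RaftClient createClientBlocking(List<RaftMember> members) throws Exception {
  final MemberId memberId = nextNodeId();
  final List<MemberId> memberIds = members.stream()
      .map(RaftMember::memberId)
      .collect(Collectors.toList());
  final RaftClient client = RaftClient.builder()
      .withMemberId(memberId)
      .withPartitionId(PartitionId.from("test", 1))
      .withProtocol(protocolFactory.newClientProtocol(memberId))
      .build();
  // connect() returns a CompletableFuture; waiting on it with a timeout
  // surfaces an unreachable cluster as a TimeoutException.
  client.connect(memberIds).get(30, TimeUnit.SECONDS);
  clients.add(client);
  return client;
}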
Use of io.atomix.cluster.MemberId in project atomix by atomix.
In class RaftTest, method testSnapshotSentOnDataLoss.
@Test
public void testSnapshotSentOnDataLoss() throws Throwable {
  final List<RaftMember> members = Lists.newArrayList(createMember(), createMember(), createMember());
  final Map<MemberId, RaftStorage> storages = members.stream()
      .map(RaftMember::memberId)
      .collect(Collectors.toMap(Function.identity(), this::createStorage));
  final Map<MemberId, RaftServer> servers = storages.entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, this::createServer));

  // wait for the cluster to start
  startCluster(servers);

  // fill two segments, then compact so we have at least one snapshot
  final RaftClient client = createClient(members);
  final TestPrimitive primitive = createPrimitive(client);
  fillSegment(primitive);
  fillSegment(primitive);
  Futures.allOf(servers.values().stream().map(RaftServer::compact)).thenRun(this::resume);
  await(30000);

  // partition members into leader and followers
  final Map<Boolean, List<RaftMember>> collect = members.stream()
      .collect(Collectors.partitioningBy(m -> servers.get(m.memberId()).isLeader()));
  final RaftMember leader = collect.get(true).get(0);
  final RaftStorage leaderStorage = storages.get(leader.memberId());
  final RaftMember slave = collect.get(false).get(0);
  final RaftStorage slaveStorage = storages.get(slave.memberId());

  // shut down the client and primitive
  primitive.close().thenCompose(nothing -> client.close()).thenRun(this::resume);
  await(30000);

  // shut down the other follower
  final RaftMember other = collect.get(false).get(1);
  servers.get(other.memberId()).shutdown().thenRun(this::resume);
  await(30000);

  // shut down the slave and recreate it from scratch
  final RaftServer slaveServer = recreateServerWithDataLoss(leader, slave, servers.get(slave.memberId()), slaveStorage);
  assertEquals(leaderStorage.openSnapshotStore().getCurrentSnapshotIndex(),
      slaveStorage.openSnapshotStore().getCurrentSnapshotIndex());

  // and again a second time to ensure the snapshot index of the member is reset
  recreateServerWithDataLoss(leader, slave, slaveServer, slaveStorage);
  assertEquals(leaderStorage.openSnapshotStore().getCurrentSnapshotIndex(),
      slaveStorage.openSnapshotStore().getCurrentSnapshotIndex());
}
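The storage and server factories referenced above (createStorage, createServer) are not shown on this page. A rough, hypothetical sketch of a per-member storage factory, assuming the RaftStorage builder API, a disk-backed StorageLevel and an arbitrary test directory layout (all assumptions, plus an import of java.io.File), might look like this:

// Hypothetical sketch only; the directory layout and storage level are
// assumptions, not the values used by the real RaftTest.
private RaftStorage createStorage(MemberId memberId) {
  return RaftStorage.builder()
      .withDirectory(new File("target/test-logs/" + memberId.id()))
      .withStorageLevel(StorageLevel.DISK)
      .build();
}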
Use of io.atomix.cluster.MemberId in project atomix by atomix.
In class RaftSessionConnection, method sendRequest.
/**
* Sends the given request attempt to the cluster.
*/
protected <T extends RaftRequest, U extends RaftResponse> void sendRequest(
    T request, BiFunction<MemberId, T, CompletableFuture<U>> sender, int count, CompletableFuture<U> future) {
  MemberId node = next();
  if (node != null) {
    log.trace("Sending {} to {}", request, node);
    int selectionId = this.selectionId;
    sender.apply(node, request).whenCompleteAsync((r, e) -> {
      if (e != null || r != null) {
        handleResponse(request, sender, count, selectionId, node, r, e, future);
      } else {
        future.complete(null);
      }
    }, context);
  } else {
    future.completeExceptionally(new ConnectException("Failed to connect to the cluster"));
  }
}
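Callers of this method supply the attempt count and the result future themselves. The sketch below shows how such a retrying send is typically driven from a simpler entry point; it is illustrative only, and the overload shown here is an assumption rather than the exact RaftSessionConnection API.

// Illustrative entry point (assumed, not the verified Atomix signature): create
// the result future, start with attempt count 1, and let handleResponse(...)
// decide whether to retry with a higher count.
protected <T extends RaftRequest, U extends RaftResponse> CompletableFuture<U> sendRequest(
    T request, BiFunction<MemberId, T, CompletableFuture<U>> sender) {
  CompletableFuture<U> future = new CompletableFuture<>();
  sendRequest(request, sender, 1, future);
  return future;
}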
Use of io.atomix.cluster.MemberId in project zeppelin by apache.
In class ClusterManager, method start.
public void start() {
  if (!zConf.isClusterMode()) {
    return;
  }

  // RaftClient thread
  new Thread(new Runnable() {
    @Override
    public void run() {
      LOGGER.info("RaftClientThread run() >>>");
      int raftClientPort = 0;
      try {
        raftClientPort = RemoteInterpreterUtils.findRandomAvailablePortOnAllLocalInterfaces();
      } catch (IOException e) {
        LOGGER.error(e.getMessage());
      }
      MemberId memberId = MemberId.from(zeplServerHost + ":" + raftClientPort);
      Address address = Address.from(zeplServerHost, raftClientPort);
      raftAddressMap.put(memberId, address);
      MessagingService messagingManager = NettyMessagingService.builder()
          .withAddress(address)
          .build()
          .start()
          .join();
      RaftClientProtocol protocol = new RaftClientMessagingProtocol(
          messagingManager, protocolSerializer, raftAddressMap::get);
      raftClient = RaftClient.builder()
          .withMemberId(memberId)
          .withPartitionId(PartitionId.from("partition", 1))
          .withProtocol(protocol)
          .build();
      raftClient.connect(clusterMemberIds).join();
      raftSessionClient = createProxy(raftClient);
      LOGGER.info("RaftClientThread run() <<<");
    }
  }).start();

  // Cluster meta consume thread
  new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        while (getRunning().get()) {
          ClusterMetaEntity metaEntity = clusterMetaQueue.peek();
          if (null != metaEntity) {
            // wait until the Raft client has finished initializing
            int retry = 0;
            while (!raftInitialized()) {
              retry++;
              if (0 == retry % 30) {
                LOGGER.warn("Raft is not yet initialized! retry[{}]", retry);
              }
              Thread.sleep(100);
            }
            boolean success = false;
            switch (metaEntity.getOperation()) {
              case DELETE_OPERATION:
                success = deleteClusterMeta(metaEntity);
                break;
              case PUT_OPERATION:
                success = putClusterMeta(metaEntity);
                break;
            }
            if (success) {
              // the operation succeeded, so remove it from the queue
              clusterMetaQueue.remove(metaEntity);
              LOGGER.info("Cluster Meta Consume success! {}", metaEntity);
            } else {
              LOGGER.error("Cluster Meta Consume failed!");
            }
          } else {
            Thread.sleep(100);
          }
        }
      } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
      }
    }
  }).start();
}
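The consumer thread above spins on raftInitialized() before applying queued operations. A plausible sketch of that readiness check, assuming the session client exposes its PrimitiveState (an assumption about the Zeppelin code, which is not shown on this page):

// Hypothetical readiness check; the field names mirror the snippet above, but
// the state comparison is an assumption about the real ClusterManager code.
private boolean raftInitialized() {
  return raftClient != null
      && raftSessionClient != null
      && raftSessionClient.getState() == PrimitiveState.CONNECTED;
}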
Use of io.atomix.cluster.MemberId in project atomix by atomix.
In class AntiEntropyMapDelegate, method antiEntropyCheckLocalItems.
/**
 * Processes an anti-entropy advertisement from a peer by taking the following actions:
 * 1. If the peer has an older entry, pushes an update to the peer.
 * 2. If the peer indicates an entry was removed and has a more recent timestamp than the local entry, updates the local state.
 */
private List<MapDelegateEvent<K, V>> antiEntropyCheckLocalItems(AntiEntropyAdvertisement ad) {
  final List<MapDelegateEvent<K, V>> externalEvents = Lists.newLinkedList();
  final MemberId sender = ad.sender();
  final List<MemberId> peers = ImmutableList.of(sender);
  Set<String> staleOrMissing = new HashSet<>();
  Set<String> locallyUnknown = new HashSet<>(ad.digest().keySet());
  items.forEach((key, localValue) -> {
    locallyUnknown.remove(key);
    MapValue.Digest remoteValueDigest = ad.digest().get(key);
    if (remoteValueDigest == null || localValue.isNewerThan(remoteValueDigest.timestamp())) {
      // local value is more recent, push to sender
      queueUpdate(new UpdateEntry(key, localValue), peers);
    } else if (remoteValueDigest.isNewerThan(localValue.digest()) && remoteValueDigest.isTombstone()) {
      // remote value is more recent and a tombstone: update local value
      MapValue tombstone = MapValue.tombstone(remoteValueDigest.timestamp());
      MapValue previousValue = removeInternal(key, Optional.empty(), Optional.of(tombstone));
      if (previousValue != null && previousValue.isAlive()) {
        externalEvents.add(new MapDelegateEvent<>(REMOVE, decodeKey(key), previousValue.get(this::decodeValue)));
      }
    } else if (remoteValueDigest.isNewerThan(localValue.digest())) {
      // not a tombstone and the remote value is newer
      staleOrMissing.add(key);
    }
  });
  // keys missing in the local map
  staleOrMissing.addAll(locallyUnknown);
  // request updates that we missed out on
  sendUpdateRequestToPeer(sender, staleOrMissing);
  return externalEvents;
}
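For context, the advertisement consumed above carries a per-key digest (timestamp plus tombstone flag) of the sender's map. A hedged sketch of how the sending side might assemble it, assuming Guava's Maps.transformValues, a localMemberId field, and an AntiEntropyAdvertisement constructor taking the sender and a digest map (all assumptions):

// Sketch only; the constructor signature and localMemberId field are assumed,
// not taken from the AntiEntropyMapDelegate source shown here.
private AntiEntropyAdvertisement createAdvertisement() {
  return new AntiEntropyAdvertisement(
      localMemberId,
      ImmutableMap.copyOf(Maps.transformValues(items, MapValue::digest)));
}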