Use of io.scalecube.cluster.Member in project scalecube by scalecube.
Class GossipProtocolTest, method testGossipProtocol.
@Test
public void testGossipProtocol() throws Exception {
  // Init gossip protocol instances
  List<GossipProtocolImpl> gossipProtocols = initGossipProtocols(membersNum, lossPercent, meanDelay);
  // Subscribe on gossips
  long disseminationTime = 0;
  LongSummaryStatistics messageSentStatsDissemination = null;
  LongSummaryStatistics messageLostStatsDissemination = null;
  LongSummaryStatistics messageSentStatsOverall = null;
  LongSummaryStatistics messageLostStatsOverall = null;
  long gossipTimeout = ClusterMath.gossipTimeoutToSweep(gossipRepeatMultiplier, membersNum, gossipInterval);
  try {
    final String gossipData = "test gossip - " + ThreadLocalRandom.current().nextLong();
    final CountDownLatch latch = new CountDownLatch(membersNum - 1);
    final Map<Member, Member> receivers = new ConcurrentHashMap<>();
    final AtomicBoolean doubleDelivery = new AtomicBoolean(false);
    for (final GossipProtocolImpl protocol : gossipProtocols) {
      protocol.listen().subscribe(gossip -> {
        if (gossipData.equals(gossip.data())) {
          boolean firstTimeAdded = receivers.put(protocol.getMember(), protocol.getMember()) == null;
          if (firstTimeAdded) {
            latch.countDown();
          } else {
            LOGGER.error("Delivered gossip twice to: {}", protocol.getTransport().address());
            doubleDelivery.set(true);
          }
        }
      });
    }
    // Spread gossip, measure and verify delivery metrics
    long start = System.currentTimeMillis();
    gossipProtocols.get(0).spread(Message.fromData(gossipData));
    // Await up to twice the gossip timeout
    latch.await(2 * gossipTimeout, TimeUnit.MILLISECONDS);
    disseminationTime = System.currentTimeMillis() - start;
    messageSentStatsDissemination = computeMessageSentStats(gossipProtocols);
    if (lossPercent > 0) {
      messageLostStatsDissemination = computeMessageLostStats(gossipProtocols);
    }
    Assert.assertEquals("Not all members received gossip", membersNum - 1, receivers.size());
    Assert.assertTrue("Too long dissemination time " + disseminationTime
        + "ms (timeout " + gossipTimeout + "ms)", disseminationTime < gossipTimeout);
    // Await gossip lifetime plus a few gossip intervals to ensure gossip is fully spread
    if (awaitFullCompletion) {
      long awaitCompletionTime = gossipTimeout - disseminationTime + 3 * gossipInterval;
      Thread.sleep(awaitCompletionTime);
      messageSentStatsOverall = computeMessageSentStats(gossipProtocols);
      if (lossPercent > 0) {
        messageLostStatsOverall = computeMessageLostStats(gossipProtocols);
      }
    }
    Assert.assertFalse("Delivered gossip twice to same member", doubleDelivery.get());
  } finally {
    // Print theoretical results
    LOGGER.info("Experiment params: N={}, Gfanout={}, Grepeat_mult={}, Tgossip={}ms, Ploss={}%, Tmean={}ms",
        membersNum, gossipFanout, gossipRepeatMultiplier, gossipInterval, lossPercent, meanDelay);
    double convergProb = gossipConvergencePercent(gossipFanout, gossipRepeatMultiplier, membersNum, lossPercent);
    long expDissemTime = gossipDisseminationTime(gossipRepeatMultiplier, membersNum, gossipInterval);
    int maxMsgPerNode = maxMessagesPerGossipPerNode(gossipFanout, gossipRepeatMultiplier, membersNum);
    int maxMsgTotal = maxMessagesPerGossipTotal(gossipFanout, gossipRepeatMultiplier, membersNum);
    LOGGER.info("Expected dissemination time is {}ms with probability {}%", expDissemTime, convergProb);
    LOGGER.info("Max messages sent per node {} and total {}", maxMsgPerNode, maxMsgTotal);
    // Print actual results
    LOGGER.info("Actual dissemination time: {}ms (timeout {}ms)", disseminationTime, gossipTimeout);
    LOGGER.info("Messages sent stats (diss.): {}", messageSentStatsDissemination);
    if (lossPercent > 0) {
      LOGGER.info("Messages lost stats (diss.): {}", messageLostStatsDissemination);
    }
    if (awaitFullCompletion) {
      LOGGER.info("Messages sent stats (total): {}", messageSentStatsOverall);
      if (lossPercent > 0) {
        LOGGER.info("Messages lost stats (total): {}", messageLostStatsOverall);
      }
    }
    // Destroy gossip protocol instances
    destroyGossipProtocols(gossipProtocols);
  }
}
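
The computeMessageSentStats and computeMessageLostStats helpers are not part of this snippet. As a hedged illustration only, here is a minimal sketch of how such a helper could fold per-node counters into the LongSummaryStatistics consumed above; getSentGossipsCount() is a hypothetical accessor, not a confirmed GossipProtocolImpl method:

  // Sketch only: fold one hypothetical per-node sent counter into summary stats
  private static LongSummaryStatistics computeMessageSentStats(List<GossipProtocolImpl> gossipProtocols) {
    return gossipProtocols.stream()
        .mapToLong(GossipProtocolImpl::getSentGossipsCount) // hypothetical accessor
        .summaryStatistics();
  }

The same shape would serve computeMessageLostStats with a lost-message counter in place of the sent counter.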
Use of io.scalecube.cluster.Member in project scalecube by scalecube.
Class ClusterMetadataExample, method main.
/**
 * Main method.
 */
public static void main(String[] args) throws Exception {
  // Start seed cluster member Alice
  Cluster alice = Cluster.joinAwait();
  // Join Joe to cluster with metadata
  Map<String, String> metadata = ImmutableMap.of("name", "Joe");
  Cluster joe = Cluster.joinAwait(metadata, alice.address());
  // Subscribe Joe to listen for incoming messages and print them to system out
  joe.listen().map(Message::data).subscribe(System.out::println);
  // Scan the list of members in the cluster and find Joe there
  Optional<Member> joeMemberOptional = alice.otherMembers().stream()
      .filter(member -> "Joe".equals(member.metadata().get("name")))
      .findAny();
  // Send hello to Joe
  if (joeMemberOptional.isPresent()) {
    alice.send(joeMemberOptional.get(), Message.fromData("Hello Joe"));
  }
  TimeUnit.SECONDS.sleep(3);
}
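
To make the exchange two-way, a hedged sketch of a reply path: Joe answers whoever messaged him via the sender address carried on the message. This assumes Message.sender() exposes the origin Address (consistent with its use in FailureDetectorImpl below) and that Cluster.send accepts an Address as well as a Member:

  // Sketch only: reply to the sender address of each incoming message
  joe.listen().subscribe(msg -> joe.send(msg.sender(), Message.fromData("Hello back from Joe")));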
Use of io.scalecube.cluster.Member in project scalecube by scalecube.
Class GossipRequestTest, method testSerializationAndDeserialization.
@Test
public void testSerializationAndDeserialization() throws Exception {
  Member from = new Member("0", Address.from("localhost:1234"));
  List<Gossip> gossips = getGossips();
  Message message = Message.withData(new GossipRequest(gossips, from.id())).correlationId("CORR_ID").build();
  ByteBuf bb = buffer();
  MessageCodec.serialize(message, bb);
  assertTrue(bb.readableBytes() > 0);
  ByteBuf input = copiedBuffer(bb);
  Message deserializedMessage = MessageCodec.deserialize(input);
  assertNotNull(deserializedMessage);
  Assert.assertEquals(GossipRequest.class, deserializedMessage.data().getClass());
Assert.assertEquals("CORR_ID", deserializedMessage.correlationId());
GossipRequest gossipRequest = deserializedMessage.data();
assertNotNull(gossipRequest);
assertNotNull(gossipRequest.gossips());
assertNotNull(gossipRequest.gossips().get(0));
Object msgData = gossipRequest.gossips().get(0).message().data();
assertNotNull(msgData);
assertTrue(msgData.toString(), msgData instanceof TestData);
assertEquals(testData.getProperties(), ((TestData) msgData).getProperties());
}
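
The getGossips() fixture is not shown in this snippet. A hedged sketch of what it could look like, assuming Gossip exposes a (gossipId, message) constructor and testData is a shared TestData fixture on the test class:

  // Sketch of the assumed fixture: two gossips wrapping the shared testData payload
  private List<Gossip> getGossips() {
    Gossip gossip1 = new Gossip("idGossip1", Message.fromData(testData)); // assumed (gossipId, message) ctor
    Gossip gossip2 = new Gossip("idGossip2", Message.fromData(testData));
    return Arrays.asList(gossip1, gossip2);
  }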
Use of io.scalecube.cluster.Member in project scalecube by scalecube.
Class FailureDetectorImpl, method doPing.
// ================================================
// ============== Action Methods ==================
// ================================================
private void doPing() {
  // Increment period counter
  period++;
  // Select ping member
  Member pingMember = selectPingMember();
  if (pingMember == null) {
    return;
  }
  // Send ping
  Member localMember = membership.member();
  String cid = localMember.id() + "-" + Long.toString(period);
  PingData pingData = new PingData(localMember, pingMember);
  Message pingMsg = Message.withData(pingData).qualifier(PING).correlationId(cid).build();
  try {
    LOGGER.trace("Send Ping[{}] to {}", period, pingMember);
    transport.listen()
        .observeOn(scheduler)
        .filter(this::isPingAck)
        .filter(message -> cid.equals(message.correlationId()))
        .take(1)
        .timeout(config.getPingTimeout(), TimeUnit.MILLISECONDS, scheduler)
        .subscribe(message -> {
          LOGGER.trace("Received PingAck[{}] from {}", period, pingMember);
          publishPingResult(pingMember, MemberStatus.ALIVE);
        }, throwable -> {
          LOGGER.trace("Timeout getting PingAck[{}] from {} within {} ms", period, pingMember, config.getPingTimeout());
          doPingReq(pingMember, cid);
        });
    transport.send(pingMember.address(), pingMsg);
  } catch (Exception cause) {
    LOGGER.error("Exception on sending Ping[{}] to {}: {}", period, pingMember, cause.getMessage(), cause);
  }
}
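
selectPingMember() is not shown here. A hedged sketch of one plausible shape, assuming a pingMembers field holding the known remote members; the real implementation may order candidates differently to spread pings evenly:

  // Sketch only: uniform random choice among known remote members, else null
  private Member selectPingMember() {
    List<Member> candidates = new ArrayList<>(pingMembers); // pingMembers: assumed field of remote members
    return candidates.isEmpty()
        ? null
        : candidates.get(ThreadLocalRandom.current().nextInt(candidates.size()));
  }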
Use of io.scalecube.cluster.Member in project scalecube by scalecube.
Class FailureDetectorImpl, method doPingReq.
private void doPingReq(final Member pingMember, String cid) {
  final int timeout = config.getPingInterval() - config.getPingTimeout();
  if (timeout <= 0) {
    LOGGER.trace("No PingReq[{}] occurred, because no time left (pingInterval={}, pingTimeout={})",
        period, config.getPingInterval(), config.getPingTimeout());
    publishPingResult(pingMember, MemberStatus.SUSPECT);
    return;
  }
  final List<Member> pingReqMembers = selectPingReqMembers(pingMember);
  if (pingReqMembers.isEmpty()) {
    LOGGER.trace("No PingReq[{}] occurred, because member selection is empty", period);
    publishPingResult(pingMember, MemberStatus.SUSPECT);
    return;
  }
  Member localMember = membership.member();
  transport.listen()
      .observeOn(scheduler)
      .filter(this::isPingAck)
      .filter(message -> cid.equals(message.correlationId()))
      .take(1)
      .timeout(timeout, TimeUnit.MILLISECONDS, scheduler)
      .subscribe(message -> {
        LOGGER.trace("Received transit PingAck[{}] from {} to {}", period, message.sender(), pingMember);
        publishPingResult(pingMember, MemberStatus.ALIVE);
      }, throwable -> {
        LOGGER.trace("Timeout getting transit PingAck[{}] from {} to {} within {} ms",
            period, pingReqMembers, pingMember, timeout);
        publishPingResult(pingMember, MemberStatus.SUSPECT);
      });
  PingData pingReqData = new PingData(localMember, pingMember);
  Message pingReqMsg = Message.withData(pingReqData).qualifier(PING_REQ).correlationId(cid).build();
  LOGGER.trace("Send PingReq[{}] to {} for {}", period, pingReqMembers, pingMember);
  for (Member pingReqMember : pingReqMembers) {
    transport.send(pingReqMember.address(), pingReqMsg);
  }
}
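
selectPingReqMembers(pingMember) is likewise not shown. A hedged sketch of the SWIM-style selection it stands in for: pick up to k random peers to probe the suspect indirectly, excluding the suspect itself. The pingMembers field and the config.getPingReqMembers() accessor are assumptions, not confirmed API:

  // Sketch only: up to k random peers, excluding the suspected member itself
  private List<Member> selectPingReqMembers(Member pingMember) {
    List<Member> candidates = new ArrayList<>(pingMembers); // pingMembers: assumed field of remote members
    candidates.remove(pingMember);
    Collections.shuffle(candidates);
    int k = Math.min(config.getPingReqMembers(), candidates.size()); // assumed config accessor for SWIM "k"
    return candidates.subList(0, k);
  }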