use of org.neo4j.causalclustering.identity.MemberId in project neo4j by neo4j.
the class HazelcastClusterTopology method getCoreTopology.
static CoreTopology getCoreTopology(HazelcastInstance hazelcastInstance, Config config, Log log) {
    Map<MemberId, CoreServerInfo> coreMembers = emptyMap();
    boolean canBeBootstrapped = false;
    ClusterId clusterId = null;
    if (hazelcastInstance != null) {
        Set<Member> hzMembers = hazelcastInstance.getCluster().getMembers();
        canBeBootstrapped = canBeBootstrapped(hazelcastInstance, config);
        coreMembers = toCoreMemberMap(hzMembers, log, hazelcastInstance);
        clusterId = getClusterId(hazelcastInstance);
    } else {
        log.info("Cannot currently bind to distributed discovery service.");
    }
    return new CoreTopology(clusterId, canBeBootstrapped, coreMembers);
}
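For context, a hypothetical caller of getCoreTopology could walk the member map keyed by MemberId. This is a minimal sketch, assuming CoreTopology exposes members() and canBeBootstrapped() accessors matching the constructor arguments above; logCoreTopology itself is not part of the project.

static void logCoreTopology(CoreTopology topology, Log log) {
    // Assumed accessor: canBeBootstrapped() mirrors the boolean passed to the constructor.
    log.info("Core topology bootstrappable: " + topology.canBeBootstrapped());
    // Assumed accessor: members() returns the Map<MemberId, CoreServerInfo> built above.
    for (Map.Entry<MemberId, CoreServerInfo> entry : topology.members().entrySet()) {
        MemberId memberId = entry.getKey();
        CoreServerInfo info = entry.getValue();
        log.info("Core member " + memberId + " advertises bolt at " + info.connectors().boltAddress());
    }
}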
use of org.neo4j.causalclustering.identity.MemberId in project neo4j by neo4j.
the class HazelcastClusterTopology method toCoreMemberMap.
static Map<MemberId, CoreServerInfo> toCoreMemberMap(Set<Member> members, Log log, HazelcastInstance hazelcastInstance) {
    Map<MemberId, CoreServerInfo> coreMembers = new HashMap<>();
    MultiMap<String, String> serverGroupsMMap = hazelcastInstance.getMultiMap(SERVER_GROUPS_MULTIMAP_NAME);
    for (Member member : members) {
        try {
            MemberId memberId = new MemberId(UUID.fromString(member.getStringAttribute(MEMBER_UUID)));
            CoreServerInfo coreServerInfo = new CoreServerInfo(
                    socketAddress(member.getStringAttribute(RAFT_SERVER), AdvertisedSocketAddress::new),
                    socketAddress(member.getStringAttribute(TRANSACTION_SERVER), AdvertisedSocketAddress::new),
                    ClientConnectorAddresses.fromString(member.getStringAttribute(CLIENT_CONNECTOR_ADDRESSES)),
                    asSet(serverGroupsMMap.get(memberId.getUuid().toString())));
            coreMembers.put(memberId, coreServerInfo);
        } catch (IllegalArgumentException e) {
            log.warn("Incomplete member attributes supplied from Hazelcast", e);
        }
    }
    return coreMembers;
}
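The string attributes read here have to be published by each core member when it joins the Hazelcast cluster. Below is a minimal sketch of that producing side, assuming Hazelcast 3.x's MemberAttributeConfig.setStringAttribute; the helper name and its parameters are illustrative, while the attribute keys mirror the ones read above.

static MemberAttributeConfig buildMemberAttributes(MemberId myself,
                                                   String raftAddress,
                                                   String txAddress,
                                                   String clientConnectorAddresses) {
    MemberAttributeConfig attributes = new MemberAttributeConfig();
    // The UUID is what toCoreMemberMap parses back into a MemberId.
    attributes.setStringAttribute(MEMBER_UUID, myself.getUuid().toString());
    attributes.setStringAttribute(RAFT_SERVER, raftAddress);
    attributes.setStringAttribute(TRANSACTION_SERVER, txAddress);
    attributes.setStringAttribute(CLIENT_CONNECTOR_ADDRESSES, clientConnectorAddresses);
    return attributes;
}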
use of org.neo4j.causalclustering.identity.MemberId in project neo4j by neo4j.
the class ServerPoliciesPlugin method writeEndpoints.
private List<Endpoint> writeEndpoints(CoreTopology cores) {
    MemberId leader;
    try {
        leader = leaderLocator.getLeader();
    } catch (NoLeaderFoundException e) {
        return emptyList();
    }
    Optional<Endpoint> endPoint = cores.find(leader).map(extractBoltAddress()).map(Endpoint::write);
    return asList(endPoint);
}
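The asList call above turns the Optional into a zero-or-one element list of write endpoints. A possible shape for such a helper is sketched below; the project's actual utility may differ.

private static <T> List<T> asList(Optional<T> optional) {
    // Empty list when no leader endpoint could be resolved, singleton list otherwise.
    return optional.map(Collections::singletonList).orElse(Collections.emptyList());
}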
use of org.neo4j.causalclustering.identity.MemberId in project neo4j by neo4j.
the class ServerPoliciesPlugin method readEndpoints.
private List<Endpoint> readEndpoints(CoreTopology coreTopology, ReadReplicaTopology rrTopology, Policy policy) {
    Set<ServerInfo> possibleReaders = rrTopology.members().entrySet().stream()
            .map(entry -> new ServerInfo(entry.getValue().connectors().boltAddress(), entry.getKey(), entry.getValue().groups()))
            .collect(Collectors.toSet());
    if (allowReadsOnFollowers || possibleReaders.size() == 0) {
        Set<MemberId> validCores = coreTopology.members().keySet();
        try {
            MemberId leader = leaderLocator.getLeader();
            validCores = validCores.stream().filter(memberId -> !memberId.equals(leader)).collect(Collectors.toSet());
        } catch (NoLeaderFoundException ignored) {
            // we might end up using the leader for reading during this TTL, which should be fine in general
        }
        for (MemberId validCore : validCores) {
            Optional<CoreServerInfo> coreServerInfo = coreTopology.find(validCore);
            if (coreServerInfo.isPresent()) {
                CoreServerInfo serverInfo = coreServerInfo.get();
                possibleReaders.add(new ServerInfo(serverInfo.connectors().boltAddress(), validCore, serverInfo.groups()));
            }
        }
    }
    Set<ServerInfo> readers = policy.apply(possibleReaders);
    return readers.stream().map(r -> Endpoint.read(r.boltAddress())).collect(Collectors.toList());
}
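For illustration, a hypothetical Policy that keeps only readers belonging to a given server group is sketched below. The real plugin builds its policies from the load-balancing configuration; the Policy interface signature and the ServerInfo.groups() accessor are assumptions based on how they are used above.

class GroupFilteringPolicy implements Policy {
    private final String group; // hypothetical: the server group this policy selects

    GroupFilteringPolicy(String group) {
        this.group = group;
    }

    @Override
    public Set<ServerInfo> apply(Set<ServerInfo> servers) {
        // Keep only servers tagged with the configured group; fall back to the full
        // set when the filter would leave no readers at all.
        Set<ServerInfo> filtered = servers.stream()
                .filter(server -> server.groups().contains(group))
                .collect(Collectors.toSet());
        return filtered.isEmpty() ? servers : filtered;
    }
}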
use of org.neo4j.causalclustering.identity.MemberId in project neo4j by neo4j.
the class CatchUpTest method newMemberWithNoLogShouldCatchUpFromPeers.
@Test
public void newMemberWithNoLogShouldCatchUpFromPeers() throws Throwable {
    DirectNetworking net = new DirectNetworking();
    // given
    final MemberId leaderId = member(0);
    final MemberId sleepyId = member(2);
    final MemberId[] awakeMembers = { leaderId, member(1) };
    final MemberId[] allMembers = { leaderId, member(1), sleepyId };
    RaftTestFixture fixture = new RaftTestFixture(net, 3, allMembers);
    fixture.bootstrap(allMembers);
    fixture.members().withId(leaderId).raftInstance().installCoreState(
            new RaftCoreState(new MembershipEntry(0, new HashSet<>(Arrays.asList(allMembers)))));
    fixture.members().withId(leaderId).timeoutService().invokeTimeout(RaftMachine.Timeouts.ELECTION);
    net.processMessages();
    final MemberId leader = fixture.members().withId(leaderId).member();
    net.disconnect(sleepyId);
    // when
    fixture.members().withId(leaderId).raftInstance().handle(new Request(leader, valueOf(10)));
    fixture.members().withId(leaderId).raftInstance().handle(new Request(leader, valueOf(20)));
    fixture.members().withId(leaderId).raftInstance().handle(new Request(leader, valueOf(30)));
    fixture.members().withId(leaderId).raftInstance().handle(new Request(leader, valueOf(40)));
    net.processMessages();
    // then
    for (MemberId awakeMember : awakeMembers) {
        assertThat(integerValues(fixture.members().withId(awakeMember).raftLog()), hasItems(10, 20, 30, 40));
    }
    assertThat(integerValues(fixture.members().withId(sleepyId).raftLog()), empty());
    // when
    net.reconnect(sleepyId);
    // TODO: This needs an injectable/controllable timeout service for the log shipper.
    Thread.sleep(500);
    net.processMessages();
    // then
    assertThat(integerValues(fixture.members().withId(sleepyId).raftLog()), hasItems(10, 20, 30, 40));
}
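The member(int) factory used in the test produces stable MemberId values from small integers so the same index always names the same raft member. A minimal sketch of such a helper, assuming MemberId wraps a UUID as in the snippets above (the test utility actually used by the project may differ):

static MemberId member(int id) {
    // Deterministic UUID so the same integer always maps to the same MemberId in tests.
    return new MemberId(new UUID(id, 0));
}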