use of org.neo4j.causalclustering.discovery.CoreTopology in project neo4j by neo4j.
the class ClusterBinderTest method shouldTimeoutWhenNotBootrappableAndNobodyElsePublishesClusterId.
@Test
public void shouldTimeoutWhenNotBootrappableAndNobodyElsePublishesClusterId() throws Throwable {
    // given
    CoreTopology unboundTopology = new CoreTopology(null, false, emptyMap());
    CoreTopologyService topologyService = mock(CoreTopologyService.class);
    when(topologyService.coreServers()).thenReturn(unboundTopology);

    ClusterBinder binder = new ClusterBinder(new StubClusterIdStorage(), topologyService,
            NullLogProvider.getInstance(), clock, () -> clock.forward(1, TimeUnit.SECONDS),
            3_000, coreBootstrapper);

    try {
        // when
        binder.bindToCluster(null);
        fail("Should have timed out");
    } catch (TimeoutException e) {
        // expected
    }

    // then
    verify(topologyService, atLeast(2)).coreServers();
}
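Both ClusterBinderTest snippets on this page reference a clock and a coreBootstrapper field declared elsewhere in the test class. A minimal sketch of those fixtures, assuming neo4j's FakeClock test utility and a Mockito mock for the bootstrapper (the actual declarations are not part of the extracted methods):

// Assumed shared fixtures for the ClusterBinderTest snippets; the real
// declarations are not shown by this page.
private final FakeClock clock = Clocks.fakeClock();
private final CoreBootstrapper coreBootstrapper = mock(CoreBootstrapper.class);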
use of org.neo4j.causalclustering.discovery.CoreTopology in project neo4j by neo4j.
the class ClusterBinderTest method shouldBootstrapWhenBootstrappable.
@Test
public void shouldBootstrapWhenBootstrappable() throws Throwable {
    // given
    CoreTopology bootstrappableTopology = new CoreTopology(null, true, emptyMap());
    CoreTopologyService topologyService = mock(CoreTopologyService.class);
    when(topologyService.coreServers()).thenReturn(bootstrappableTopology);
    when(topologyService.setClusterId(any())).thenReturn(true);

    ClusterBinder binder = new ClusterBinder(new StubClusterIdStorage(), topologyService,
            NullLogProvider.getInstance(), clock, () -> clock.forward(1, TimeUnit.SECONDS),
            3_000, coreBootstrapper);

    ThrowingConsumer<CoreSnapshot, Throwable> snapshotInstaller = mock(ThrowingConsumer.class);

    // when
    binder.bindToCluster(snapshotInstaller);

    // then
    verify(coreBootstrapper).bootstrap(any());
    Optional<ClusterId> clusterId = binder.get();
    assertTrue(clusterId.isPresent());
    verify(topologyService).setClusterId(clusterId.get());
    verify(snapshotInstaller).accept(any());
}
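Both tests also construct a StubClusterIdStorage, a test double not shown here. A minimal in-memory sketch, assuming it implements the causal-clustering SimpleStorage<ClusterId> interface (the real helper may differ):

// Hypothetical in-memory ClusterId storage; SimpleStorage<ClusterId> is
// assumed as the implemented interface.
static class StubClusterIdStorage implements SimpleStorage<ClusterId> {

    private ClusterId clusterId; // null until a cluster id is written

    @Override
    public boolean exists() {
        return clusterId != null;
    }

    @Override
    public ClusterId readState() {
        return clusterId;
    }

    @Override
    public void writeState(ClusterId state) {
        clusterId = state;
    }
}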
use of org.neo4j.causalclustering.discovery.CoreTopology in project neo4j by neo4j.
the class ConnectToRandomCoreServerStrategyTest method fakeCoreTopology.
static CoreTopology fakeCoreTopology(MemberId... memberIds) {
    assert memberIds.length > 0;

    ClusterId clusterId = new ClusterId(UUID.randomUUID());
    Map<MemberId, CoreServerInfo> coreMembers = new HashMap<>();

    int offset = 0;
    for (MemberId memberId : memberIds) {
        coreMembers.put(memberId, new CoreServerInfo(
                new AdvertisedSocketAddress("localhost", 5000 + offset),
                new AdvertisedSocketAddress("localhost", 6000 + offset),
                new ClientConnectorAddresses(singletonList(new ClientConnectorAddresses.ConnectorUri(
                        ClientConnectorAddresses.Scheme.bolt,
                        new AdvertisedSocketAddress("localhost", 7000 + offset)))),
                asSet("core")));
        offset++;
    }

    return new CoreTopology(clusterId, false, coreMembers);
}
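For context, a sketch of how a test in ConnectToRandomCoreServerStrategyTest might consume this helper; the upstreamDatabase() call returning Optional<MemberId> is an assumption based on the strategy API used elsewhere on this page:

// Illustrative use of fakeCoreTopology: a random-core strategy should pick
// one of the fabricated members. upstreamDatabase() returning
// Optional<MemberId> is assumed, not shown in the snippets.
CoreTopology topology = fakeCoreTopology(
        new MemberId(UUID.randomUUID()),
        new MemberId(UUID.randomUUID()),
        new MemberId(UUID.randomUUID()));

TopologyService topologyService = mock(TopologyService.class);
when(topologyService.coreServers()).thenReturn(topology);

ConnectToRandomCoreServerStrategy strategy = new ConnectToRandomCoreServerStrategy();
strategy.setTopologyService(topologyService);

Optional<MemberId> choice = strategy.upstreamDatabase();
assertTrue(topology.members().containsKey(choice.get()));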
use of org.neo4j.causalclustering.discovery.CoreTopology in project neo4j by neo4j.
the class UpstreamDatabaseStrategySelectorTest method shouldDefaultToRandomCoreServerIfNoOtherStrategySpecified.
@Test
public void shouldDefaultToRandomCoreServerIfNoOtherStrategySpecified() throws Exception {
    // given
    TopologyService topologyService = mock(TopologyService.class);
    MemberId memberId = new MemberId(UUID.randomUUID());
    when(topologyService.coreServers()).thenReturn(new CoreTopology(
            new ClusterId(UUID.randomUUID()), false, mapOf(memberId, mock(CoreServerInfo.class))));

    ConnectToRandomCoreServerStrategy defaultStrategy = new ConnectToRandomCoreServerStrategy();
    defaultStrategy.setTopologyService(topologyService);

    UpstreamDatabaseStrategySelector selector = new UpstreamDatabaseStrategySelector(defaultStrategy);

    // when
    MemberId instance = selector.bestUpstreamDatabase();

    // then
    assertEquals(memberId, instance);
}
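The mapOf helper used above is declared elsewhere in the test class; a minimal equivalent sketch (the real test may instead use a shared collection utility):

// Hypothetical single-entry map builder matching the mapOf call above.
private static <K, V> Map<K, V> mapOf(K key, V value) {
    Map<K, V> map = new HashMap<>();
    map.put(key, value);
    return map;
}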
use of org.neo4j.causalclustering.discovery.CoreTopology in project neo4j by neo4j.
the class ServerPoliciesPlugin method readEndpoints.
private List<Endpoint> readEndpoints(CoreTopology coreTopology, ReadReplicaTopology rrTopology, Policy policy) {
    Set<ServerInfo> possibleReaders = rrTopology.members().entrySet().stream()
            .map(entry -> new ServerInfo(entry.getValue().connectors().boltAddress(),
                    entry.getKey(), entry.getValue().groups()))
            .collect(Collectors.toSet());

    if (allowReadsOnFollowers || possibleReaders.isEmpty()) {
        Set<MemberId> validCores = coreTopology.members().keySet();
        try {
            MemberId leader = leaderLocator.getLeader();
            validCores = validCores.stream()
                    .filter(memberId -> !memberId.equals(leader))
                    .collect(Collectors.toSet());
        } catch (NoLeaderFoundException ignored) {
            // we might end up using the leader for reads during this TTL; acceptable in general
        }

        for (MemberId validCore : validCores) {
            Optional<CoreServerInfo> coreServerInfo = coreTopology.find(validCore);
            if (coreServerInfo.isPresent()) {
                CoreServerInfo serverInfo = coreServerInfo.get();
                possibleReaders.add(new ServerInfo(serverInfo.connectors().boltAddress(), validCore, serverInfo.groups()));
            }
        }
    }

    Set<ServerInfo> readers = policy.apply(possibleReaders);
    return readers.stream().map(r -> Endpoint.read(r.boltAddress())).collect(Collectors.toList());
}
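The policy.apply(possibleReaders) call above suggests Policy maps a set of candidate ServerInfo entries to the subset allowed to serve reads. A sketch of one possible policy, assuming Policy is a single-method interface and that ServerInfo exposes its groups (the "us-east" group name is made up for illustration):

// Hypothetical policy keeping only servers tagged with the "us-east" server
// group; assumes Policy has a single apply(Set<ServerInfo>) method.
Policy usEastOnly = possible -> possible.stream()
        .filter(server -> server.groups().contains("us-east"))
        .collect(Collectors.toSet());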