Use of org.neo4j.helpers.AdvertisedSocketAddress in project neo4j by neo4j.
In the class HazelcastCoreTopologyService, the method createHazelcastInstance:
private HazelcastInstance createHazelcastInstance() {
    System.setProperty(WAIT_SECONDS_BEFORE_JOIN.getName(), "1");

    // Join only via the explicit TCP/IP member list; multicast and AWS discovery are disabled.
    JoinConfig joinConfig = new JoinConfig();
    joinConfig.getMulticastConfig().setEnabled(false);
    joinConfig.getAwsConfig().setEnabled(false);
    TcpIpConfig tcpIpConfig = joinConfig.getTcpIpConfig();
    tcpIpConfig.setEnabled(true);
    List<AdvertisedSocketAddress> initialMembers = config.get(CausalClusteringSettings.initial_discovery_members);
    for (AdvertisedSocketAddress address : initialMembers) {
        tcpIpConfig.addMember(address.toString());
    }

    // Bind Hazelcast to the configured discovery address, on a fixed port.
    Setting<ListenSocketAddress> discovery_listen_address = CausalClusteringSettings.discovery_listen_address;
    ListenSocketAddress hazelcastAddress = config.get(discovery_listen_address);
    InterfacesConfig interfaces = new InterfacesConfig();
    interfaces.addInterface(hazelcastAddress.getHostname());
    NetworkConfig networkConfig = new NetworkConfig();
    networkConfig.setInterfaces(interfaces);
    networkConfig.setPort(hazelcastAddress.getPort());
    networkConfig.setJoin(joinConfig);
    networkConfig.setPortAutoIncrement(false);

    com.hazelcast.config.Config c = new com.hazelcast.config.Config();
    c.setProperty(OPERATION_CALL_TIMEOUT_MILLIS.getName(), String.valueOf(10_000));
    c.setProperty(MERGE_NEXT_RUN_DELAY_SECONDS.getName(), "10");
    c.setProperty(MERGE_FIRST_RUN_DELAY_SECONDS.getName(), "10");
    c.setProperty(INITIAL_MIN_CLUSTER_SIZE.getName(),
            String.valueOf(minimumClusterSizeThatCanTolerateOneFaultForExpectedClusterSize()));
    c.setProperty(LOGGING_TYPE.getName(), "none");
    c.setNetworkConfig(networkConfig);

    MemberAttributeConfig memberAttributeConfig = HazelcastClusterTopology.buildMemberAttributesForCore(myself, config);
    c.setMemberAttributeConfig(memberAttributeConfig);
    logConnectionInfo(initialMembers);

    // Warn if the instance has not managed to join the cluster within the health timeout.
    JobScheduler.JobHandle logJob = scheduler.schedule("HazelcastHealth", HAZELCAST_IS_HEALTHY_TIMEOUT_MS,
            () -> log.warn("The server has not been able to connect in a timely fashion to the "
                    + "cluster. Please consult the logs for more details. Rebooting the server may "
                    + "solve the problem."));
    try {
        hazelcastInstance = Hazelcast.newHazelcastInstance(c);
        logJob.cancel(true);
    } catch (HazelcastException e) {
        String errorMessage = String.format("Hazelcast was unable to start with setting: %s = %s",
                discovery_listen_address.name(), config.get(discovery_listen_address));
        userLog.error(errorMessage);
        log.error(errorMessage, e);
        throw new RuntimeException(e);
    }

    List<String> groups = config.get(CausalClusteringSettings.server_groups);
    refreshGroups(hazelcastInstance, myself.getUuid().toString(), groups);
    return hazelcastInstance;
}
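The INITIAL_MIN_CLUSTER_SIZE property above is derived from the expected size of the core cluster. A minimal sketch of the arithmetic such a helper could use, assuming the usual majority-quorum rule (the method body below is an assumption, not the confirmed Neo4j implementation):

static int minimumClusterSizeThatCanTolerateOneFault(int expectedCoreClusterSize) {
    // Majority rule (assumed): a cluster of n cores can form once n / 2 + 1 members
    // are up, which for n >= 3 leaves room for one faulty member. E.g. 3 -> 2, 5 -> 3.
    return expectedCoreClusterSize / 2 + 1;
}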
Use of org.neo4j.helpers.AdvertisedSocketAddress in project neo4j by neo4j.
In the class CausalClusterConfigurationValidatorTest, the method validateSuccess:
@Test
public void validateSuccess() throws Exception {
    // when
    Config config = Config.embeddedDefaults(
            stringMap(ClusterSettings.mode.name(), mode.name(),
                    initial_discovery_members.name(), "localhost:99,remotehost:2",
                    new BoltConnector("bolt").enabled.name(), "true"),
            Collections.singleton(new CausalClusterConfigurationValidator()));

    // then
    assertEquals(asList(new AdvertisedSocketAddress("localhost", 99),
                    new AdvertisedSocketAddress("remotehost", 2)),
            config.get(initial_discovery_members));
}
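The assertion relies on the setting machinery splitting the comma-separated initial_discovery_members value into AdvertisedSocketAddress instances. A standalone sketch of that parsing, assuming a plain host:port format with comma separators (the DiscoveryMembers class and its parse method are hypothetical, not Neo4j API):

import java.util.ArrayList;
import java.util.List;
import org.neo4j.helpers.AdvertisedSocketAddress;

// Hypothetical stand-in for the list parsing behind initial_discovery_members;
// Neo4j's real setting machinery differs, but the resulting values have the same shape.
class DiscoveryMembers {
    static List<AdvertisedSocketAddress> parse(String value) {
        List<AdvertisedSocketAddress> members = new ArrayList<>();
        for (String entry : value.split(",")) {
            int colon = entry.lastIndexOf(':');
            members.add(new AdvertisedSocketAddress(
                    entry.substring(0, colon).trim(),
                    Integer.parseInt(entry.substring(colon + 1).trim())));
        }
        return members;
    }
}

Under these assumptions, parse("localhost:99,remotehost:2") yields exactly the two addresses the test asserts.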
Use of org.neo4j.helpers.AdvertisedSocketAddress in project neo4j by neo4j.
In the class BackupStoreCopyInteractionStressTesting, the method shouldBehaveCorrectlyUnderStress:
@Test
public void shouldBehaveCorrectlyUnderStress() throws Exception {
    int numberOfCores = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_CORES", DEFAULT_NUMBER_OF_CORES));
    int numberOfEdges = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_EDGES", DEFAULT_NUMBER_OF_EDGES));
    long durationInMinutes = parseLong(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_DURATION", DEFAULT_DURATION_IN_MINUTES));
    String workingDirectory = fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_WORKING_DIRECTORY", DEFAULT_WORKING_DIR);
    int baseCoreBackupPort = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_BASE_CORE_BACKUP_PORT", DEFAULT_BASE_CORE_BACKUP_PORT));
    int baseEdgeBackupPort = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_BASE_EDGE_BACKUP_PORT", DEFAULT_BASE_EDGE_BACKUP_PORT));
    boolean enableIndexes = parseBoolean(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_ENABLE_INDEXES", DEFAULT_ENABLE_INDEXES));
    String txPrune = fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_TX_PRUNE", DEFAULT_TX_PRUNE);

    File clusterDirectory = ensureExistsAndEmpty(new File(workingDirectory, "cluster"));
    File backupDirectory = ensureExistsAndEmpty(new File(workingDirectory, "backups"));

    // Each core/edge member gets its own backup port, offset from the configured base port.
    BiFunction<Boolean, Integer, SocketAddress> backupAddress = (isCore, id) ->
            new AdvertisedSocketAddress("localhost", (isCore ? baseCoreBackupPort : baseEdgeBackupPort) + id);

    Map<String, String> coreParams = enableRaftMessageLogging(
            configureRaftLogRotationAndPruning(configureTxLogRotationAndPruning(new HashMap<>(), txPrune)));
    Map<String, String> readReplicaParams = configureTxLogRotationAndPruning(new HashMap<>(), txPrune);
    Map<String, IntFunction<String>> instanceCoreParams =
            configureBackup(new HashMap<>(), id -> backupAddress.apply(true, id));
    Map<String, IntFunction<String>> instanceReadReplicaParams =
            configureBackup(new HashMap<>(), id -> backupAddress.apply(false, id));

    HazelcastDiscoveryServiceFactory discoveryServiceFactory = new HazelcastDiscoveryServiceFactory();
    Cluster cluster = new Cluster(clusterDirectory, numberOfCores, numberOfEdges, discoveryServiceFactory,
            coreParams, instanceCoreParams, readReplicaParams, instanceReadReplicaParams, Standard.LATEST_NAME);

    // The workers run until the configured duration expires or any one of them fails.
    AtomicBoolean stopTheWorld = new AtomicBoolean();
    BooleanSupplier notExpired = untilTimeExpired(durationInMinutes, MINUTES);
    BooleanSupplier keepGoing = () -> !stopTheWorld.get() && notExpired.getAsBoolean();
    Runnable onFailure = () -> stopTheWorld.set(true);

    ExecutorService service = Executors.newFixedThreadPool(3);
    try {
        cluster.start();
        if (enableIndexes) {
            Workload.setupIndexes(cluster);
        }
        Future<Throwable> workload = service.submit(new Workload(keepGoing, onFailure, cluster));
        Future<Throwable> startStopWorker = service.submit(
                new StartStopLoad(fs, pageCache, keepGoing, onFailure, cluster, numberOfCores, numberOfEdges));
        Future<Throwable> backupWorker = service.submit(
                new BackupLoad(keepGoing, onFailure, cluster, numberOfCores, numberOfEdges, backupDirectory, backupAddress));

        // Fetch each worker's result exactly once, with a timeout, so a hung worker
        // cannot block the assertion message forever.
        long timeout = durationInMinutes + 5;
        Throwable workloadError = workload.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(workloadError), workloadError);
        Throwable startStopError = startStopWorker.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(startStopError), startStopError);
        Throwable backupError = backupWorker.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(backupError), backupError);
    } finally {
        cluster.shutdown();
        service.shutdown();
    }

    // let's clean up disk space when everything went well
    FileUtils.deleteRecursively(clusterDirectory);
    FileUtils.deleteRecursively(backupDirectory);
}
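Every knob above is read through fromEnv, an environment-variable lookup that falls back to a default. A minimal sketch of such a helper, assuming nothing beyond System.getenv (the real helper in the stress-testing harness may differ):

// Hedged sketch of an env-var-with-default helper like the fromEnv used above.
static String fromEnv(String name, String defaultValue) {
    String value = System.getenv(name);
    return value == null ? defaultValue : value;
}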
Use of org.neo4j.helpers.AdvertisedSocketAddress in project neo4j by neo4j.
In the class GetServersProcedureForSingleDC, the method coreReadEndPoints:
private Stream<AdvertisedSocketAddress> coreReadEndPoints() {
    Optional<AdvertisedSocketAddress> leader = leaderBoltAddress();
    Collection<CoreServerInfo> coreServerInfo = topologyService.coreServers().members().values();
    Stream<AdvertisedSocketAddress> boltAddresses = coreServerInfo.stream().map(extractBoltAddress());

    // If the leader is present and is not the only core, filter it out of the read endpoints.
    if (leader.isPresent() && coreServerInfo.size() > 1) {
        AdvertisedSocketAddress leaderAddress = leader.get();
        return boltAddresses.filter(address -> !leaderAddress.equals(address));
    }

    // Otherwise (no leader located, or the leader is the only core) all cores serve reads.
    return boltAddresses;
}
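Both the mapping and the filter go through extractBoltAddress(), a Function from a core's CoreServerInfo to its advertised Bolt address. A plausible sketch, assuming CoreServerInfo exposes its client connectors and that the Bolt address can be read from them (the connectors() and boltAddress() accessors are assumptions, not confirmed API):

// Hedged sketch: assumes CoreServerInfo can yield its advertised Bolt address
// via its client-connector record; the accessor names are assumptions.
private static Function<CoreServerInfo, AdvertisedSocketAddress> extractBoltAddress() {
    return coreServerInfo -> coreServerInfo.connectors().boltAddress();
}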
Use of org.neo4j.helpers.AdvertisedSocketAddress in project neo4j by neo4j.
In the class LegacyGetServersProcedure, the method coreReadEndPoints:
private Stream<AdvertisedSocketAddress> coreReadEndPoints() {
    Optional<AdvertisedSocketAddress> leader = leaderBoltAddress();
    Collection<CoreServerInfo> coreServerInfo = topologyService.coreServers().members().values();
    Stream<AdvertisedSocketAddress> boltAddresses = coreServerInfo.stream().map(extractBoltAddress());

    // If the leader is present and is not the only core, filter it out of the read endpoints.
    if (leader.isPresent() && coreServerInfo.size() > 1) {
        AdvertisedSocketAddress leaderAddress = leader.get();
        return boltAddresses.filter(address -> !leaderAddress.equals(address));
    }

    // Otherwise (no leader located, or the leader is the only core) all cores serve reads.
    return boltAddresses;
}
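The other collaborator here is leaderBoltAddress(). A plausible sketch, assuming a LeaderLocator collaborator that throws NoLeaderFoundException when no Raft leader is currently known (the field names and the topology lookup below are assumptions about the surrounding class, not shown in the snippet):

// Hedged sketch: resolve the current Raft leader, if any, and map it to its
// advertised Bolt address; empty during a leader switch.
private Optional<AdvertisedSocketAddress> leaderBoltAddress() {
    try {
        MemberId leader = leaderLocator.getLeader();
        return topologyService.coreServers().find(leader).map(extractBoltAddress());
    } catch (NoLeaderFoundException e) {
        return Optional.empty();
    }
}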