Use of org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory in project neo4j by neo4j.
From the class ServerGroupsIT, method shouldUpdateGroupsOnStart:
@Test
public void shouldUpdateGroupsOnStart() throws Exception {
    AtomicReference<String> suffix = new AtomicReference<>("before");
    List<List<String>> expected;

    Map<String, IntFunction<String>> instanceCoreParams = new HashMap<>();
    instanceCoreParams.put(CausalClusteringSettings.server_groups.name(),
            (id) -> String.join(", ", makeCoreGroups(suffix.get(), id)));
    Map<String, IntFunction<String>> instanceReplicaParams = new HashMap<>();
    instanceReplicaParams.put(CausalClusteringSettings.server_groups.name(),
            (id) -> String.join(", ", makeReplicaGroups(suffix.get(), id)));

    int nServers = 3;
    cluster = new Cluster(testDir.directory("cluster"), nServers, nServers,
            new HazelcastDiscoveryServiceFactory(), emptyMap(), instanceCoreParams,
            emptyMap(), instanceReplicaParams, Standard.LATEST_NAME);

    // when
    cluster.start();

    // then
    expected = new ArrayList<>();
    for (CoreClusterMember core : cluster.coreMembers()) {
        // cores and read replicas are created in equal numbers and share the same
        // id range, so one loop over core ids collects the groups of both
        expected.add(makeCoreGroups(suffix.get(), core.serverId()));
        expected.add(makeReplicaGroups(suffix.get(), core.serverId()));
    }
    for (CoreClusterMember core : cluster.coreMembers()) {
        assertEventually(core + " should have groups", () -> getServerGroups(core.database()),
                new GroupsMatcher(expected), 30, SECONDS);
    }

    // when
    expected.remove(makeCoreGroups(suffix.get(), 1));
    expected.remove(makeReplicaGroups(suffix.get(), 2));
    cluster.getCoreMemberById(1).shutdown();
    cluster.getReadReplicaById(2).shutdown();

    // should update groups of restarted servers
    suffix.set("after");
    cluster.addCoreMemberWithId(1).start();
    cluster.addReadReplicaWithId(2).start();
    expected.add(makeCoreGroups(suffix.get(), 1));
    expected.add(makeReplicaGroups(suffix.get(), 2));

    // then
    for (CoreClusterMember core : cluster.coreMembers()) {
        assertEventually(core + " should have groups", () -> getServerGroups(core.database()),
                new GroupsMatcher(expected), 30, SECONDS);
    }
}
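For orientation, here is a minimal sketch of the helpers this test assumes; these are hypothetical reconstructions, not the actual neo4j sources. makeCoreGroups (and its makeReplicaGroups twin) would derive group names from the current suffix and server id, and getServerGroups would collect the groups column that each member reports through the dbms.cluster.overview procedure:

// Hypothetical helper: group names derived from the suffix and server id.
// (The exact naming scheme is an assumption.)
private static List<String> makeCoreGroups(String suffix, int id) {
    return Arrays.asList(String.format("core_%d_%s", id, suffix), "core");
}

// Hypothetical helper: collect the "groups" column for every member listed by
// CALL dbms.cluster.overview. Requires org.neo4j.graphdb.Result and
// org.neo4j.graphdb.Transaction.
@SuppressWarnings("unchecked")
private static List<List<String>> getServerGroups(CoreGraphDatabase db) {
    List<List<String>> serverGroups = new ArrayList<>();
    try (Transaction tx = db.beginTx();
         Result result = db.execute("CALL dbms.cluster.overview")) {
        while (result.hasNext()) {
            serverGroups.add((List<String>) result.next().get("groups"));
        }
    }
    return serverGroups;
}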
Use of org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory in project neo4j by neo4j.
From the class ServerPoliciesLoadBalancingIT, method defaultBehaviour:
@Test
public void defaultBehaviour() throws Exception {
    cluster = new Cluster(testDir.directory("cluster"), 3, 3, new HazelcastDiscoveryServiceFactory(),
            emptyMap(), emptyMap(), emptyMap(), emptyMap(), Standard.LATEST_NAME);
    cluster.start();
    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 2, 3));
}
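The CountsMatcher arguments plausibly encode the routing table expected under the default policy with three cores and three read replicas: 3 routers, 1 writer, 2 core readers, and 3 replica readers (this reading is an inference, not stated in the snippet). A hedged sketch of how the assertion could obtain each core's routing table, assuming it is exposed through the dbms.cluster.routing.getServers procedure:

// Hypothetical helper (not the test's actual implementation): fetch the rows
// of the routing table that one core currently advertises.
private static List<Map<String, Object>> getServers(CoreGraphDatabase db) {
    List<Map<String, Object>> rows = new ArrayList<>();
    try (Transaction tx = db.beginTx();
         Result result = db.execute("CALL dbms.cluster.routing.getServers")) {
        while (result.hasNext()) {
            rows.add(result.next());
        }
    }
    return rows;
}

assertGetServersEventuallyMatchesOnAllCores would then poll this on every core until the matcher accepts the counts.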
Use of org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory in project neo4j by neo4j.
From the class ConnectionInfoIT, method hzTest:
@Test
public void hzTest() throws Throwable {
    // given
    testSocket = bindPort("0.0.0.0", 4243);

    // when
    AssertableLogProvider logProvider = new AssertableLogProvider();
    AssertableLogProvider userLogProvider = new AssertableLogProvider();
    HazelcastDiscoveryServiceFactory hzFactory = new HazelcastDiscoveryServiceFactory();
    Config config = embeddedDefaults(stringMap(
            discovery_listen_address.name(), ":" + testSocket.getLocalPort(),
            CausalClusteringSettings.initial_discovery_members.name(), "localhost:" + testSocket.getLocalPort(),
            new BoltConnector("bolt").enabled.name(), "true",
            new HttpConnector("http").enabled.name(), "true"));
    Neo4jJobScheduler jobScheduler = new Neo4jJobScheduler();
    jobScheduler.init();
    CoreTopologyService coreTopologyService = hzFactory.coreTopologyService(config,
            new MemberId(UUID.randomUUID()), jobScheduler, logProvider, userLogProvider);
    try {
        coreTopologyService.init();
        coreTopologyService.start();
    } catch (Throwable throwable) {
        // expected: the discovery port is already bound by testSocket
    }

    // then
    logProvider.assertContainsMessageContaining("Hazelcast was unable to start with setting");
    userLogProvider.assertContainsMessageContaining("Hazelcast was unable to start with setting");
}
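The failure that the log assertions look for is provoked by bindPort, which occupies the discovery port before Hazelcast tries to bind it. A minimal sketch of such a helper, assuming it simply holds a plain server socket open (the real test's implementation may differ):

// Occupy the given address and port so a later Hazelcast bind attempt fails.
// Requires java.net.ServerSocket and java.net.InetAddress.
private ServerSocket bindPort(String address, int port) throws IOException {
    return new ServerSocket(port, 50, InetAddress.getByName(address));
}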
Use of org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory in project neo4j by neo4j.
From the class BackupStoreCopyInteractionStressTesting, method shouldBehaveCorrectlyUnderStress:
@Test
public void shouldBehaveCorrectlyUnderStress() throws Exception {
    int numberOfCores = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_CORES", DEFAULT_NUMBER_OF_CORES));
    int numberOfEdges = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_EDGES", DEFAULT_NUMBER_OF_EDGES));
    long durationInMinutes = parseLong(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_DURATION", DEFAULT_DURATION_IN_MINUTES));
    String workingDirectory = fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_WORKING_DIRECTORY", DEFAULT_WORKING_DIR);
    int baseCoreBackupPort = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_BASE_CORE_BACKUP_PORT", DEFAULT_BASE_CORE_BACKUP_PORT));
    int baseEdgeBackupPort = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_BASE_EDGE_BACKUP_PORT", DEFAULT_BASE_EDGE_BACKUP_PORT));
    boolean enableIndexes = parseBoolean(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_ENABLE_INDEXES", DEFAULT_ENABLE_INDEXES));
    String txPrune = fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_TX_PRUNE", DEFAULT_TX_PRUNE);

    File clusterDirectory = ensureExistsAndEmpty(new File(workingDirectory, "cluster"));
    File backupDirectory = ensureExistsAndEmpty(new File(workingDirectory, "backups"));

    BiFunction<Boolean, Integer, SocketAddress> backupAddress = (isCore, id) ->
            new AdvertisedSocketAddress("localhost", (isCore ? baseCoreBackupPort : baseEdgeBackupPort) + id);
    Map<String, String> coreParams = enableRaftMessageLogging(
            configureRaftLogRotationAndPruning(configureTxLogRotationAndPruning(new HashMap<>(), txPrune)));
    Map<String, String> readReplicaParams = configureTxLogRotationAndPruning(new HashMap<>(), txPrune);
    Map<String, IntFunction<String>> instanceCoreParams =
            configureBackup(new HashMap<>(), id -> backupAddress.apply(true, id));
    Map<String, IntFunction<String>> instanceReadReplicaParams =
            configureBackup(new HashMap<>(), id -> backupAddress.apply(false, id));

    HazelcastDiscoveryServiceFactory discoveryServiceFactory = new HazelcastDiscoveryServiceFactory();
    Cluster cluster = new Cluster(clusterDirectory, numberOfCores, numberOfEdges, discoveryServiceFactory,
            coreParams, instanceCoreParams, readReplicaParams, instanceReadReplicaParams, Standard.LATEST_NAME);

    AtomicBoolean stopTheWorld = new AtomicBoolean();
    BooleanSupplier notExpired = untilTimeExpired(durationInMinutes, MINUTES);
    BooleanSupplier keepGoing = () -> !stopTheWorld.get() && notExpired.getAsBoolean();
    Runnable onFailure = () -> stopTheWorld.set(true);

    ExecutorService service = Executors.newFixedThreadPool(3);
    try {
        cluster.start();
        if (enableIndexes) {
            Workload.setupIndexes(cluster);
        }
        Future<Throwable> workload = service.submit(new Workload(keepGoing, onFailure, cluster));
        Future<Throwable> startStopWorker = service.submit(
                new StartStopLoad(fs, pageCache, keepGoing, onFailure, cluster, numberOfCores, numberOfEdges));
        Future<Throwable> backupWorker = service.submit(
                new BackupLoad(keepGoing, onFailure, cluster, numberOfCores, numberOfEdges, backupDirectory, backupAddress));

        // fetch each result once, with a timeout, instead of calling get() twice
        // (the untimed get() in the assertion message would block first otherwise)
        long timeout = durationInMinutes + 5;
        Throwable workloadError = workload.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(workloadError), workloadError);
        Throwable startStopError = startStopWorker.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(startStopError), startStopError);
        Throwable backupError = backupWorker.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(backupError), backupError);
    } finally {
        cluster.shutdown();
        service.shutdown();
    }

    // clean up disk space only when everything went well
    FileUtils.deleteRecursively(clusterDirectory);
    FileUtils.deleteRecursively(backupDirectory);
}
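The configuration helpers above are conventional stress-test plumbing. As a hedged sketch of their assumed shapes (not the actual neo4j implementations): fromEnv falls back to a default when the environment variable is unset, and untilTimeExpired turns a duration into a deadline check:

// Read an environment variable, falling back to a default when unset.
private static String fromEnv(String name, String defaultValue) {
    String value = System.getenv(name);
    return value == null ? defaultValue : value;
}

// Returns a supplier that stays true until the given duration has elapsed
// from the moment of this call. Requires java.util.concurrent.TimeUnit and
// java.util.function.BooleanSupplier.
private static BooleanSupplier untilTimeExpired(long duration, TimeUnit unit) {
    long deadline = System.nanoTime() + unit.toNanos(duration);
    return () -> System.nanoTime() < deadline;
}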
Use of org.neo4j.causalclustering.discovery.HazelcastDiscoveryServiceFactory in project neo4j by neo4j.
From the class CatchupStoreCopyInteractionStressTesting, method shouldBehaveCorrectlyUnderStress:
@Test
public void shouldBehaveCorrectlyUnderStress() throws Exception {
    int numberOfCores = parseInt(fromEnv("CATCHUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_CORES", DEFAULT_NUMBER_OF_CORES));
    int numberOfEdges = parseInt(fromEnv("CATCHUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_EDGES", DEFAULT_NUMBER_OF_EDGES));
    long durationInMinutes = parseLong(fromEnv("CATCHUP_STORE_COPY_INTERACTION_STRESS_DURATION", DEFAULT_DURATION_IN_MINUTES));
    String workingDirectory = fromEnv("CATCHUP_STORE_COPY_INTERACTION_STRESS_WORKING_DIRECTORY", DEFAULT_WORKING_DIR);
    boolean enableIndexes = parseBoolean(fromEnv("CATCHUP_STORE_COPY_INTERACTION_STRESS_ENABLE_INDEXES", DEFAULT_ENABLE_INDEXES));
    String txPrune = fromEnv("CATCHUP_STORE_COPY_INTERACTION_STRESS_TX_PRUNE", DEFAULT_TX_PRUNE);

    File clusterDirectory = ensureExistsAndEmpty(new File(workingDirectory, "cluster"));

    Map<String, String> coreParams = enableRaftMessageLogging(
            configureRaftLogRotationAndPruning(configureTxLogRotationAndPruning(new HashMap<>(), txPrune)));
    Map<String, String> edgeParams = configureTxLogRotationAndPruning(new HashMap<>(), txPrune);

    HazelcastDiscoveryServiceFactory discoveryServiceFactory = new HazelcastDiscoveryServiceFactory();
    Cluster cluster = new Cluster(clusterDirectory, numberOfCores, numberOfEdges, discoveryServiceFactory,
            coreParams, emptyMap(), edgeParams, emptyMap(), Standard.LATEST_NAME);

    AtomicBoolean stopTheWorld = new AtomicBoolean();
    BooleanSupplier notExpired = untilTimeExpired(durationInMinutes, MINUTES);
    BooleanSupplier keepGoing = () -> !stopTheWorld.get() && notExpired.getAsBoolean();
    Runnable onFailure = () -> stopTheWorld.set(true);

    ExecutorService service = Executors.newCachedThreadPool();
    try {
        cluster.start();
        if (enableIndexes) {
            Workload.setupIndexes(cluster);
        }
        Future<Throwable> workload = service.submit(new Workload(keepGoing, onFailure, cluster));
        Future<Throwable> startStopWorker = service.submit(
                new StartStopLoad(fs, pageCache, keepGoing, onFailure, cluster, numberOfCores, numberOfEdges));
        Future<Throwable> catchUpWorker = service.submit(new CatchUpLoad(keepGoing, onFailure, cluster));

        // fetch each result once, with a timeout, instead of calling get() twice
        // (the untimed get() in the assertion message would block first otherwise)
        long timeout = durationInMinutes + 5;
        Throwable workloadError = workload.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(workloadError), workloadError);
        Throwable startStopError = startStopWorker.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(startStopError), startStopError);
        Throwable catchUpError = catchUpWorker.get(timeout, MINUTES);
        assertNull(Exceptions.stringify(catchUpError), catchUpError);
    } finally {
        cluster.shutdown();
        service.shutdown();
    }

    // clean up disk space only when everything went well
    FileUtils.deleteRecursively(clusterDirectory);
}
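Both stress tests share one worker pattern: each of Workload, StartStopLoad, BackupLoad, and CatchUpLoad returns a Throwable instead of throwing, so assertNull can print the failure through Exceptions.stringify while onFailure stops the sibling workers. A hedged sketch of that base shape (the class name RepeatUntilCallable and its constructor are assumptions):

// Run a unit of work repeatedly until told to stop; capture the first failure,
// signal the other workers through onFailure, and report it as the result.
// Requires java.util.concurrent.Callable and java.util.function.BooleanSupplier.
abstract class RepeatUntilCallable implements Callable<Throwable> {
    private final BooleanSupplier keepGoing;
    private final Runnable onFailure;

    RepeatUntilCallable(BooleanSupplier keepGoing, Runnable onFailure) {
        this.keepGoing = keepGoing;
        this.onFailure = onFailure;
    }

    // One iteration of the stress workload.
    protected abstract void doWork() throws Exception;

    @Override
    public Throwable call() {
        try {
            while (keepGoing.getAsBoolean()) {
                doWork();
            }
            return null;
        } catch (Throwable t) {
            onFailure.run();
            return t;
        }
    }
}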