Use of org.neo4j.causalclustering.core.CoreGraphDatabase in project neo4j by neo4j.
The class ReadReplicaReplicationIT, method shouldBeAbleToCopyStoresFromCoreToReadReplica.
@Test
public void shouldBeAbleToCopyStoresFromCoreToReadReplica() throws Exception {
    // given: a core-only cluster with aggressive raft log rotation and pruning
    Map<String, String> params = stringMap(
            CausalClusteringSettings.raft_log_rotation_size.name(), "1k",
            CausalClusteringSettings.raft_log_pruning_frequency.name(), "500ms",
            CausalClusteringSettings.state_machine_flush_window_size.name(), "1",
            CausalClusteringSettings.raft_log_pruning_strategy.name(), "1 entries");
    Cluster cluster = clusterRule.withNumberOfReadReplicas(0)
            .withSharedCoreParams(params)
            .withRecordFormat(HighLimit.NAME)
            .startCluster();

    cluster.coreTx((db, tx) -> {
        Node node = db.createNode(Label.label("L"));
        for (int i = 0; i < 10; i++) {
            node.setProperty("prop-" + i, "this is a quite long string to get to the log limit soonish");
        }
        tx.success();
    });

    long baseVersion = versionBy(cluster.awaitLeader().raftLogDirectory(), Math::max);

    // write enough additional data to force raft log rotation and pruning
    CoreClusterMember coreGraphDatabase = null;
    for (int j = 0; j < 2; j++) {
        coreGraphDatabase = cluster.coreTx((db, tx) -> {
            Node node = db.createNode(Label.label("L"));
            for (int i = 0; i < 10; i++) {
                node.setProperty("prop-" + i, "this is a quite long string to get to the log limit soonish");
            }
            tx.success();
        });
    }

    File raftLogDir = coreGraphDatabase.raftLogDirectory();
    assertEventually("pruning happened", () -> versionBy(raftLogDir, Math::min), greaterThan(baseVersion), 5, SECONDS);

    // when: a read replica joins after the early raft log entries have been pruned away
    cluster.addReadReplicaWithIdAndRecordFormat(4, HighLimit.NAME).start();

    // then: the read replica becomes available, which requires copying the store from a core member
    for (final ReadReplica readReplica : cluster.readReplicas()) {
        assertEventually("read replica available", () -> readReplica.database().isAvailable(0), is(true), 10, SECONDS);
    }
}
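
The snippet relies on a versionBy helper that is not shown on this page. Below is a minimal sketch of what such a helper might look like; the "raft.log." file-name prefix, the numeric version suffix, and the fallback value of 0 are illustrative assumptions rather than the project's actual implementation.

import java.io.File;
import java.util.Arrays;
import java.util.function.BinaryOperator;

// Hypothetical helper: reduce the version numbers of the raft log segment files in raftLogDir
// with the given operator (Math::max -> newest segment, Math::min -> oldest surviving segment,
// which rises once pruning has removed earlier segments).
static long versionBy(File raftLogDir, BinaryOperator<Long> operator) {
    File[] segments = raftLogDir.listFiles((dir, name) -> name.startsWith("raft.log."));
    if (segments == null || segments.length == 0) {
        return 0L; // assumption: treat a missing or empty directory as version 0
    }
    return Arrays.stream(segments)
            .map(file -> file.getName().substring(file.getName().lastIndexOf('.') + 1))
            .map(Long::parseLong)
            .reduce(operator)
            .orElse(0L);
}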
Use of org.neo4j.causalclustering.core.CoreGraphDatabase in project neo4j by neo4j.
The class ConvertNonCausalClusteringStoreIT, method shouldReplicateTransactionToCoreMembers.
@Test
public void shouldReplicateTransactionToCoreMembers() throws Throwable {
    // given: a classic (non-clustered) store copied into every core member's store directory
    File dbDir = clusterRule.testDirectory().cleanDirectory("classic-db");
    int classicNodeCount = 1024;
    File classicNeo4jStore = createNeoStore(dbDir, classicNodeCount);

    Cluster cluster = this.clusterRule.withRecordFormat(recordFormat).createCluster();
    try (DefaultFileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction()) {
        for (CoreClusterMember core : cluster.coreMembers()) {
            fileSystem.copyRecursively(classicNeo4jStore, core.storeDir());
        }
    }
    cluster.start();

    // when
    cluster.coreTx((coreDB, tx) -> {
        Node node = coreDB.createNode(label("boo"));
        node.setProperty("foobar", "baz_bat");
        tx.success();
    });
    cluster.addReadReplicaWithIdAndRecordFormat(4, recordFormat).start();

    // then
    for (final CoreClusterMember server : cluster.coreMembers()) {
        CoreGraphDatabase db = server.database();
        try (Transaction tx = db.beginTx()) {
            ThrowingSupplier<Long, Exception> nodeCount = () -> count(db.getAllNodes());
            Config config = db.getDependencyResolver().resolveDependency(Config.class);
            assertEventually("node to appear on core server " + config.get(raft_advertised_address),
                    nodeCount, greaterThan((long) classicNodeCount), 15, SECONDS);
            assertEquals(classicNodeCount + 1, count(db.getAllNodes()));
            tx.success();
        }
    }
}
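
The createNeoStore helper referenced above is defined elsewhere in the test class. A rough sketch is given below, assuming it simply builds an embedded, non-clustered database in dbDir and fills it with the requested number of nodes; the TestGraphDatabaseFactory usage and the "classic" label are illustrative assumptions, and the real helper presumably also applies the same record format used by the cluster, which this sketch omits.

import java.io.File;
import org.neo4j.graphdb.GraphDatabaseService;
import org.neo4j.graphdb.Transaction;
import org.neo4j.test.TestGraphDatabaseFactory;
import static org.neo4j.graphdb.Label.label;

// Hypothetical helper: create a plain (non-causal-clustering) store containing nodeCount nodes
// so it can later be copied into each core member's store directory.
static File createNeoStore(File dbDir, int nodeCount) {
    GraphDatabaseService db = new TestGraphDatabaseFactory().newEmbeddedDatabase(dbDir);
    try (Transaction tx = db.beginTx()) {
        for (int i = 0; i < nodeCount; i++) {
            db.createNode(label("classic")); // label is an arbitrary choice for the sketch
        }
        tx.success();
    } finally {
        db.shutdown();
    }
    return dbDir;
}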
Use of org.neo4j.causalclustering.core.CoreGraphDatabase in project neo4j by neo4j.
The class CoreReplicationIT, method shouldNotAllowTokenCreationFromAFollowerWithNoInitialTokens.
@Test
public void shouldNotAllowTokenCreationFromAFollowerWithNoInitialTokens() throws Exception {
    // given: a property-less node committed through the leader and replicated to all core members
    CoreClusterMember leader = cluster.coreTx((db, tx) -> {
        db.createNode();
        tx.success();
    });
    awaitForDataToBeApplied(leader);
    dataMatchesEventually(leader, cluster.coreMembers());

    CoreGraphDatabase follower = cluster.getDbWithRole(Role.FOLLOWER).database();

    // when: a write that would require creating the "name" property token is attempted on a follower
    try (Transaction tx = follower.beginTx()) {
        follower.getAllNodes().iterator().next().setProperty("name", "Mark");
        tx.success();
        fail("Should have thrown exception");
    } catch (WriteOperationsNotAllowedException ignored) {
        // expected: the follower rejects the write
        assertThat(ignored.getMessage(), containsString("No write operations are allowed"));
    }
}
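
awaitForDataToBeApplied and dataMatchesEventually are helpers from the test's own support code and are not shown here. As a rough idea of the first one, a sketch is given below, assuming it simply polls the leader until the committed node is readable; the 10-second timeout is an assumption, and the statically imported assertEventually, count, greaterThan and SECONDS are the same helpers already used in the snippets above.

// Hypothetical helper: wait until the transaction committed through the leader has been applied
// to its local store and is visible to an ordinary read transaction.
private void awaitForDataToBeApplied(CoreClusterMember leader) throws Exception {
    assertEventually("node applied on leader", () -> {
        try (Transaction tx = leader.database().beginTx()) {
            long nodes = count(leader.database().getAllNodes());
            tx.success();
            return nodes;
        }
    }, greaterThan(0L), 10, SECONDS);
}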
Use of org.neo4j.causalclustering.core.CoreGraphDatabase in project neo4j by neo4j.
The class ServerPoliciesLoadBalancingIT, method shouldSupportSeveralPolicies.
@Test
public void shouldSupportSeveralPolicies() throws Exception {
    Map<String, IntFunction<String>> instanceCoreParams = new HashMap<>();
    instanceCoreParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "core" + id + ",core");
    Map<String, IntFunction<String>> instanceReplicaParams = new HashMap<>();
    instanceReplicaParams.put(CausalClusteringSettings.server_groups.name(), (id) -> "replica" + id + ",replica");

    String defaultPolicySpec = "groups(replica0,replica1)";
    String policyOneTwoSpec = "groups(replica1,replica2)";
    String policyZeroTwoSpec = "groups(replica0,replica2)";
    String policyAllReplicasSpec = "groups(replica); halt()";
    String allPolicySpec = "all()";

    Map<String, String> coreParams = stringMap(
            CausalClusteringSettings.cluster_allow_reads_on_followers.name(), "true",
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.all", allPolicySpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.default", defaultPolicySpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.policy_one_two", policyOneTwoSpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.policy_zero_two", policyZeroTwoSpec,
            CausalClusteringSettings.load_balancing_config.name() + ".server_policies.policy_all_replicas", policyAllReplicasSpec,
            CausalClusteringSettings.multi_dc_license.name(), "true");

    cluster = new Cluster(testDir.directory("cluster"), 3, 3, new HazelcastDiscoveryServiceFactory(),
            coreParams, instanceCoreParams, emptyMap(), instanceReplicaParams, Standard.LATEST_NAME);
    cluster.start();

    assertGetServersEventuallyMatchesOnAllCores(new CountsMatcher(3, 1, 2, 3), policyContext("all"));
    for (CoreClusterMember core : cluster.coreMembers()) {
        CoreGraphDatabase db = core.database();
        assertThat(getServers(db, policyContext("default")), new SpecificReplicasMatcher(0, 1));
        assertThat(getServers(db, policyContext("policy_one_two")), new SpecificReplicasMatcher(1, 2));
        assertThat(getServers(db, policyContext("policy_zero_two")), new SpecificReplicasMatcher(0, 2));
        assertThat(getServers(db, policyContext("policy_all_replicas")), new SpecificReplicasMatcher(0, 1, 2));
    }
}
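
policyContext and getServers are helpers defined in the test class rather than shown on this page. The server policies are selected through the routing context that the client passes to the load balancing plugin; a minimal sketch of policyContext under that assumption is below (the "policy" context key is an assumption). getServers presumably invokes the cluster routing procedure on the given database with that context and wraps the result for the matchers.

import java.util.Map;
import static org.neo4j.helpers.collection.MapUtil.stringMap;

// Hypothetical helper: the server_policies load balancing plugin picks a policy based on a
// "policy" entry in the routing context map supplied by the client.
static Map<String, String> policyContext(String policyName) {
    return stringMap("policy", policyName);
}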
Use of org.neo4j.causalclustering.core.CoreGraphDatabase in project neo4j by neo4j.
The class CoreEdgeMetricsIT, method shouldMonitorCoreEdge.
@Test
public void shouldMonitorCoreEdge() throws Exception {
    // given
    cluster = clusterRule.startCluster();

    // when
    CoreGraphDatabase coreDB = cluster.awaitLeader(5, TimeUnit.SECONDS).database();
    try (Transaction tx = coreDB.beginTx()) {
        Node node = coreDB.createNode(label("boo"));
        node.setProperty("foobar", "baz_bat");
        tx.success();
    }

    // then
    for (CoreClusterMember db : cluster.coreMembers()) {
        assertAllNodesVisible(db.database());
    }
    for (ReadReplica db : cluster.readReplicas()) {
        assertAllNodesVisible(db.database());
    }

    File coreMetricsDir = new File(cluster.getCoreMemberById(0).homeDir(), csvPath.getDefaultValue());
    assertEventually("append index eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.APPEND_INDEX)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("commit index eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.COMMIT_INDEX)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("term eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.TERM)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("leader not found eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.LEADER_NOT_FOUND)), equalTo(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("tx pull requests received eventually accurate", () -> {
        long total = 0;
        for (final File homeDir : cluster.coreMembers().stream().map(CoreClusterMember::homeDir).collect(Collectors.toList())) {
            File metricsDir = new File(homeDir, "metrics");
            total += readLongValue(metricsCsv(metricsDir, CatchUpMetrics.TX_PULL_REQUESTS_RECEIVED));
        }
        return total;
    }, greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("tx retries eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.TX_RETRIES)), equalTo(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("is leader eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.IS_LEADER)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);

    File readReplicaMetricsDir = new File(cluster.getReadReplicaById(0).homeDir(), "metrics");
    assertEventually("pull update request registered",
            () -> readLongValue(metricsCsv(readReplicaMetricsDir, PULL_UPDATES)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("pull update request registered",
            () -> readLongValue(metricsCsv(readReplicaMetricsDir, PULL_UPDATE_HIGHEST_TX_ID_REQUESTED)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("pull update response received",
            () -> readLongValue(metricsCsv(readReplicaMetricsDir, PULL_UPDATE_HIGHEST_TX_ID_RECEIVED)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("dropped messages eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.DROPPED_MESSAGES)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);
    assertEventually("queue size eventually accurate",
            () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.QUEUE_SIZE)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);
}
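
assertAllNodesVisible is another helper from the test class. A sketch of what it plausibly does is below, reusing the assertEventually/count idiom seen in the other examples on this page; the GraphDatabaseAPI parameter type, the reuse of the TIMEOUT constant, and the property check are assumptions.

// Hypothetical helper: wait until the "boo" node written on the leader has been replicated to
// this member, then verify its property value. Assumes the same static imports as above
// (assertEventually, count, greaterThan) plus org.junit.Assert.assertEquals.
private void assertAllNodesVisible(GraphDatabaseAPI db) throws Exception {
    try (Transaction tx = db.beginTx()) {
        ThrowingSupplier<Long, Exception> nodeCount = () -> count(db.getAllNodes());
        assertEventually("node to appear", nodeCount, greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
        for (Node node : db.getAllNodes()) {
            assertEquals("baz_bat", node.getProperty("foobar"));
        }
        tx.success();
    }
}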