Use of org.neo4j.causalclustering.discovery.ReadReplica in project neo4j (by neo4j).
From the class TransactionLogRecoveryIT, method readReplicaShouldStartAfterPartialTransactionWriteCrash:
@Test
public void readReplicaShouldStartAfterPartialTransactionWriteCrash() throws Exception {
    // Given: seed the cluster with data and wait until every read replica has caught up.
    dataMatchesEventually(createEmptyNodes(cluster, 10), cluster.readReplicas());

    // When: take one read replica offline.
    ReadReplica replica = cluster.getReadReplicaById(0);
    replica.shutdown();

    // And: commit more writes on the cores so the replica has transactions to pull on restart.
    CoreClusterMember writer = createEmptyNodes(cluster, 10);
    dataMatchesEventually(writer, cluster.coreMembers());

    // And: corrupt the replica's transaction log with a partially written transaction.
    File storeDir = replica.storeDir();
    writePartialTx(storeDir);

    // Then: the replica must recover past the torn write and start cleanly...
    replica.start();

    // ...and eventually converge with the cores again.
    dataMatchesEventually(writer, singletonList(replica));
}
Use of org.neo4j.causalclustering.discovery.ReadReplica in project neo4j (by neo4j).
From the class CoreEdgeMetricsIT, method shouldMonitorCoreEdge:
@Test
public void shouldMonitorCoreEdge() throws Exception {
// given: a running causal cluster
cluster = clusterRule.startCluster();
// when: a single node with one property is committed through the leader
CoreGraphDatabase coreDB = cluster.awaitLeader(5, TimeUnit.SECONDS).database();
try (Transaction tx = coreDB.beginTx()) {
Node node = coreDB.createNode(label("boo"));
node.setProperty("foobar", "baz_bat");
tx.success();
}
// then: the write is replicated to every core member and every read replica
for (CoreClusterMember db : cluster.coreMembers()) {
assertAllNodesVisible(db.database());
}
for (ReadReplica db : cluster.readReplicas()) {
assertAllNodesVisible(db.database());
}
// Core-side metrics are read from the CSV metrics directory of core member 0.
// NOTE(review): csvPath.getDefaultValue() is assumed to be the metrics subdirectory
// relative to the home dir — confirm against the metrics settings class.
File coreMetricsDir = new File(cluster.getCoreMemberById(0).homeDir(), csvPath.getDefaultValue());
// Raft progress: append/commit indexes must advance past 0 after the committed tx.
assertEventually("append index eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.APPEND_INDEX)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
assertEventually("commit index eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.COMMIT_INDEX)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
// Term is monotonically non-negative; 0 is valid for a fresh cluster.
assertEventually("term eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.TERM)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);
// A healthy cluster should never have failed to locate a leader.
assertEventually("leader not found eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.LEADER_NOT_FOUND)), equalTo(0L), TIMEOUT, TimeUnit.SECONDS);
// Read replicas pull transactions from the cores, so summed across all core
// members at least one tx-pull request must have been received.
assertEventually("tx pull requests received eventually accurate", () -> {
long total = 0;
for (final File homeDir : cluster.coreMembers().stream().map(CoreClusterMember::homeDir).collect(Collectors.toList())) {
File metricsDir = new File(homeDir, "metrics");
total += readLongValue(metricsCsv(metricsDir, CatchUpMetrics.TX_PULL_REQUESTS_RECEIVED));
}
return total;
}, greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
// No transaction should have needed a retry for this single uncontended write.
assertEventually("tx retries eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.TX_RETRIES)), equalTo(0L), TIMEOUT, TimeUnit.SECONDS);
// IS_LEADER is a 0/1 gauge, so any non-negative value means it is being reported.
assertEventually("is leader eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.IS_LEADER)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);
// Read-replica-side metrics: pull-update activity must show at least one round trip.
File readReplicaMetricsDir = new File(cluster.getReadReplicaById(0).homeDir(), "metrics");
assertEventually("pull update request registered", () -> readLongValue(metricsCsv(readReplicaMetricsDir, PULL_UPDATES)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
assertEventually("pull update request registered", () -> readLongValue(metricsCsv(readReplicaMetricsDir, PULL_UPDATE_HIGHEST_TX_ID_REQUESTED)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
assertEventually("pull update response received", () -> readLongValue(metricsCsv(readReplicaMetricsDir, PULL_UPDATE_HIGHEST_TX_ID_RECEIVED)), greaterThan(0L), TIMEOUT, TimeUnit.SECONDS);
// Message-queue health gauges only need to be present (non-negative).
assertEventually("dropped messages eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.DROPPED_MESSAGES)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);
assertEventually("queue size eventually accurate", () -> readLongValue(metricsCsv(coreMetricsDir, CoreMetrics.QUEUE_SIZE)), greaterThanOrEqualTo(0L), TIMEOUT, TimeUnit.SECONDS);
}
Aggregations