Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache: class MixedModeFuzzTest, method mixedModeFuzzTest.
@Test
public void mixedModeFuzzTest() throws Throwable {
try (ICluster<IInvokableInstance> c = builder().withNodes(2).withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL)).withInstanceInitializer(PrepareBehaviour::oldNewBehaviour).start()) {
// Long string to make us invalidate caches occasionally
String veryLongString = "very";
for (int i = 0; i < 2; i++) veryLongString += veryLongString;
final String qualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM ks%d.tbl";
final String unqualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM tbl";
int KEYSPACES = 3;
final int STATEMENTS_PER_KS = 2;
for (int i = 0; i < KEYSPACES; i++) {
c.schemaChange(withKeyspace("CREATE KEYSPACE ks" + i + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};"));
c.schemaChange(withKeyspace("CREATE TABLE ks" + i + ".tbl (pk int, ck int, PRIMARY KEY (pk, ck));"));
for (int j = 0; j < i; j++) c.coordinator(1).execute("INSERT INTO ks" + i + ".tbl (pk, ck) VALUES (?, ?)", ConsistencyLevel.ALL, 1, j);
}
List<Thread> threads = new ArrayList<>();
AtomicBoolean interrupt = new AtomicBoolean(false);
AtomicReference<Throwable> thrown = new AtomicReference<>();
int INFREQUENT_ACTION_COEF = 100;
long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(20);
for (int i = 0; i < 3; i++) {
int seed = i;
threads.add(new Thread(() -> {
com.datastax.driver.core.Cluster cluster = null;
Map<String, Session> sessions = new HashMap<>();
try {
AtomicBoolean nodeWithFix = new AtomicBoolean(false);
Supplier<Cluster> clusterSupplier = () -> {
return com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").addContactPoint("127.0.0.2").build();
};
AtomicBoolean allUpgraded = new AtomicBoolean(false);
Random rng = new Random(seed);
boolean reconnected = false;
Map<Pair<Integer, Integer>, PreparedStatement> qualifiedStatements = new HashMap<>();
Map<Pair<Integer, Integer>, PreparedStatement> unqualifiedStatements = new HashMap<>();
cluster = clusterSupplier.get();
for (int j = 0; j < KEYSPACES; j++) {
String ks = "ks" + j;
sessions.put(ks, cluster.connect(ks));
Assert.assertEquals(sessions.get(ks).getLoggedKeyspace(), ks);
}
long firstVersionBump = System.nanoTime() + TimeUnit.SECONDS.toNanos(5);
long reconnectAfter = System.nanoTime() + TimeUnit.SECONDS.toNanos(15);
while (!interrupt.get() && (System.nanoTime() < deadline)) {
nodeWithFix.set(rng.nextBoolean());
final int ks = rng.nextInt(KEYSPACES);
final int statementIdx = rng.nextInt(STATEMENTS_PER_KS);
final Pair<Integer, Integer> statementId = Pair.create(ks, statementIdx);
int v = rng.nextInt(INFREQUENT_ACTION_COEF + 1);
Action[] pool;
if (v == INFREQUENT_ACTION_COEF)
pool = infrequent;
else
pool = frequent;
Action action = pool[rng.nextInt(pool.length)];
// logger.info(String.format("Executing %s on the node %s. ks %d", action, nodeWithFix.get() ? "1" : "2", ks));
switch(action) {
case BUMP_VERSION:
if (System.nanoTime() < firstVersionBump)
break;
c.stream().forEach(node -> node.runOnInstance(() -> {
if (version.get().equals(INITIAL_VERSION)) {
CassandraVersion upgradeTo = QueryProcessor.NEW_PREPARED_STATEMENT_BEHAVIOUR_SINCE_40;
while (!version.get().equals(upgradeTo)) {
if (version.compareAndSet(INITIAL_VERSION, upgradeTo)) {
logger.info("Bumped version to " + upgradeTo);
break;
}
}
}
}));
break;
case EXECUTE_QUALIFIED:
if (!qualifiedStatements.containsKey(statementId))
continue;
try {
int counter = 0;
BoundStatement boundStatement = qualifiedStatements.get(statementId).bind();
boundStatement.setHost(getHost(cluster, nodeWithFix.get()));
for (Iterator<Object[]> iter = RowUtil.toObjects(sessions.get("ks" + ks).execute(boundStatement)); iter.hasNext(); ) {
Object[] current = iter.next();
int v0 = (int) current[0];
int v1 = (int) current[1];
Assert.assertEquals(v0, 1);
Assert.assertEquals(v1, counter++);
}
if (nodeWithFix.get())
Assert.assertEquals(ks, counter);
} catch (Throwable t) {
if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
continue;
throw t;
}
break;
case EXECUTE_UNQUALIFIED:
if (!unqualifiedStatements.containsKey(statementId))
continue;
try {
BoundStatement boundStatement = unqualifiedStatements.get(statementId).bind();
boundStatement.setHost(getHost(cluster, nodeWithFix.get()));
int counter = 0;
for (Iterator<Object[]> iter = RowUtil.toObjects(sessions.get("ks" + ks).execute(boundStatement)); iter.hasNext(); ) {
Object[] current = iter.next();
int v0 = (int) current[0];
int v1 = (int) current[1];
Assert.assertEquals(v0, 1);
Assert.assertEquals(v1, counter++);
}
if (nodeWithFix.get() && allUpgraded.get()) {
Assert.assertEquals(unqualifiedStatements.get(statementId).getQueryKeyspace() + " " + ks + " " + statementId, ks, counter);
}
} catch (Throwable t) {
if (t.getMessage().contains("ID mismatch while trying to reprepare") || (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))) {
logger.info("Detected id mismatch, skipping as it is expected: ");
continue;
}
throw t;
}
break;
case FORGET_PREPARED:
Map<Pair<Integer, Integer>, PreparedStatement> toCleanup = rng.nextBoolean() ? qualifiedStatements : unqualifiedStatements;
Set<Pair<Integer, Integer>> toDrop = new HashSet<>();
for (Pair<Integer, Integer> e : toCleanup.keySet()) {
if (rng.nextBoolean())
toDrop.add(e);
}
for (Pair<Integer, Integer> e : toDrop) toCleanup.remove(e);
toDrop.clear();
break;
case CLEAR_CACHES:
if (!nodeWithFix.get() && !allUpgraded.get())
continue;
c.get(nodeWithFix.get() ? 1 : 2).runOnInstance(() -> {
SystemKeyspace.loadPreparedStatements((id, query, keyspace) -> {
if (rng.nextBoolean())
QueryProcessor.instance.evictPrepared(id);
return true;
});
});
break;
case PREPARE_QUALIFIED:
{
if (qualifiedStatements.containsKey(statementId))
continue;
String qs = String.format(qualified, statementIdx, statementIdx, ks);
String keyspace = "ks" + ks;
PreparedStatement preparedQualified = sessions.get(keyspace).prepare(qs);
// for a qualified statement the keyspace comes from the statement itself, so once
// all nodes are upgraded its hash must not include the session keyspace
if (allUpgraded.get())
PreparedStatementHelper.assertHashWithoutKeyspace(preparedQualified, qs, keyspace);
qualifiedStatements.put(statementId, preparedQualified);
}
break;
case PREPARE_UNQUALIFIED:
if (unqualifiedStatements.containsKey(statementId))
continue;
try {
String qs = String.format(unqualified, statementIdx, statementIdx);
// we don't know on which node it's going to be executed
PreparedStatement preparedUnqualified = sessions.get("ks" + ks).prepare(qs);
unqualifiedStatements.put(statementId, preparedUnqualified);
} catch (InvalidQueryException iqe) {
if (!iqe.getMessage().contains("No keyspace has been"))
throw iqe;
}
break;
case BOUNCE_CLIENT:
if (System.nanoTime() < reconnectAfter)
break;
if (!reconnected) {
for (Session s : sessions.values()) s.close();
cluster.close();
cluster = clusterSupplier.get();
for (int j = 0; j < KEYSPACES; j++) sessions.put("ks" + j, cluster.connect("ks" + j));
qualifiedStatements.clear();
unqualifiedStatements.clear();
reconnected = true;
}
break;
}
}
} catch (Throwable t) {
interrupt.set(true);
t.printStackTrace();
while (true) {
Throwable seen = thrown.get();
Throwable merged = Throwables.merge(seen, t);
if (thrown.compareAndSet(seen, merged))
break;
}
throw t;
} finally {
logger.info("Exiting...");
if (cluster != null)
cluster.close();
}
}));
}
for (Thread thread : threads) thread.start();
for (Thread thread : threads) thread.join();
if (thrown.get() != null)
throw thrown.get();
}
}
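The Action enum, the frequent and infrequent pools, and the getHost helper referenced above are defined elsewhere in the test class and are not part of this excerpt. A minimal sketch consistent with the switch cases and call sites might look as follows; the split between the two pools is an assumption (the INFREQUENT_ACTION_COEF gate suggests only rare actions such as BUMP_VERSION and BOUNCE_CLIENT live in infrequent):
enum Action {
BUMP_VERSION, EXECUTE_QUALIFIED, EXECUTE_UNQUALIFIED, FORGET_PREPARED,
CLEAR_CACHES, PREPARE_QUALIFIED, PREPARE_UNQUALIFIED, BOUNCE_CLIENT
}
// assumed pool split: rare actions in infrequent, the rest in frequent
static final Action[] infrequent = { Action.BUMP_VERSION, Action.BOUNCE_CLIENT };
static final Action[] frequent = { Action.EXECUTE_QUALIFIED, Action.EXECUTE_UNQUALIFIED, Action.FORGET_PREPARED, Action.CLEAR_CACHES, Action.PREPARE_QUALIFIED, Action.PREPARE_UNQUALIFIED };
// sketch: pins execution to node 1 or node 2 by matching the driver Host address
// (node 1 = 127.0.0.1, per the c.get(nodeWithFix.get() ? 1 : 2) call above)
static Host getHost(com.datastax.driver.core.Cluster cluster, boolean nodeWithFix) {
String address = nodeWithFix ? "127.0.0.1" : "127.0.0.2";
for (Host host : cluster.getMetadata().getAllHosts())
if (host.getAddress().getHostAddress().equals(address))
return host;
throw new IllegalStateException("no driver Host for " + address);
}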
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache: class NativeMixedVersionTest, method v4ConnectionCleansUpThreadLocalState.
@Test
public void v4ConnectionCleansUpThreadLocalState() throws IOException {
// limit the netty thread pool to size 1; this makes the test deterministic,
// as all work happens on the single thread
System.setProperty("io.netty.eventLoopThreads", "1");
try (Cluster cluster = Cluster.build(1).withConfig(c -> c.with(Feature.values()).set("track_warnings", ImmutableMap.of("enabled", true, "local_read_size", ImmutableMap.of("warn_threshold_kb", 1)))).start()) {
init(cluster);
cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck1 int, value blob, PRIMARY KEY (pk, ck1))"));
IInvokableInstance node = cluster.get(1);
ByteBuffer blob = ByteBuffer.wrap("This is just some large string to get some number of bytes".getBytes(StandardCharsets.UTF_8));
for (int i = 0; i < 100; i++) node.executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck1, value) VALUES (?, ?, ?)"), 0, i, blob);
// query with protocol v4+ first and expect warnings, then with v3 (which does not support warnings)
try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V5);
Session session = driver.connect()) {
ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isNotEmpty();
}
try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V3);
Session session = driver.connect()) {
ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
}
// leaked warnings on a pre-v4 connection should never happen, so make sure no such logs are found
List<String> result = node.logs().grep("Warnings present in message with version less than").getResult();
Assertions.assertThat(result).isEmpty();
} finally {
System.clearProperty("io.netty.eventLoopThreads");
}
}
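The method name refers to the per-request client warning state that Cassandra keeps in a thread-local (ClientWarn) on the netty event loop thread. With the pool pinned to one thread, state leaked by the v5 request would become visible to the following v3 request, which is what the log grep above guards against. A simplified illustration of the pattern under test, not the actual ClientWarn implementation:
// hypothetical sketch of thread-local warning capture and the cleanup being verified
static final ThreadLocal<List<String>> WARNINGS = new ThreadLocal<>();
static void warn(String message) {
List<String> current = WARNINGS.get();
if (current != null)
current.add(message); // only collected while a v4+ request is in flight
}
static List<String> drainAndClear() {
List<String> captured = WARNINGS.get();
WARNINGS.remove(); // without this, the next request on the same thread would see stale warnings
return captured;
}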
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache: class FqlReplayDDLExclusionTest, method test.
@Ignore
@Test
public void test() throws Throwable {
try (final Cluster cluster = init(builder().withNodes(1).withConfig(updater -> updater.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)).start())) {
final IInvokableInstance node = cluster.get(1);
// run statements through the driver, since the native protocol is the path in Cassandra where events are propagated to the full query logger
try (com.datastax.driver.core.Cluster c = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
Session s = c.connect()) {
s.execute("CREATE KEYSPACE fql_ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};");
node.nodetool("enablefullquerylog", "--path", temporaryFolder.getRoot().getAbsolutePath());
s.execute("CREATE TABLE fql_ks.fql_table (id int primary key);");
s.execute("INSERT INTO fql_ks.fql_table (id) VALUES (1)");
node.nodetool("disablefullquerylog");
// drop the table; we expect the DDL replay to reconstruct it
node.executeInternal("DROP TABLE fql_ks.fql_table;");
// without --replay-ddl-statements the DDL is skipped, so the replayed insert fails server-side because the underlying table is not there; the tool itself still exits 0
final ToolResult negativeRunner = ToolRunner.invokeClass("org.apache.cassandra.fqltool.FullQueryLogTool", "replay", "--keyspace", "fql_ks", "--target", "127.0.0.1", "--", temporaryFolder.getRoot().getAbsolutePath());
assertEquals(0, negativeRunner.getExitCode());
try {
node.executeInternalWithResult("SELECT * from fql_ks.fql_table");
fail("This query should fail because we do not expect fql_ks.fql_table to be created!");
} catch (final Exception ex) {
assertTrue(ex.getMessage().contains("table fql_table does not exist"));
}
// here we replay with --replay-ddl-statements so table will be created and insert will succeed
final ToolResult positiveRunner = ToolRunner.invokeClass("org.apache.cassandra.fqltool.FullQueryLogTool", "replay", "--keyspace", "fql_ks", "--target", "127.0.0.1", "--replay-ddl-statements", "--", temporaryFolder.getRoot().getAbsolutePath());
assertEquals(0, positiveRunner.getExitCode());
assertRows(node.executeInternalWithResult("SELECT * from fql_ks.fql_table"), QueryResults.builder().row(1).build());
}
}
}
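The temporaryFolder used as the full query log path is a standard JUnit rule declared on the test class rather than in this excerpt; presumably along the lines of:
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();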
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache: class ReprepareFuzzTest, method fuzzTest.
@Test
public void fuzzTest() throws Throwable {
try (ICluster<IInvokableInstance> c = builder().withNodes(1).withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL)).withInstanceInitializer(PrepareBehaviour::alwaysNewBehaviour).start()) {
// Long string to make us invalidate caches occasionally
String veryLongString = "very";
for (int i = 0; i < 2; i++) veryLongString += veryLongString;
final String qualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM ks%d.tbl";
final String unqualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM tbl";
int KEYSPACES = 3;
final int STATEMENTS_PER_KS = 3;
for (int i = 0; i < KEYSPACES; i++) {
c.schemaChange(withKeyspace("CREATE KEYSPACE ks" + i + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
c.schemaChange(withKeyspace("CREATE TABLE ks" + i + ".tbl (pk int, ck int, PRIMARY KEY (pk, ck));"));
for (int j = 0; j < i; j++) c.coordinator(1).execute("INSERT INTO ks" + i + ".tbl (pk, ck) VALUES (?, ?)", ConsistencyLevel.QUORUM, 1, j);
}
List<Thread> threads = new ArrayList<>();
AtomicBoolean interrupt = new AtomicBoolean(false);
AtomicReference<Throwable> thrown = new AtomicReference<>();
int INFREQUENT_ACTION_COEF = 10;
long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(60);
for (int i = 0; i < FBUtilities.getAvailableProcessors() * 2; i++) {
int seed = i;
threads.add(new Thread(() -> {
com.datastax.driver.core.Cluster cluster = null;
Session session = null;
try {
Random rng = new Random(seed);
int usedKsIdx = -1;
String usedKs = null;
Map<Pair<Integer, Integer>, PreparedStatement> qualifiedStatements = new HashMap<>();
Map<Pair<Integer, Integer>, PreparedStatement> unqualifiedStatements = new HashMap<>();
cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
session = cluster.connect();
while (!interrupt.get() && (System.nanoTime() < deadline)) {
final int ks = rng.nextInt(KEYSPACES);
final int statementIdx = rng.nextInt(STATEMENTS_PER_KS);
final Pair<Integer, Integer> statementId = Pair.create(ks, statementIdx);
int v = rng.nextInt(INFREQUENT_ACTION_COEF + 1);
Action[] pool;
if (v == INFREQUENT_ACTION_COEF)
pool = infrequent;
else
pool = frequent;
Action action = pool[rng.nextInt(pool.length)];
switch(action) {
case EXECUTE_QUALIFIED:
if (!qualifiedStatements.containsKey(statementId))
continue;
try {
int counter = 0;
for (Iterator<Object[]> iter = RowUtil.toObjects(session.execute(qualifiedStatements.get(statementId).bind())); iter.hasNext(); ) {
Object[] current = iter.next();
int v0 = (int) current[0];
int v1 = (int) current[1];
Assert.assertEquals(v0, 1);
Assert.assertEquals(v1, counter++);
}
Assert.assertEquals(ks, counter);
} catch (Throwable t) {
if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
continue;
throw t;
}
break;
case EXECUTE_UNQUALIFIED:
if (!unqualifiedStatements.containsKey(statementId))
continue;
try {
int counter = 0;
for (Iterator<Object[]> iter = RowUtil.toObjects(session.execute(unqualifiedStatements.get(statementId).bind())); iter.hasNext(); ) {
Object[] current = iter.next();
int v0 = (int) current[0];
int v1 = (int) current[1];
Assert.assertEquals(v0, 1);
Assert.assertEquals(v1, counter++);
}
Assert.assertEquals(unqualifiedStatements.get(statementId).getQueryKeyspace() + " " + usedKs + " " + statementId, Integer.parseInt(unqualifiedStatements.get(statementId).getQueryKeyspace().replace("ks", "")), counter);
} catch (Throwable t) {
if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
continue;
throw t;
}
break;
case PREPARE_QUALIFIED:
{
String qs = String.format(qualified, statementIdx, statementIdx, ks);
String keyspace = "ks" + ks;
PreparedStatement preparedQualified = session.prepare(qs);
// for a qualified statement the keyspace comes from the statement itself, so its hash must not include the session keyspace
PreparedStatementHelper.assertHashWithoutKeyspace(preparedQualified, qs, keyspace);
qualifiedStatements.put(statementId, preparedQualified);
}
break;
case PREPARE_UNQUALIFIED:
try {
String qs = String.format(unqualified, statementIdx, statementIdx);
PreparedStatement preparedUnqualified = session.prepare(qs);
Assert.assertEquals(preparedUnqualified.getQueryKeyspace(), usedKs);
PreparedStatementHelper.assertHashWithKeyspace(preparedUnqualified, qs, usedKs);
unqualifiedStatements.put(Pair.create(usedKsIdx, statementIdx), preparedUnqualified);
} catch (InvalidQueryException iqe) {
if (!iqe.getMessage().contains("No keyspace has been"))
throw iqe;
} catch (Throwable t) {
if (usedKs == null) {
// ignored
continue;
}
throw t;
}
break;
case CLEAR_CACHES:
c.get(1).runOnInstance(() -> {
SystemKeyspace.loadPreparedStatements((id, query, keyspace) -> {
if (rng.nextBoolean())
QueryProcessor.instance.evictPrepared(id);
return true;
});
});
break;
case RELOAD_FROM_TABLES:
c.get(1).runOnInstance(QueryProcessor::clearPreparedStatementsCache);
c.get(1).runOnInstance(() -> QueryProcessor.instance.preloadPreparedStatements());
break;
case SWITCH_KEYSPACE:
usedKsIdx = ks;
usedKs = "ks" + ks;
session.execute("USE " + usedKs);
break;
case FORGET_PREPARED:
Map<Pair<Integer, Integer>, PreparedStatement> toCleanup = rng.nextBoolean() ? qualifiedStatements : unqualifiedStatements;
Set<Pair<Integer, Integer>> toDrop = new HashSet<>();
for (Pair<Integer, Integer> e : toCleanup.keySet()) {
if (rng.nextBoolean())
toDrop.add(e);
}
for (Pair<Integer, Integer> e : toDrop) toCleanup.remove(e);
toDrop.clear();
break;
case RECONNECT:
session.close();
cluster.close();
cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
session = cluster.connect();
qualifiedStatements.clear();
unqualifiedStatements.clear();
usedKs = null;
usedKsIdx = -1;
break;
}
}
} catch (Throwable t) {
interrupt.set(true);
t.printStackTrace();
while (true) {
Throwable seen = thrown.get();
Throwable merged = Throwables.merge(seen, t);
if (thrown.compareAndSet(seen, merged))
break;
}
throw t;
} finally {
if (session != null)
session.close();
if (cluster != null)
cluster.close();
}
}));
}
for (Thread thread : threads) thread.start();
for (Thread thread : threads) thread.join();
if (thrown.get() != null)
throw thrown.get();
}
}
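The CLEAR_CACHES and RELOAD_FROM_TABLES actions work because prepared statements are also persisted in the local system.prepared_statements table, which SystemKeyspace.loadPreparedStatements iterates and QueryProcessor.instance.preloadPreparedStatements reads back after the cache is cleared. For illustration, the persisted entries on node 1 could be inspected through the same in-jvm dtest API (column names per the system schema):
// peek at the persisted prepared statements on node 1 (illustrative only)
Object[][] persisted = c.get(1).executeInternal("SELECT prepared_id, logged_keyspace, query_string FROM system.prepared_statements");
for (Object[] row : persisted)
System.out.println(row[1] + " -> " + row[2]); // logged_keyspace may be null when no USE was in effect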
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache: class HintsServiceMetricsTest, method testHintsServiceMetrics.
@Test
public void testHintsServiceMetrics() throws Exception {
// set up a 3-node cluster with a ByteBuddy injection that makes the writing of some hints fail
try (Cluster cluster = builder().withNodes(3).withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)).withInstanceInitializer(FailHints::install).start()) {
// set up a message filter to drop some of the hint request messages from node1
AtomicInteger hintsNode2 = new AtomicInteger();
AtomicInteger hintsNode3 = new AtomicInteger();
cluster.filters().verbs(Verb.HINT_REQ.id).from(1).messagesMatching((from, to, message) -> (to == 2 && hintsNode2.incrementAndGet() <= NUM_TIMEOUTS_PER_NODE) || (to == 3 && hintsNode3.incrementAndGet() <= NUM_TIMEOUTS_PER_NODE)).drop();
// set up a message filter to drop mutation requests from node1, so it creates hints for those mutations
AtomicBoolean dropWritesForNode2 = new AtomicBoolean(false);
AtomicBoolean dropWritesForNode3 = new AtomicBoolean(false);
cluster.filters().verbs(Verb.MUTATION_REQ.id).from(1).messagesMatching((from, to, message) -> (to == 2 && dropWritesForNode2.get()) || (to == 3 && dropWritesForNode3.get())).drop();
// fix under-replicated system keyspaces so they don't produce hint requests while we are dropping mutations
fixDistributedSchemas(cluster);
cluster.schemaChange(withKeyspace("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}"));
cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int PRIMARY KEY, v int)"));
ICoordinator coordinator = cluster.coordinator(1);
IInvokableInstance node1 = cluster.get(1);
IInvokableInstance node2 = cluster.get(2);
IInvokableInstance node3 = cluster.get(3);
// write the first half of the rows with the second node dropping mutation requests,
// so some hints will be created for that node
dropWritesForNode2.set(true);
for (int i = 0; i < NUM_ROWS / 2; i++) coordinator.execute(withKeyspace("INSERT INTO %s.t (k, v) VALUES (?, ?)"), QUORUM, i, i);
dropWritesForNode2.set(false);
// write the second half of the rows with the third node dropping mutation requests,
// so some hints will be created for that node
dropWritesForNode3.set(true);
for (int i = NUM_ROWS / 2; i < NUM_ROWS; i++) coordinator.execute(withKeyspace("INSERT INTO %s.t (k, v) VALUES (?, ?)"), QUORUM, i, i);
dropWritesForNode3.set(false);
// wait until all the hints have been successfully applied to the nodes that have been dropping mutations
waitUntilAsserted(() -> assertThat(countRows(node2)).isEqualTo(countRows(node3)).isEqualTo(NUM_ROWS));
// Verify the metrics for the coordinator node, which is the only one actually sending hints.
// The hint delivery errors that we have injected should have made the service try to send them again.
// These retries are done periodically and in pages, so the retries may send again some of the hints that
// were already successfully sent. This way, there may be more succeeded hints than actual hints/rows.
waitUntilAsserted(() -> assertThat(countHintsSucceeded(node1)).isGreaterThanOrEqualTo(NUM_ROWS));
waitUntilAsserted(() -> assertThat(countHintsFailed(node1)).isEqualTo(NUM_FAILURES_PER_NODE * 2));
waitUntilAsserted(() -> assertThat(countHintsTimedOut(node1)).isEqualTo(NUM_TIMEOUTS_PER_NODE * 2));
// verify delay metrics
long numGlobalDelays = countGlobalDelays(node1);
assertThat(numGlobalDelays).isGreaterThanOrEqualTo(NUM_ROWS);
assertThat(countEndpointDelays(node1, node1)).isEqualTo(0);
assertThat(countEndpointDelays(node1, node2)).isGreaterThan(0).isLessThanOrEqualTo(numGlobalDelays);
assertThat(countEndpointDelays(node1, node3)).isGreaterThan(0).isLessThanOrEqualTo(numGlobalDelays);
assertThat(countEndpointDelays(node1, node2) + countEndpointDelays(node1, node3)).isGreaterThanOrEqualTo(numGlobalDelays);
// verify that the metrics for the not-coordinator nodes are zero
for (IInvokableInstance node : Arrays.asList(node2, node3)) {
assertThat(countHintsSucceeded(node)).isEqualTo(0);
assertThat(countHintsFailed(node)).isEqualTo(0);
assertThat(countHintsTimedOut(node)).isEqualTo(0);
assertThat(countGlobalDelays(node)).isEqualTo(0);
cluster.forEach(target -> assertThat(countEndpointDelays(node, target)).isEqualTo(0));
}
}
}
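The count helpers (countHintsSucceeded, countHintsFailed, countHintsTimedOut, countGlobalDelays, countEndpointDelays, countRows) come from the surrounding test class. A minimal sketch of one of them, assuming it reads the global meter on HintsServiceMetrics from inside the instance classloader:
// field name assumed from org.apache.cassandra.metrics.HintsServiceMetrics
private static long countHintsSucceeded(IInvokableInstance node) {
return node.callOnInstance(() -> HintsServiceMetrics.hintsSucceeded.getCount());
}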