Use of org.apache.cassandra.distributed.api.ICoordinator in project cassandra by apache.
The class ReadRepairTest, method testGCableTombstoneResurrectionOnRangeSliceQuery.
/**
 * Before CASSANDRA-11427, range queries would trigger read repairs for purgeable tombstones on hosts that had
 * already compacted away those tombstones. This resulted in constant data transfer and compaction activity,
 * sourced by the few nodes seeding purgeable tombstones and triggered e.g. by periodic jobs scanning the data
 * range-wise.
 * <p>
 * See CASSANDRA-11427.
 * <p>
 * Migrated from Python dtest read_repair_test.py:TestReadRepair.test_gcable_tombstone_resurrection_on_range_slice_query()
 */
@Test
public void testGCableTombstoneResurrectionOnRangeSliceQuery() throws Throwable {
    try (Cluster cluster = init(Cluster.create(2))) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int, c int, PRIMARY KEY(k, c)) " +
                                          "WITH gc_grace_seconds=0 AND compaction = " +
                                          "{'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'}"));
        ICoordinator coordinator = cluster.coordinator(1);
        // insert some data
        coordinator.execute(withKeyspace("INSERT INTO %s.t(k, c) VALUES (0, 0)"), ALL);
        coordinator.execute(withKeyspace("INSERT INTO %s.t(k, c) VALUES (1, 1)"), ALL);
        // create partition tombstones on all nodes, for both existent and non-existent partitions
        // exists
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=0"), ALL);
        // doesn't exist
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=2"), ALL);
        // create row tombstones on all nodes, for both existent and non-existent rows
        // exists
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=1 AND c=1"), ALL);
        // doesn't exist
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=3 AND c=1"), ALL);
        // flush a single sstable containing the tombstones on each node
        cluster.get(1).flush(KEYSPACE);
        cluster.get(2).flush(KEYSPACE);
        // purge the tombstones from node2 with a compaction (gc_grace_seconds=0)
        cluster.get(2).forceCompact(KEYSPACE, "t");
        // run an unrestricted range query, verifying that it doesn't trigger read repair
        coordinator.execute(withKeyspace("SELECT * FROM %s.t"), ALL);
        long requests = ReadRepairTester.readRepairRequestsCount(cluster.get(1), "t");
        assertEquals("No read repair requests were expected, found " + requests, 0, requests);
    }
}
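The helper ReadRepairTester.readRepairRequestsCount is defined outside this snippet. As a minimal sketch, assuming it simply polls the table-level read repair meter inside the instance's classloader (an assumption about the helper, not code copied from the project):

// A sketch, not the project's implementation: read the table's
// ReadRepairRequests meter on the given node via callOnInstance().
static long readRepairRequestsCount(IInvokableInstance node, String table) {
    return node.callOnInstance(() -> Keyspace.open(KEYSPACE)
                                             .getColumnFamilyStore(table)
                                             .metric.readRepairRequests.getCount());
}

If node2's compaction purged the tombstones correctly, the range query at ALL reconciles without issuing any repair mutations, so the counter stays at zero.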
Use of org.apache.cassandra.distributed.api.ICoordinator in project cassandra by apache.
The class GroupByTest, method testGroupWithDeletesAndPaging.
@Test
public void testGroupWithDeletesAndPaging() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(2)
                                         .withConfig(cfg -> cfg.with(Feature.GOSSIP, NETWORK, NATIVE_PROTOCOL))
                                         .start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, PRIMARY KEY (pk, ck))"));
        ICoordinator coordinator = cluster.coordinator(1);
        coordinator.execute(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (0, 0)"), ConsistencyLevel.ALL);
        coordinator.execute(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (1, 1)"), ConsistencyLevel.ALL);
        // delete each row on only one node, so the deletions are only visible when reading at CL ALL
        cluster.get(1).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=0 AND ck=0"));
        cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=1 AND ck=1"));
        // a paged GROUP BY query at CL ALL should see both deletions and return no rows
        String query = withKeyspace("SELECT * FROM %s.tbl GROUP BY pk");
        Iterator<Object[]> rows = coordinator.executeWithPaging(query, ConsistencyLevel.ALL, 1);
        assertRows(Iterators.toArray(rows, Object[].class));
        // repeat the check through the Java driver, again with a fetch size of one
        try (com.datastax.driver.core.Cluster c = com.datastax.driver.core.Cluster.builder()
                                                                                  .addContactPoint("127.0.0.1")
                                                                                  .build();
             Session session = c.connect()) {
            SimpleStatement stmt = new SimpleStatement(withKeyspace("select * from %s.tbl where pk = 1 group by pk"));
            stmt.setFetchSize(1);
            Iterator<Row> rs = session.execute(stmt).iterator();
            Assert.assertFalse(rs.hasNext());
        }
    }
}
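For reference, compiling a snippet like this one needs imports from both the in-JVM dtest API and the Java driver; a plausible set (reconstructed here, not copied from the source file) is:

import java.util.Iterator;
import org.junit.Assert;
import org.junit.Test;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.google.common.collect.Iterators;
import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.ConsistencyLevel;
import org.apache.cassandra.distributed.api.Feature;
import org.apache.cassandra.distributed.api.ICoordinator;
import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
import static org.apache.cassandra.distributed.api.Feature.NETWORK;
import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;

The mix of the qualified Feature.GOSSIP with the statically imported NETWORK and NATIVE_PROTOCOL in withConfig reflects the snippet as written.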
Use of org.apache.cassandra.distributed.api.ICoordinator in project cassandra by apache.
The class HintsServiceMetricsTest, method testHintsServiceMetrics.
@Test
public void testHintsServiceMetrics() throws Exception {
    // set up a 3-node cluster with a ByteBuddy injection that makes the writing of some hints fail
    try (Cluster cluster = builder().withNodes(3)
                                    .withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL))
                                    .withInstanceInitializer(FailHints::install)
                                    .start()) {
        // set up a message filter to drop some of the hint request messages from node1
        AtomicInteger hintsNode2 = new AtomicInteger();
        AtomicInteger hintsNode3 = new AtomicInteger();
        cluster.filters()
               .verbs(Verb.HINT_REQ.id)
               .from(1)
               .messagesMatching((from, to, message) -> (to == 2 && hintsNode2.incrementAndGet() <= NUM_TIMEOUTS_PER_NODE) ||
                                                        (to == 3 && hintsNode3.incrementAndGet() <= NUM_TIMEOUTS_PER_NODE))
               .drop();
        // set up a message filter to drop mutation requests from node1, so it creates hints for those mutations
        AtomicBoolean dropWritesForNode2 = new AtomicBoolean(false);
        AtomicBoolean dropWritesForNode3 = new AtomicBoolean(false);
        cluster.filters()
               .verbs(Verb.MUTATION_REQ.id)
               .from(1)
               .messagesMatching((from, to, message) -> (to == 2 && dropWritesForNode2.get()) ||
                                                        (to == 3 && dropWritesForNode3.get()))
               .drop();
        // fix the under-replicated system keyspaces so they don't produce hint requests while we are dropping mutations
        fixDistributedSchemas(cluster);
        cluster.schemaChange(withKeyspace("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}"));
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int PRIMARY KEY, v int)"));
        ICoordinator coordinator = cluster.coordinator(1);
        IInvokableInstance node1 = cluster.get(1);
        IInvokableInstance node2 = cluster.get(2);
        IInvokableInstance node3 = cluster.get(3);
        // write the first half of the rows while the second node is dropping mutation requests,
        // so some hints will be created for that node
        dropWritesForNode2.set(true);
        for (int i = 0; i < NUM_ROWS / 2; i++)
            coordinator.execute(withKeyspace("INSERT INTO %s.t (k, v) VALUES (?, ?)"), QUORUM, i, i);
        dropWritesForNode2.set(false);
        // write the second half of the rows while the third node is dropping mutation requests,
        // so some hints will be created for that node
        dropWritesForNode3.set(true);
        for (int i = NUM_ROWS / 2; i < NUM_ROWS; i++)
            coordinator.execute(withKeyspace("INSERT INTO %s.t (k, v) VALUES (?, ?)"), QUORUM, i, i);
        dropWritesForNode3.set(false);
        // wait until all the hints have been successfully applied to the nodes that were dropping mutations
        waitUntilAsserted(() -> assertThat(countRows(node2)).isEqualTo(countRows(node3)).isEqualTo(NUM_ROWS));
        // Verify the metrics for the coordinator node, which is the only one actually sending hints.
        // The hint delivery errors that we have injected should have made the service try to send them again.
        // These retries are done periodically and in pages, so they may resend some hints that were already
        // successfully delivered. As a result, there may be more succeeded hints than actual hints/rows.
        waitUntilAsserted(() -> assertThat(countHintsSucceeded(node1)).isGreaterThanOrEqualTo(NUM_ROWS));
        waitUntilAsserted(() -> assertThat(countHintsFailed(node1)).isEqualTo(NUM_FAILURES_PER_NODE * 2));
        waitUntilAsserted(() -> assertThat(countHintsTimedOut(node1)).isEqualTo(NUM_TIMEOUTS_PER_NODE * 2));
        // verify the delay metrics
        long numGlobalDelays = countGlobalDelays(node1);
        assertThat(numGlobalDelays).isGreaterThanOrEqualTo(NUM_ROWS);
        assertThat(countEndpointDelays(node1, node1)).isEqualTo(0);
        assertThat(countEndpointDelays(node1, node2)).isGreaterThan(0).isLessThanOrEqualTo(numGlobalDelays);
        assertThat(countEndpointDelays(node1, node3)).isGreaterThan(0).isLessThanOrEqualTo(numGlobalDelays);
        assertThat(countEndpointDelays(node1, node2) + countEndpointDelays(node1, node3)).isGreaterThanOrEqualTo(numGlobalDelays);
        // verify that the metrics for the non-coordinator nodes are zero
        for (IInvokableInstance node : Arrays.asList(node2, node3)) {
            assertThat(countHintsSucceeded(node)).isEqualTo(0);
            assertThat(countHintsFailed(node)).isEqualTo(0);
            assertThat(countHintsTimedOut(node)).isEqualTo(0);
            assertThat(countGlobalDelays(node)).isEqualTo(0);
            cluster.forEach(target -> assertThat(countEndpointDelays(node, target)).isEqualTo(0));
        }
    }
}
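The countHints* and count*Delays helpers are defined elsewhere in the test class. As a sketch, assuming they poll the meters in org.apache.cassandra.metrics.HintsServiceMetrics inside each instance (an assumption about the helpers, not code copied from the project):

// A sketch under the assumption that the helpers read the static
// hintsSucceeded/hintsFailed meters on the target node's classloader.
private static long countHintsSucceeded(IInvokableInstance node) {
    return node.callOnInstance(() -> HintsServiceMetrics.hintsSucceeded.getCount());
}

private static long countHintsFailed(IInvokableInstance node) {
    return node.callOnInstance(() -> HintsServiceMetrics.hintsFailed.getCount());
}

Because only node1 coordinates writes here, only node1 ever dispatches hints, which is why the same counters are asserted to be zero on node2 and node3.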
Use of org.apache.cassandra.distributed.api.ICoordinator in project cassandra by apache.
The class AbstractClientSizeWarning, method failThreshold.
public void failThreshold(String cql) throws UnknownHostException {
    ICoordinator node = CLUSTER.coordinator(1);
    for (int i = 0; i < failThresholdRowCount(); i++)
        node.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", ConsistencyLevel.ALL, i + 1, bytes(512));
    if (shouldFlush())
        CLUSTER.stream().forEach(i -> i.flush(KEYSPACE));
    enable(true);
    checkpointHistogram();
    // run the query internally on node1, expecting it to abort and to record the abort as a client warning
    List<String> warnings = CLUSTER.get(1).callsOnInstance(() -> {
        ClientWarn.instance.captureWarnings();
        CoordinatorWarnings.init();
        try {
            QueryProcessor.execute(cql, org.apache.cassandra.db.ConsistencyLevel.ALL, QueryState.forInternalCalls());
            Assert.fail("Expected query failure");
        } catch (ReadSizeAbortException e) {
            // expected; the client transport returns an error message and includes the client warnings
        }
        CoordinatorWarnings.done();
        CoordinatorWarnings.reset();
        return ClientWarn.instance.getWarnings();
    }).call();
    assertAbortWarnings(warnings);
    assertHistogramUpdated();
    assertWarnAborts(0, 1, 1);
    try {
        driverQueryAll(cql);
        Assert.fail("Query should have thrown ReadFailureException");
    } catch (com.datastax.driver.core.exceptions.ReadFailureException e) {
        // Without changing the client we can't produce a better message: the client does NOT include
        // the message sent from the server in the exception, so the message doesn't work well in this case.
        assertThat(e.getMessage()).contains("responses were required but only 0 replica responded");
        ImmutableSet<InetAddress> expectedKeys = ImmutableSet.of(InetAddress.getByAddress(new byte[] { 127, 0, 0, 1 }),
                                                                 InetAddress.getByAddress(new byte[] { 127, 0, 0, 2 }),
                                                                 InetAddress.getByAddress(new byte[] { 127, 0, 0, 3 }));
        assertThat(e.getFailuresMap())
            .hasSizeBetween(1, 3)
            .containsValue(RequestFailureReason.READ_SIZE.code)
            .hasKeySatisfying(new Condition<InetAddress>() {
                public boolean matches(InetAddress value) {
                    return expectedKeys.contains(value);
                }
            });
    }
    assertHistogramUpdated();
    assertWarnAborts(0, 2, 1);
    // with the guardrail disabled, the query should no longer fail or produce warnings
    enable(false);
    SimpleQueryResult result = node.executeWithResult(cql, ConsistencyLevel.ALL);
    assertThat(result.warnings()).isEmpty();
    assertHistogramNotUpdated();
    assertThat(driverQueryAll(cql).getExecutionInfo().getWarnings()).isEmpty();
    assertHistogramNotUpdated();
    assertWarnAborts(0, 2, 0);
}
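driverQueryAll is a helper defined elsewhere in the test class. A minimal sketch, assuming the class keeps a shared Java driver Session (the SESSION field here is hypothetical):

// Hypothetical sketch: run the query through the Java driver at CL ALL so a
// server-side ReadSizeAbortException surfaces as a driver ReadFailureException.
private static ResultSet driverQueryAll(String cql) {
    return SESSION.execute(new SimpleStatement(cql)
                           .setConsistencyLevel(com.datastax.driver.core.ConsistencyLevel.ALL));
}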
Use of org.apache.cassandra.distributed.api.ICoordinator in project cassandra by apache.
The class ReadRepairTester, method queryDistributed.
private Object[][] queryDistributed(String query, Object... boundValues) {
    String formattedQuery = String.format(query, qualifiedTableName);
    ICoordinator coordinator = cluster.coordinator(this.coordinator);
    // with paging enabled, fetch one row per page and materialize the iterator; otherwise execute directly
    return paging
           ? Iterators.toArray(coordinator.executeWithPaging(formattedQuery, ALL, 1, boundValues), Object[].class)
           : coordinator.execute(formattedQuery, ALL, boundValues);
}
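A usage example (hypothetical call): a caller passes a query with a %s placeholder for the qualified table name, and the tester picks paged or unpaged execution:

// Hypothetical usage: %s is formatted to the tester's qualified table name,
// and the query runs at CL ALL through the configured coordinator.
Object[][] rows = queryDistributed("SELECT * FROM %s WHERE k = ?", 0);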