Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class ReadRepairTest, method testGCableTombstoneResurrectionOnRangeSliceQuery.
/**
 * Before CASSANDRA-11427, range queries would trigger read repairs for purgeable tombstones on hosts that had
 * already compacted those tombstones away. This resulted in constant streaming and compaction activity, sourced
 * by the few nodes still seeding purgeable tombstones and triggered e.g. by periodic jobs scanning the data
 * range by range.
 * <p>
 * See CASSANDRA-11427.
 * <p>
 * Migrated from Python dtest read_repair_test.py:TestReadRepair.test_gcable_tombstone_resurrection_on_range_slice_query()
 */
@Test
public void testGCableTombstoneResurrectionOnRangeSliceQuery() throws Throwable {
    try (Cluster cluster = init(Cluster.create(2))) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int, c int, PRIMARY KEY(k, c)) " +
                                          "WITH gc_grace_seconds=0 AND compaction = " +
                                          "{'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'}"));
        ICoordinator coordinator = cluster.coordinator(1);
        // insert some data
        coordinator.execute(withKeyspace("INSERT INTO %s.t(k, c) VALUES (0, 0)"), ALL);
        coordinator.execute(withKeyspace("INSERT INTO %s.t(k, c) VALUES (1, 1)"), ALL);
        // create partition tombstones on all nodes, for both existent and non-existent partitions
        // exists
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=0"), ALL);
        // doesn't exist
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=2"), ALL);
        // create row tombstones on all nodes, for both existent and non-existent rows
        // exists
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=1 AND c=1"), ALL);
        // doesn't exist
        coordinator.execute(withKeyspace("DELETE FROM %s.t WHERE k=3 AND c=1"), ALL);
        // flush a single sstable with tombstones on each node
        cluster.get(1).flush(KEYSPACE);
        cluster.get(2).flush(KEYSPACE);
        // purge the tombstones from node 2 with a compaction (gc_grace_seconds=0)
        cluster.get(2).forceCompact(KEYSPACE, "t");
        // run an unrestricted range query, verifying that it doesn't trigger read repair
        coordinator.execute(withKeyspace("SELECT * FROM %s.t"), ALL);
        long requests = ReadRepairTester.readRepairRequestsCount(cluster.get(1), "t");
        assertEquals("No read repair requests were expected, found " + requests, 0, requests);
    }
}
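ReadRepairTester.readRepairRequestsCount is not part of this listing. A minimal sketch of how such a helper can be implemented on top of the in-jvm dtest API, assuming the per-table readRepairRequests meter is the metric being counted and that KEYSPACE is the dtest keyspace:

static long readRepairRequestsCount(IInvokableInstance node, String table) {
    // runs inside the instance's classloader, so it can read table metrics directly
    return node.callOnInstance(() -> Keyspace.open(KEYSPACE)
                                             .getColumnFamilyStore(table)
                                             .metric.readRepairRequests.getCount());
}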
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class ReadRepairTest, method partitionDeletionRTTimestampTieTest.
@Test
public void partitionDeletionRTTimestampTieTest() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(3).withInstanceInitializer(RRHelper::install).start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE distributed_test_keyspace.tbl0 (pk bigint,ck bigint,value bigint, PRIMARY KEY (pk, ck)) " +
                                          "WITH CLUSTERING ORDER BY (ck ASC) AND read_repair='blocking';"));
        long pk = 0L;
        // a row written at timestamp 1, then a range tombstone at timestamp 2 on all nodes
        cluster.coordinator(1).execute("INSERT INTO distributed_test_keyspace.tbl0 (pk, ck, value) VALUES (?,?,?) USING TIMESTAMP 1", ConsistencyLevel.ALL, pk, 1L, 1L);
        cluster.coordinator(1).execute("DELETE FROM distributed_test_keyspace.tbl0 USING TIMESTAMP 2 WHERE pk=? AND ck>?;", ConsistencyLevel.ALL, pk, 2L);
        // node 3 alone gets a partition deletion whose timestamp ties with the range tombstone
        cluster.get(3).executeInternal("DELETE FROM distributed_test_keyspace.tbl0 USING TIMESTAMP 2 WHERE pk=?;", pk);
        // assertRows with no expected rows asserts the read returns nothing: the partition
        // deletion at timestamp 2 supersedes the row written at timestamp 1
        assertRows(cluster.coordinator(1).execute("SELECT * FROM distributed_test_keyspace.tbl0 WHERE pk=? AND ck>=? AND ck<?;", ConsistencyLevel.ALL, pk, 1L, 3L));
    }
}
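RRHelper is referenced via withInstanceInitializer but its body is not included in this excerpt. Instance initializers in these dtests conventionally follow the ByteBuddy pattern sketched below; the intercepted class and method names here are placeholders, not the actual RRHelper internals:

public static class RRHelper {
    static void install(ClassLoader cl, int nodeNumber) {
        // Illustrative shape only: rewrite some server class before the node starts,
        // delegating the intercepted method to a static method on this helper.
        new ByteBuddy().rebase(SomeServerClass.class) // placeholder target class
                       .method(named("methodToIntercept")) // placeholder method name
                       .intercept(MethodDelegation.to(RRHelper.class))
                       .make()
                       .load(cl, ClassLoadingStrategy.Default.INJECTION);
    }
}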
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class FailingTruncationTest, method testFailingTruncation.
@Test
public void testFailingTruncation() throws IOException {
    try (Cluster cluster = init(Cluster.build(2).withInstanceInitializer(BBFailHelper::install).start())) {
        cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
        try {
            cluster.coordinator(1).execute("TRUNCATE " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
            fail("Truncate should fail on node 2");
        } catch (Exception e) {
            assertTrue(e.getMessage().contains("Truncate failed on replica /127.0.0.2"));
        }
    }
}
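BBFailHelper is likewise not shown. Given that the coordinator reports "Truncate failed on replica /127.0.0.2", a plausible sketch is an initializer that, on node 2 only, replaces TruncateVerbHandler.doVerb with a method that throws; the actual helper body is an assumption here:

public static class BBFailHelper {
    static void install(ClassLoader cl, int nodeNumber) {
        if (nodeNumber == 2) { // only break truncation on the second node
            new ByteBuddy().rebase(TruncateVerbHandler.class)
                           .method(named("doVerb"))
                           .intercept(MethodDelegation.to(BBFailHelper.class))
                           .make()
                           .load(cl, ClassLoadingStrategy.Default.INJECTION);
        }
    }
    public static void doVerb() {
        // the replica never acks the truncation, so the coordinator surfaces the failure
        throw new RuntimeException("simulated truncation failure");
    }
}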
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class FqlReplayDDLExclusionTest, method test.
@Ignore
@Test
public void test() throws Throwable {
    try (final Cluster cluster = init(builder().withNodes(1).withConfig(updater -> updater.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)).start())) {
        final IInvokableInstance node = cluster.get(1);
        // use a native-protocol driver connection, since that is the path in Cassandra where events are propagated to the logger
        try (com.datastax.driver.core.Cluster c = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
             Session s = c.connect()) {
            s.execute("CREATE KEYSPACE fql_ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};");
            node.nodetool("enablefullquerylog", "--path", temporaryFolder.getRoot().getAbsolutePath());
            s.execute("CREATE TABLE fql_ks.fql_table (id int primary key);");
            s.execute("INSERT INTO fql_ks.fql_table (id) VALUES (1)");
            node.nodetool("disablefullquerylog");
            // drop the table; we expect that DDL replay will reconstruct it
            node.executeInternal("DROP TABLE fql_ks.fql_table;");
            // without --replay-ddl-statements the replayed INSERT fails because the underlying table
            // is not there (the tool itself still exits cleanly; the missing table is verified below)
            final ToolResult negativeRunner = ToolRunner.invokeClass("org.apache.cassandra.fqltool.FullQueryLogTool",
                                                                     "replay",
                                                                     "--keyspace", "fql_ks",
                                                                     "--target", "127.0.0.1",
                                                                     "--",
                                                                     temporaryFolder.getRoot().getAbsolutePath());
            assertEquals(0, negativeRunner.getExitCode());
            try {
                node.executeInternalWithResult("SELECT * from fql_ks.fql_table");
                fail("This query should fail because we do not expect fql_ks.fql_table to be created!");
            } catch (final Exception ex) {
                assertTrue(ex.getMessage().contains("table fql_table does not exist"));
            }
            // replay again with --replay-ddl-statements, so the table is recreated and the insert succeeds
            final ToolResult positiveRunner = ToolRunner.invokeClass("org.apache.cassandra.fqltool.FullQueryLogTool",
                                                                     "replay",
                                                                     "--keyspace", "fql_ks",
                                                                     "--target", "127.0.0.1",
                                                                     "--replay-ddl-statements", // important
                                                                     "--",
                                                                     temporaryFolder.getRoot().getAbsolutePath());
            assertEquals(0, positiveRunner.getExitCode());
            assertRows(node.executeInternalWithResult("SELECT * from fql_ks.fql_table"), QueryResults.builder().row(1).build());
        }
    }
}
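Before replaying, the captured binary logs can be inspected with the same ToolRunner pattern via fqltool's dump subcommand; a minimal sketch, assuming dump accepts the log directory as its trailing argument:

final ToolResult dump = ToolRunner.invokeClass("org.apache.cassandra.fqltool.FullQueryLogTool",
                                               "dump",
                                               temporaryFolder.getRoot().getAbsolutePath());
assertEquals(0, dump.getExitCode());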
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class GossipShutdownTest, method shutdownStayDownTest.
/**
 * Makes sure that a node that has shut down doesn't come back as live (without being restarted).
 */
@Test
public void shutdownStayDownTest() throws IOException, InterruptedException, ExecutionException {
    ExecutorService es = Executors.newSingleThreadExecutor();
    try (Cluster cluster = init(builder().withNodes(2).withConfig(config -> config.with(GOSSIP).with(NETWORK)).start())) {
        cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, v int)");
        for (int i = 0; i < 10; i++)
            cluster.coordinator(1).execute("insert into " + KEYSPACE + ".tbl (id, v) values (?,?)", ALL, i, i);
        Condition timeToShutdown = newOneTimeCondition();
        Condition waitForShutdown = newOneTimeCondition();
        AtomicBoolean signalled = new AtomicBoolean(false);
        Future<?> f = es.submit(() -> {
            await(timeToShutdown);
            // start watching endpoint state changes on node 1 ...
            cluster.get(1).runOnInstance(() -> instance.register(new EPChanges()));
            // ... then shut down gossip on node 2
            cluster.get(2).runOnInstance(() -> {
                StorageService.instance.setIsShutdownUnsafeForTests(true);
                instance.stop();
            });
            waitForShutdown.signalAll();
        });
        // drop all gossip SYNs from node 2 to node 1; on the first ACK, pause delivery
        // until node 2 has shut down, let that ACK through, and drop any later ones
        cluster.filters().outbound().from(2).to(1).verbs(GOSSIP_DIGEST_SYN.id).messagesMatching((from, to, message) -> true).drop();
        cluster.filters().outbound().from(2).to(1).verbs(GOSSIP_DIGEST_ACK.id).messagesMatching((from, to, message) -> {
            if (signalled.compareAndSet(false, true)) {
                timeToShutdown.signalAll();
                await(waitForShutdown);
                return false;
            }
            return true;
        }).drop();
        // wait for gossip to exchange a few messages
        sleep(10000);
        f.get();
    } finally {
        es.shutdown();
    }
}
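EPChanges is registered on node 1 but not defined in this excerpt. It is presumably an IEndpointStateChangeSubscriber that fails the test if any endpoint is reported live again after the shutdown; a minimal sketch of that idea (the real class body is not part of this listing):

public static class EPChanges implements IEndpointStateChangeSubscriber {
    public void onAlive(InetAddressAndPort endpoint, EndpointState state) {
        // the point of the test: no node should come back as live without restarting
        throw new AssertionError("Node marked alive after shutdown: " + endpoint);
    }
    // remaining gossip callbacks deliberately left as no-ops
    public void onJoin(InetAddressAndPort endpoint, EndpointState epState) {}
    public void beforeChange(InetAddressAndPort endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue) {}
    public void onChange(InetAddressAndPort endpoint, ApplicationState state, VersionedValue value) {}
    public void onDead(InetAddressAndPort endpoint, EndpointState state) {}
    public void onRemove(InetAddressAndPort endpoint) {}
    public void onRestart(InetAddressAndPort endpoint, EndpointState state) {}
}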