Use of org.apache.cassandra.distributed.api.Feature.GOSSIP in project cassandra by apache.
The class PartitionDenylistTest, method checkStartupWithoutTriggeringUnavailable.
// Create a four node cluster, populate with some denylist entries, stop all
// the nodes, then bring them up one by one, waiting for each node to complete
// startup before starting the next.
//
// On startup each node runs a SELECT * query on the partition denylist table
// to populate the cache. The whole keyspace is unlikely to be available until
// three of the four nodes are started, so the early nodes will go through several
// cycles of failing to retrieve the partition denylist before succeeding.
//
// with(NETWORK, GOSSIP) is currently required for in-JVM dtests to create
// the distributed system tables.
@Test
public void checkStartupWithoutTriggeringUnavailable() throws IOException, InterruptedException, ExecutionException, TimeoutException {
int nodeCount = 4;
// down from 30s default
System.setProperty("cassandra.ring_delay_ms", "5000");
System.setProperty("cassandra.consistent.rangemovement", "false");
System.setProperty("cassandra.consistent.simultaneousmoves.allow", "true");
try (Cluster cluster = Cluster.build(nodeCount)
                              .withConfig(config -> config.with(NETWORK)
                                                          .with(GOSSIP)
                                                          .set("partition_denylist_enabled", true)
                                                          .set("denylist_initial_load_retry", "1s"))
                              .createWithoutStarting()) {
cluster.forEach(i -> {
i.startup();
i.runOnInstance(PartitionDenylistTest::waitUntilStarted);
});
// Do a cluster-wide check that no unavailables were recorded while the denylist was loaded.
cluster.forEach(i -> i.runOnInstance(PartitionDenylistTest::checkNoUnavailables));
}
}
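The comment above calls out that in-JVM dtests only create the distributed system tables (where the partition denylist lives) when both NETWORK and GOSSIP are enabled. A minimal sketch of that boilerplate follows; the class name and the final nodetool assertion are illustrative assumptions rather than code taken from the project.
import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
import static org.apache.cassandra.distributed.api.Feature.NETWORK;

import org.apache.cassandra.distributed.Cluster;
import org.junit.Test;

public class GossipFeatureSketchTest {
    @Test
    public void startsWithDistributedSystemTables() throws Exception {
        // Both features are needed: without GOSSIP the distributed system keyspace
        // that the denylist cache reads from is not created.
        try (Cluster cluster = Cluster.build(1)
                                      .withConfig(config -> config.with(NETWORK, GOSSIP)
                                                                  .set("partition_denylist_enabled", true))
                                      .start()) {
            cluster.get(1).nodetoolResult("status").asserts().success();
        }
    }
}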
Use of org.apache.cassandra.distributed.api.Feature.GOSSIP in project cassandra by apache.
The class RepairErrorsTest, method testNoSuchRepairSessionAnticompaction.
@Test
public void testNoSuchRepairSessionAnticompaction() throws IOException {
try (Cluster cluster = init(Cluster.build(2)
                                   .withConfig(config -> config.with(GOSSIP).with(NETWORK))
                                   .withInstanceInitializer(ByteBuddyHelper::installACNoSuchRepairSession)
                                   .start())) {
cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, x int)");
for (int i = 0; i < 10; i++)
    cluster.coordinator(1).execute("insert into " + KEYSPACE + ".tbl (id, x) VALUES (?,?)", ConsistencyLevel.ALL, i, i);
cluster.forEach(i -> i.flush(KEYSPACE));
long mark = cluster.get(1).logs().mark();
cluster.forEach(i -> i.nodetoolResult("repair", KEYSPACE).asserts().failure());
assertTrue(cluster.get(1).logs().grep(mark, "^ERROR").getResult().isEmpty());
}
}
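The test above relies on ByteBuddyHelper::installACNoSuchRepairSession, whose body is not shown in this excerpt. As a rough, hedged sketch of that instance-initializer pattern only: the Target class, method names, and the node check below are placeholders, not the project's actual helper.
import static net.bytebuddy.matcher.ElementMatchers.named;

import net.bytebuddy.ByteBuddy;
import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
import net.bytebuddy.implementation.MethodDelegation;

public class ByteBuddyInitializerSketch {
    // Stand-in for whatever repair/compaction class the real helper rewrites.
    public static class Target {
        public String work() {
            return "ok";
        }
    }

    // Matches the (ClassLoader, int) shape accepted by withInstanceInitializer; it must run
    // before Target is loaded in that ClassLoader, which is when the dtest framework calls it.
    public static void install(ClassLoader cl, int node) {
        if (node != 2)   // fault-inject only the second node
            return;
        new ByteBuddy().rebase(Target.class)
                       .method(named("work"))
                       .intercept(MethodDelegation.to(ByteBuddyInitializerSketch.class))
                       .make()
                       .load(cl, ClassLoadingStrategy.Default.INJECTION);
    }

    // Replacement body bound by MethodDelegation: simulate the failure the test expects
    // nodetool repair to surface without logging an ERROR on the coordinator.
    public static String work() {
        throw new RuntimeException("simulated NoSuchRepairSession during anticompaction");
    }
}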
Use of org.apache.cassandra.distributed.api.Feature.GOSSIP in project cassandra by apache.
The class RepairErrorsTest, method testRemoteValidationFailure.
@Test
public void testRemoteValidationFailure() throws IOException {
Cluster.Builder builder = Cluster.build(2).withConfig(config -> config.with(GOSSIP).with(NETWORK)).withInstanceInitializer(ByteBuddyHelper::install);
try (Cluster cluster = builder.createWithoutStarting()) {
cluster.setUncaughtExceptionsFilter((i, throwable) -> {
if (i == 2)
return throwable.getMessage() != null && throwable.getMessage().contains("IGNORE");
return false;
});
cluster.startup();
init(cluster);
cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, x int)");
for (int i = 0; i < 10; i++)
    cluster.coordinator(1).execute("insert into " + KEYSPACE + ".tbl (id, x) VALUES (?,?)", ConsistencyLevel.ALL, i, i);
cluster.forEach(i -> i.flush(KEYSPACE));
long mark = cluster.get(1).logs().mark();
cluster.forEach(i -> i.nodetoolResult("repair", "--full").asserts().failure());
Assertions.assertThat(cluster.get(1).logs().grep(mark, "^ERROR").getResult()).isEmpty();
}
}
Use of org.apache.cassandra.distributed.api.Feature.GOSSIP in project cassandra by apache.
The class ReprepareFuzzTest, method fuzzTest.
@Test
public void fuzzTest() throws Throwable {
try (ICluster<IInvokableInstance> c = builder().withNodes(1)
                                               .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
                                               .withInstanceInitializer(PrepareBehaviour::alwaysNewBehaviour)
                                               .start()) {
// Long string to make us invalidate caches occasionally
String veryLongString = "very";
for (int i = 0; i < 2; i++) veryLongString += veryLongString;
final String qualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM ks%d.tbl";
final String unqualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM tbl";
int KEYSPACES = 3;
final int STATEMENTS_PER_KS = 3;
for (int i = 0; i < KEYSPACES; i++) {
c.schemaChange(withKeyspace("CREATE KEYSPACE ks" + i + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
c.schemaChange(withKeyspace("CREATE TABLE ks" + i + ".tbl (pk int, ck int, PRIMARY KEY (pk, ck));"));
for (int j = 0; j < i; j++)
    c.coordinator(1).execute("INSERT INTO ks" + i + ".tbl (pk, ck) VALUES (?, ?)", ConsistencyLevel.QUORUM, 1, j);
}
List<Thread> threads = new ArrayList<>();
AtomicBoolean interrupt = new AtomicBoolean(false);
AtomicReference<Throwable> thrown = new AtomicReference<>();
int INFREQUENT_ACTION_COEF = 10;
long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(60);
for (int i = 0; i < FBUtilities.getAvailableProcessors() * 2; i++) {
int seed = i;
threads.add(new Thread(() -> {
com.datastax.driver.core.Cluster cluster = null;
Session session = null;
try {
Random rng = new Random(seed);
int usedKsIdx = -1;
String usedKs = null;
Map<Pair<Integer, Integer>, PreparedStatement> qualifiedStatements = new HashMap<>();
Map<Pair<Integer, Integer>, PreparedStatement> unqualifiedStatements = new HashMap<>();
cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
session = cluster.connect();
while (!interrupt.get() && (System.nanoTime() < deadline)) {
final int ks = rng.nextInt(KEYSPACES);
final int statementIdx = rng.nextInt(STATEMENTS_PER_KS);
final Pair<Integer, Integer> statementId = Pair.create(ks, statementIdx);
int v = rng.nextInt(INFREQUENT_ACTION_COEF + 1);
Action[] pool;
if (v == INFREQUENT_ACTION_COEF)
pool = infrequent;
else
pool = frequent;
Action action = pool[rng.nextInt(pool.length)];
switch(action) {
case EXECUTE_QUALIFIED:
if (!qualifiedStatements.containsKey(statementId))
continue;
try {
int counter = 0;
for (Iterator<Object[]> iter = RowUtil.toObjects(session.execute(qualifiedStatements.get(statementId).bind())); iter.hasNext(); ) {
Object[] current = iter.next();
int v0 = (int) current[0];
int v1 = (int) current[1];
Assert.assertEquals(v0, 1);
Assert.assertEquals(v1, counter++);
}
Assert.assertEquals(ks, counter);
} catch (Throwable t) {
if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
continue;
throw t;
}
break;
case EXECUTE_UNQUALIFIED:
if (!unqualifiedStatements.containsKey(statementId))
continue;
try {
int counter = 0;
for (Iterator<Object[]> iter = RowUtil.toObjects(session.execute(unqualifiedStatements.get(statementId).bind())); iter.hasNext(); ) {
Object[] current = iter.next();
int v0 = (int) current[0];
int v1 = (int) current[1];
Assert.assertEquals(v0, 1);
Assert.assertEquals(v1, counter++);
}
Assert.assertEquals(unqualifiedStatements.get(statementId).getQueryKeyspace() + " " + usedKs + " " + statementId,
                    Integer.parseInt(unqualifiedStatements.get(statementId).getQueryKeyspace().replace("ks", "")),
                    counter);
} catch (Throwable t) {
if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
continue;
throw t;
}
break;
case PREPARE_QUALIFIED:
{
String qs = String.format(qualified, statementIdx, statementIdx, ks);
String keyspace = "ks" + ks;
PreparedStatement preparedQualified = session.prepare(qs);
// With prepared qualified, keyspace will be set to the keyspace of the statement when it was first executed
PreparedStatementHelper.assertHashWithoutKeyspace(preparedQualified, qs, keyspace);
qualifiedStatements.put(statementId, preparedQualified);
}
break;
case PREPARE_UNQUALIFIED:
try {
String qs = String.format(unqualified, statementIdx, statementIdx, ks);
PreparedStatement preparedUnqualified = session.prepare(qs);
Assert.assertEquals(preparedUnqualified.getQueryKeyspace(), usedKs);
PreparedStatementHelper.assertHashWithKeyspace(preparedUnqualified, qs, usedKs);
unqualifiedStatements.put(Pair.create(usedKsIdx, statementIdx), preparedUnqualified);
} catch (InvalidQueryException iqe) {
if (!iqe.getMessage().contains("No keyspace has been"))
throw iqe;
} catch (Throwable t) {
if (usedKs == null) {
// ignored
continue;
}
throw t;
}
break;
case CLEAR_CACHES:
c.get(1).runOnInstance(() -> {
SystemKeyspace.loadPreparedStatements((id, query, keyspace) -> {
if (rng.nextBoolean())
QueryProcessor.instance.evictPrepared(id);
return true;
});
});
break;
case RELOAD_FROM_TABLES:
c.get(1).runOnInstance(QueryProcessor::clearPreparedStatementsCache);
c.get(1).runOnInstance(() -> QueryProcessor.instance.preloadPreparedStatements());
break;
case SWITCH_KEYSPACE:
usedKsIdx = ks;
usedKs = "ks" + ks;
session.execute("USE " + usedKs);
break;
case FORGET_PREPARED:
Map<Pair<Integer, Integer>, PreparedStatement> toCleanup = rng.nextBoolean() ? qualifiedStatements : unqualifiedStatements;
Set<Pair<Integer, Integer>> toDrop = new HashSet<>();
for (Pair<Integer, Integer> e : toCleanup.keySet()) {
if (rng.nextBoolean())
toDrop.add(e);
}
for (Pair<Integer, Integer> e : toDrop) toCleanup.remove(e);
toDrop.clear();
break;
case RECONNECT:
session.close();
cluster.close();
cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
session = cluster.connect();
qualifiedStatements.clear();
unqualifiedStatements.clear();
usedKs = null;
usedKsIdx = -1;
break;
}
}
} catch (Throwable t) {
interrupt.set(true);
t.printStackTrace();
while (true) {
Throwable seen = thrown.get();
Throwable merged = Throwables.merge(seen, t);
if (thrown.compareAndSet(seen, merged))
break;
}
throw t;
} finally {
if (session != null)
session.close();
if (cluster != null)
cluster.close();
}
}));
}
for (Thread thread : threads) thread.start();
for (Thread thread : threads) thread.join();
if (thrown.get() != null)
throw thrown.get();
}
}
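The fuzz loop above selects from two weighted pools of an Action enum that is not shown in this excerpt. The action names below come straight from the switch statement; how they are split between the frequent and infrequent pools is an assumption for illustration only.
enum Action {
    EXECUTE_QUALIFIED, EXECUTE_UNQUALIFIED,
    PREPARE_QUALIFIED, PREPARE_UNQUALIFIED,
    CLEAR_CACHES, RELOAD_FROM_TABLES,
    SWITCH_KEYSPACE, FORGET_PREPARED, RECONNECT
}

// Chosen on most iterations (v != INFREQUENT_ACTION_COEF).
static final Action[] frequent = { Action.EXECUTE_QUALIFIED, Action.EXECUTE_UNQUALIFIED,
                                   Action.PREPARE_QUALIFIED, Action.PREPARE_UNQUALIFIED,
                                   Action.SWITCH_KEYSPACE };

// Chosen roughly once every INFREQUENT_ACTION_COEF iterations; these are the disruptive
// actions that clear caches, reload from tables, forget statements, or reconnect.
static final Action[] infrequent = { Action.CLEAR_CACHES, Action.RELOAD_FROM_TABLES,
                                     Action.FORGET_PREPARED, Action.RECONNECT };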
Use of org.apache.cassandra.distributed.api.Feature.GOSSIP in project cassandra by apache.
The class PreviewRepairTest, method snapshotTest.
@Test
public void snapshotTest() throws IOException, InterruptedException {
try (Cluster cluster = init(Cluster.build(3)
                                   .withConfig(config -> config.set("snapshot_on_repaired_data_mismatch", true)
                                                               .with(GOSSIP)
                                                               .with(NETWORK))
                                   .start())) {
cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
cluster.schemaChange("create table " + KEYSPACE + ".tbl2 (id int primary key, t int)");
// populate 2 tables
insert(cluster.coordinator(1), 0, 100, "tbl");
insert(cluster.coordinator(1), 0, 100, "tbl2");
cluster.forEach((n) -> n.flush(KEYSPACE));
// make sure everything is marked repaired
cluster.get(1).callOnInstance(repair(options(false, false)));
waitMarkedRepaired(cluster);
// make node2 mismatch
unmarkRepaired(cluster.get(2), "tbl");
verifySnapshots(cluster, "tbl", true);
verifySnapshots(cluster, "tbl2", true);
AtomicInteger snapshotMessageCounter = new AtomicInteger();
cluster.filters().verbs(Verb.SNAPSHOT_REQ.id).messagesMatching((from, to, message) -> {
snapshotMessageCounter.incrementAndGet();
return false;
}).drop();
cluster.get(1).callOnInstance(repair(options(true, true)));
verifySnapshots(cluster, "tbl", false);
// tbl2 should not have a mismatch, so the snapshots should be empty here
verifySnapshots(cluster, "tbl2", true);
assertEquals(3, snapshotMessageCounter.get());
// and make sure that we don't try to snapshot again
snapshotMessageCounter.set(0);
cluster.get(3).callOnInstance(repair(options(true, true)));
assertEquals(0, snapshotMessageCounter.get());
}
}
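snapshotTest leans on several helpers (insert, repair, options, waitMarkedRepaired, unmarkRepaired, verifySnapshots) defined elsewhere in PreviewRepairTest. As one hedged example, the insert helper inside the test class likely looks roughly like the sketch below; the exact signature and consistency level are assumptions, not the project's code.
import org.apache.cassandra.distributed.api.ConsistencyLevel;
import org.apache.cassandra.distributed.api.ICoordinator;

// Writes `count` rows starting at `start` into the given table of KEYSPACE,
// matching the populate step used before the repairs above.
static void insert(ICoordinator coordinator, int start, int count, String table) {
    for (int i = start; i < start + count; i++)
        coordinator.execute("insert into " + KEYSPACE + "." + table + " (id, t) values (?, ?)",
                            ConsistencyLevel.QUORUM, i, i);
}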