Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From class AuthTest, method authSetupIsCalledAfterStartup.
/**
* Simply tests that initialisation of a test Instance results in
* StorageService.instance.doAuthSetup being called as the regular
* startup does in CassandraDaemon.setup
*/
@Test
public void authSetupIsCalledAfterStartup() throws IOException {
    try (Cluster cluster = Cluster.build().withNodes(1).start()) {
        boolean setupCalled = cluster.get(1).callOnInstance(() -> {
            long maxWait = TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
            long start = System.nanoTime();
            while (!StorageService.instance.authSetupCalled() && System.nanoTime() - start < maxWait)
                Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
            return StorageService.instance.authSetupCalled();
        });
        assertTrue(setupCalled);
    }
}
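The busy-wait inside callOnInstance is a recurring dtest idiom: poll a condition until it becomes true or a deadline passes. A minimal sketch of the same pattern factored into a reusable helper; SpinAwait is a hypothetical name, not part of the dtest API:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

import com.google.common.util.concurrent.Uninterruptibles;

// Hypothetical helper, not part of the Cassandra test utilities.
final class SpinAwait {
    // Polls the condition once per second until it is true or the timeout
    // elapses; returns the condition's final value.
    static boolean await(BooleanSupplier condition, long timeout, TimeUnit unit) {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        while (!condition.getAsBoolean() && System.nanoTime() - deadline < 0)
            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
        return condition.getAsBoolean();
    }
}

With such a helper, the lambda above reduces to return SpinAwait.await(StorageService.instance::authSetupCalled, 10, TimeUnit.SECONDS);.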
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From class ForceRepairTest, method force.
private void force(boolean includeDifference) throws IOException {
    long nowInMicro = System.currentTimeMillis() * 1000;
    try (Cluster cluster = Cluster.build(3)
                                  .withConfig(c -> c.set("hinted_handoff_enabled", false)
                                                    .with(Feature.values()))
                                  .start()) {
        init(cluster);
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (k INT PRIMARY KEY, v INT)"));
        for (int i = 0; i < 10; i++)
            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (k, v) VALUES (?, ?) USING TIMESTAMP ?"), ConsistencyLevel.ALL, i, i, nowInMicro++);
        ClusterUtils.stopUnchecked(cluster.get(2));

        // repair should fail because node2 is down
        IInvokableInstance node1 = cluster.get(1);
        for (String[] args : Arrays.asList(new String[] { "--full" },
                                           new String[] { "--full", "--preview" }, // nothing should be in the repaired set, so shouldn't stream
                                           new String[] { "--full", "--validate" }, // IR Preview
                                           new String[] { "--preview" }, // nothing should be in the repaired set, so shouldn't stream
                                           new String[] { "--validate" }, // IR
                                           new String[0])) {
            if (includeDifference)
                // each loop should have a different timestamp, causing a new difference
                node1.executeInternal(withKeyspace("INSERT INTO %s.tbl (k, v) VALUES (?, ?) USING TIMESTAMP ?"), -1, -1, nowInMicro++);
            try {
                node1.nodetoolResult(ArrayUtils.addAll(new String[] { "repair", KEYSPACE }, args)).asserts().failure();
                node1.nodetoolResult(ArrayUtils.addAll(new String[] { "repair", KEYSPACE, "--force" }, args)).asserts().success();
                assertNoRepairedAt(cluster);
            } catch (Exception | Error e) {
                // tag the error to include which args broke
                e.addSuppressed(new AssertionError("Failure for args: " + Arrays.toString(args)));
                throw e;
            }
        }
        if (includeDifference) {
            SimpleQueryResult expected = QueryResults.builder().row(-1, -1).build();
            for (IInvokableInstance node : Arrays.asList(node1, cluster.get(3))) {
                SimpleQueryResult results = node.executeInternalWithResult(withKeyspace("SELECT * FROM %s.tbl WHERE k=?"), -1);
                expected.reset();
                AssertUtils.assertRows(results, expected);
            }
        }
    }
}
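The excerpt shows only the private driver; the @Test entry points that call force(boolean) are not included. One plausible shape for them, with method names that are assumptions rather than the actual ones:

// Hypothetical entry points for the force(boolean) driver above.
@Test
public void forceWithoutDifference() throws IOException {
    force(false); // replicas agree, so the --force repair has no mismatch to reconcile
}

@Test
public void forceWithDifference() throws IOException {
    force(true); // node1 takes an extra local write each round, creating a mismatch
}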
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From class NodeNotInRingTest, method nodeNotInRingTest.
@Test
public void nodeNotInRingTest() throws Throwable {
    try (Cluster cluster = builder().withNodes(3).withConfig(config -> config.with(NETWORK, GOSSIP)).start()) {
        cluster.schemaChange("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};");
        cluster.schemaChange("CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");

        // silence node3's gossip, then remove it from the ring as seen by nodes 1 and 2
        cluster.filters().verbs(Verb.GOSSIP_DIGEST_ACK.id, Verb.GOSSIP_DIGEST_SYN.id).from(3).outbound().drop().on();
        cluster.run(GossipHelper.removeFromRing(cluster.get(3)), 1, 2);
        cluster.run(inst -> inst.runOnInstance(() -> {
            Assert.assertEquals("There should be 2 remaining nodes in ring", 2,
                                StorageService.instance.effectiveOwnershipWithPort(KEYSPACE).size());
        }), 1, 2);

        populate(cluster, 0, 50, 1, ConsistencyLevel.ALL);
        populate(cluster, 50, 100, 2, ConsistencyLevel.ALL);

        Map<Integer, Long> counts = BootstrapTest.count(cluster);
        Assert.assertEquals(0L, counts.get(3).longValue());
        Assert.assertEquals(100L, counts.get(2).longValue());
        Assert.assertEquals(100L, counts.get(1).longValue());
    }
}
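populate and BootstrapTest.count are helpers defined outside this excerpt. A sketch of what populate presumably does, consistent with the schema and the call sites above; the exact signature is an assumption:

// Sketch: writes rows [from, to) through the given coordinator at the given
// consistency level, matching the (pk, ck, v) schema created above.
static void populate(Cluster cluster, int from, int to, int coordinator, ConsistencyLevel cl) {
    for (int i = from; i < to; i++)
        cluster.coordinator(coordinator)
               .execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (?, ?, ?)", cl, i, i, i);
}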
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From class HostReplacementTest, method replaceDownedHost.
/**
* Attempt to do a host replacement on a down host
*/
@Test
public void replaceDownedHost() throws IOException {
    // start with 2 nodes, stop node2, then host-replace it while it is down
    TokenSupplier even = TokenSupplier.evenlyDistributedTokens(2);
    try (Cluster cluster = Cluster.build(2)
                                  .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
                                  // the replacement joins as node3 but must claim node2's token
                                  .withTokenSupplier(node -> even.token(node == 3 ? 2 : node))
                                  .start()) {
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance nodeToRemove = cluster.get(2);
        setupCluster(cluster);

        // collect rows to detect issues later on if the state doesn't match
        SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);

        stopUnchecked(nodeToRemove);

        // now create a new node to replace the other node
        IInvokableInstance replacingNode = replaceHostAndStart(cluster, nodeToRemove, props -> {
            // with a downed host, an old schema version may show up that can't
            // be fetched from it, so skip the schema check
            props.set(BOOTSTRAP_SKIP_SCHEMA_CHECK, true);
        });

        // wait till the replacing node is in the ring
        awaitRingJoin(seed, replacingNode);
        awaitRingJoin(replacingNode, seed);

        // make sure all nodes are healthy
        awaitRingHealthy(seed);
        assertRingIs(seed, seed, replacingNode);
        logger.info("Current ring is {}", assertRingIs(replacingNode, seed, replacingNode));

        validateRows(seed.coordinator(), expectedState);
        validateRows(replacingNode.coordinator(), expectedState);
    }
}
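setupCluster is defined elsewhere in HostReplacementTest. A sketch of the kind of setup both replacement tests rely on; the schema and row count are assumptions, though the replication factor must be at least 2 for the data to survive losing node2:

// Hypothetical sketch: creates a replicated table and writes a few rows so the
// tests have state to validate after the replacement attempt.
static void setupCluster(Cluster cluster) {
    cluster.schemaChange("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE +
                         " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}");
    cluster.schemaChange("CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".tbl (pk int PRIMARY KEY, v int)");
    for (int i = 0; i < 10; i++)
        cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, v) VALUES (?, ?)", ConsistencyLevel.ALL, i, i);
}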
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From class HostReplacementTest, method replaceAliveHost.
/**
* Attempt to do a host replacement on a live host
*/
@Test
public void replaceAliveHost() throws IOException {
    // start with 2 nodes and attempt to host-replace node2 while it is still alive
    TokenSupplier even = TokenSupplier.evenlyDistributedTokens(2);
    try (Cluster cluster = Cluster.build(2)
                                  .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK)
                                                    .set(Constants.KEY_DTEST_API_STARTUP_FAILURE_AS_SHUTDOWN, false))
                                  // the replacement would join as node3 and claim node2's token
                                  .withTokenSupplier(node -> even.token(node == 3 ? 2 : node))
                                  .start()) {
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance nodeToRemove = cluster.get(2);
        setupCluster(cluster);

        // collect rows to detect issues later on if the state doesn't match
        SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);

        // attempting to replace a live node must fail at startup
        Assertions.assertThatThrownBy(() -> replaceHostAndStart(cluster, nodeToRemove))
                  .as("Startup of instance should have failed as you can not replace a live node")
                  .hasMessageContaining("Cannot replace a live node")
                  .isInstanceOf(UnsupportedOperationException.class);

        // make sure all nodes are healthy
        awaitRingHealthy(seed);
        assertRingIs(seed, seed, nodeToRemove);
        logger.info("Current ring is {}", assertRingIs(nodeToRemove, seed, nodeToRemove));

        validateRows(seed.coordinator(), expectedState);
        validateRows(nodeToRemove.coordinator(), expectedState);
    }
}
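validateRows also lives outside the excerpt. A sketch consistent with how SimpleQueryResult and AssertUtils are used in the ForceRepairTest example earlier; the body is an assumption:

// Sketch: re-reads the whole table through the given coordinator and asserts
// the rows match the captured state; reset() rewinds the expected result so
// it can be iterated again.
static void validateRows(ICoordinator coordinator, SimpleQueryResult expected) {
    expected.reset();
    SimpleQueryResult actual = coordinator.executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
    AssertUtils.assertRows(actual, expected);
}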