Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class RepairOperationalTest, method hostFilterDifferentDC.
@Test
public void hostFilterDifferentDC() throws IOException {
    try (Cluster cluster = Cluster.build().withRacks(2, 1, 2).start()) {
        // nodes 1-2 : datacenter1
        // nodes 3-4 : datacenter2
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0}");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (id int PRIMARY KEY, i int)");
        for (int i = 0; i < 10; i++)
            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, i) VALUES (?, ?)", ConsistencyLevel.ALL, i, i);
        cluster.forEach(i -> i.flush(KEYSPACE));

        // choose a node in the DC that doesn't have any replicas
        IInvokableInstance node = cluster.get(3);
        Assertions.assertThat(node.config().localDatacenter()).isEqualTo("datacenter2");

        // without --ignore-unreplicated-keyspaces this fails with "Specified hosts [127.0.0.3, 127.0.0.1] do not share range (0,1000] needed for repair. Either restrict repair ranges with -st/-et options, or specify one of the neighbors that share this range with this node: [].. Check the logs on the repair participants for further details"
        node.nodetoolResult("repair", "-full",
                            "-hosts", cluster.get(1).broadcastAddress().getAddress().getHostAddress(),
                            "-hosts", node.broadcastAddress().getAddress().getHostAddress(),
                            "--ignore-unreplicated-keyspaces",
                            "-st", "0", "-et", "1000",
                            KEYSPACE, "tbl")
            .asserts().success();
    }
}
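The rack layout comment above ("nodes 1-2 : datacenter1, nodes 3-4 : datacenter2") follows from Cluster.build().withRacks(2, 1, 2): two datacenters, one rack per datacenter, two nodes per rack. A minimal sketch that prints the datacenter assignment per node, using only the dtest API already shown in this example (assumed to run inside a test method that declares throws IOException):

try (Cluster cluster = Cluster.build().withRacks(2, 1, 2).start()) {
    for (int n = 1; n <= cluster.size(); n++) {
        // expected output: nodes 1-2 -> datacenter1, nodes 3-4 -> datacenter2
        System.out.println("node " + n + " -> " + cluster.get(n).config().localDatacenter());
    }
}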
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class RepairOperationalTest, method dcFilterOnEmptyDC.
@Test
public void dcFilterOnEmptyDC() throws IOException {
    try (Cluster cluster = Cluster.build().withRacks(2, 1, 2).start()) {
        // nodes 1-2 : datacenter1
        // nodes 3-4 : datacenter2
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0}");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (id int PRIMARY KEY, i int)");
        for (int i = 0; i < 10; i++)
            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, i) VALUES (?, ?)", ConsistencyLevel.ALL, i, i);
        cluster.forEach(i -> i.flush(KEYSPACE));

        // choose a node in the DC that doesn't have any replicas
        IInvokableInstance node = cluster.get(3);
        Assertions.assertThat(node.config().localDatacenter()).isEqualTo("datacenter2");

        // with only -dc datacenter1 this fails with "the local data center must be part of the repair", so the local (empty) datacenter2 is included as well
        node.nodetoolResult("repair", "-full",
                            "-dc", "datacenter1", "-dc", "datacenter2",
                            "--ignore-unreplicated-keyspaces",
                            "-st", "0", "-et", "1000",
                            KEYSPACE, "tbl")
            .asserts().success();
    }
}
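The "fails with" comment above refers to the rule that a -dc filter must include the repair coordinator's own datacenter. A hedged sketch of the failing counterpart, run from node 3 in datacenter2 (this assumes the Asserts helper on NodeToolResult also exposes a failure() check, which is an assumption here rather than something shown in the example):

// hypothetical: repairing only datacenter1 from a datacenter2 coordinator should be rejected
node.nodetoolResult("repair", "-full", "-dc", "datacenter1",
                    "-st", "0", "-et", "1000", KEYSPACE, "tbl")
    .asserts().failure();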
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class HostReplacementTest, method seedGoesDownBeforeDownHost.
/**
 * If the seed goes down, then another node goes down, and the seed then comes back,
 * make sure host replacement of the still-down node works.
 */
@Test
public void seedGoesDownBeforeDownHost() throws IOException {
    // start with 3 nodes, stop the seed and node 2, restart the seed, then host-replace the down node
    TokenSupplier even = TokenSupplier.evenlyDistributedTokens(3);
    try (Cluster cluster = Cluster.build(3)
                                  .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
                                  // the replacement instance joins as node 4 but must claim node 2's tokens
                                  .withTokenSupplier(node -> even.token(node == 4 ? 2 : node))
                                  .start()) {
        // call early as this can't be touched on a down node
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance nodeToRemove = cluster.get(2);
        IInvokableInstance nodeToStayAlive = cluster.get(3);
        setupCluster(cluster);

        // collect rows/tokens to detect issues later on if the state doesn't match
        SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
        List<String> beforeCrashTokens = getTokenMetadataTokens(seed);

        // shutdown the seed, then the node to remove
        stopUnchecked(seed);
        stopUnchecked(nodeToRemove);

        // restart the seed
        seed.startup();

        // make sure the node to remove is still in the ring
        assertInRing(seed, nodeToRemove);

        // make sure node1 still has node2's tokens
        List<String> currentTokens = getTokenMetadataTokens(seed);
        Assertions.assertThat(currentTokens).as("Tokens no longer match after restarting").isEqualTo(beforeCrashTokens);

        // now create a new node to replace the other node
        IInvokableInstance replacingNode = replaceHostAndStart(cluster, nodeToRemove);
        List<IInvokableInstance> expectedRing = Arrays.asList(seed, replacingNode, nodeToStayAlive);

        // wait till the replacing node is in the ring
        awaitRingJoin(seed, replacingNode);
        awaitRingJoin(replacingNode, seed);
        awaitRingJoin(nodeToStayAlive, replacingNode);

        // make sure all nodes are healthy
        logger.info("Current ring is {}", awaitRingHealthy(seed));
        expectedRing.forEach(i -> assertRingIs(i, expectedRing));
        validateRows(seed.coordinator(), expectedState);
        validateRows(replacingNode.coordinator(), expectedState);
    }
}
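replaceHostAndStart is a helper from the dtest utilities and is not shown here; conceptually, host replacement boots a fresh instance that takes over the down node's tokens, which is why the token supplier above maps node 4 onto node 2's token. A rough, hypothetical sketch of that idea under the same in-jvm dtest API (not the actual helper, and the replace-address handling is only indicated by a comment):

// hypothetical sketch only, not the real replaceHostAndStart helper
IInstanceConfig config = cluster.newInstanceConfig();
IInvokableInstance replacement = cluster.bootstrap(config);
// the real helper also points the new instance at the down node, e.g. via the
// cassandra.replace_address_first_boot system property, before starting it
replacement.startup();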
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class TableMetricTest, method systemTables.
/**
 * Makes sure that all system tables have the expected metrics.
 * @throws IOException
 */
@Test
public void systemTables() throws IOException {
    try (Cluster cluster = Cluster.build(1).start()) {
        loadSystemTables(cluster);
        assertSystemTableMetrics(cluster);
    }
}
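loadSystemTables and assertSystemTableMetrics are private helpers of TableMetricTest and are not reproduced here. As a hedged illustration of the kind of per-table metric such a check can read, a test can call into the instance directly; the table and metric below are illustrative assumptions, not necessarily what the helper asserts:

// hypothetical illustration: read one table-level metric inside the single instance
int liveSSTables = cluster.get(1).callOnInstance(() ->
        Keyspace.open("system").getColumnFamilyStore("local").metric.liveSSTableCount.getValue());
System.out.println("system.local live sstables: " + liveSSTables);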
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class StreamingMetricsTest, method testMetricsWithStreamingFromTwoNodes.
public void testMetricsWithStreamingFromTwoNodes(boolean useRepair) throws Exception {
    try (Cluster cluster = init(Cluster.build(3)
                                       .withDataDirCount(1)
                                       .withConfig(config -> config.with(NETWORK)
                                                                   .set("stream_entire_sstables", false)
                                                                   .set("hinted_handoff_enabled", false))
                                       .start(), 2)) {
        cluster.schemaChange(String.format("CREATE TABLE %s.cf (k text, c1 text, c2 text, PRIMARY KEY (k)) WITH compaction = {'class': '%s', 'enabled': 'false'}", KEYSPACE, "LeveledCompactionStrategy"));
        IMessageFilters.Filter drop1to3 = cluster.filters().verbs(MUTATION_REQ.id).from(1).to(3).drop();

        final int rowsPerFile = 500;
        final int files = 5;
        for (int k = 0; k < files; k++) {
            for (int i = k * rowsPerFile; i < k * rowsPerFile + rowsPerFile; ++i) {
                cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.cf (k, c1, c2) VALUES (?, 'value1', 'value2');"), ConsistencyLevel.ONE, Integer.toString(i));
            }
            cluster.get(1).flush(KEYSPACE);
            cluster.get(2).flush(KEYSPACE);
        }
        drop1to3.off();

        // Checks that the table is empty on node 3
        Object[][] results = cluster.get(3).executeInternal(withKeyspace("SELECT k, c1, c2 FROM %s.cf;"));
        assertThat(results.length).isEqualTo(0);
        checkThatNoStreamingOccuredBetweenTheThreeNodes(cluster);

        // Trigger streaming from node 3
        if (useRepair)
            cluster.get(3).nodetool("repair", "--full");
        else
            cluster.get(3).nodetool("rebuild", "--keyspace", KEYSPACE);

        // Check streaming metrics on node 1
        checkThatNoStreamingOccured(cluster, 1, 2);
        long bytesFrom1 = checkDataSent(cluster, 1, 3);
        checkDataReceived(cluster, 1, 3, 0, 0);
        if (useRepair)
            checkTotalDataSent(cluster, 1, bytesFrom1, bytesFrom1, files);
        else
            checkTotalDataSent(cluster, 1, bytesFrom1, 0, 0);
        checkTotalDataReceived(cluster, 1, 0);

        // Check streaming metrics on node 2
        checkThatNoStreamingOccured(cluster, 2, 1);
        long bytesFrom2 = checkDataSent(cluster, 2, 3);
        checkDataReceived(cluster, 1, 2, 0, 0);
        if (useRepair)
            checkTotalDataSent(cluster, 2, bytesFrom2, bytesFrom2, files);
        else
            checkTotalDataSent(cluster, 2, bytesFrom2, 0, 0);
        checkTotalDataReceived(cluster, 2, 0);

        // Check streaming metrics on node 3
        checkDataReceived(cluster, 3, 1, bytesFrom1, files);
        checkDataReceived(cluster, 3, 2, bytesFrom2, files);
        checkTotalDataSent(cluster, 3, 0, 0, 0);
        checkTotalDataReceived(cluster, 3, bytesFrom1 + bytesFrom2);
    }
}
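Note that testMetricsWithStreamingFromTwoNodes carries no @Test annotation; it is a parameterized helper that the class presumably drives from separate test methods, one per streaming path. A minimal sketch of such wrappers (the wrapper method names are assumptions):

@Test
public void testMetricsWithRepairStreamingFromTwoNodes() throws Exception {
    testMetricsWithStreamingFromTwoNodes(true); // streaming triggered by nodetool repair --full
}

@Test
public void testMetricsWithRebuildStreamingFromTwoNodes() throws Exception {
    testMetricsWithStreamingFromTwoNodes(false); // streaming triggered by nodetool rebuild
}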