Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class AutoBootstrapTest, method autoBootstrapTest.
// Originally part of BootstrapTest. Broken out into a separate test, as the in-JVM dtests fail
// if too many instances are created in the same JVM; a bug in the JVM is suspected.
@Test
public void autoBootstrapTest() throws Throwable
{
    int originalNodeCount = 2;
    int expandedNodeCount = originalNodeCount + 1;
    try (Cluster cluster = builder().withNodes(originalNodeCount)
                                    .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(expandedNodeCount))
                                    .withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(expandedNodeCount, "dc0", "rack0"))
                                    .withConfig(config -> config.with(NETWORK, GOSSIP))
                                    .start())
    {
        populate(cluster, 0, 100);
        bootstrapAndJoinNode(cluster);
        for (Map.Entry<Integer, Long> e : count(cluster).entrySet())
            Assert.assertEquals("Node " + e.getKey() + " has incorrect row state", 100L, e.getValue().longValue());
    }
}
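The populate and count helpers referenced above live in BootstrapTest and are not shown on this page. A minimal sketch of what they might look like, assuming a simple pk/v table in KEYSPACE, RF 3, and QUORUM writes (the signatures and schema of the real BootstrapTest helpers may differ):

// Hypothetical sketch of the BootstrapTest helpers used above.
public static void populate(Cluster cluster, int from, int to)
{
    // RF 3 so that, once the third node joins, every node owns a full replica
    // and the per-node counts in the test can all equal the row count
    cluster.schemaChange("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE +
                         " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
    cluster.schemaChange("CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".tbl (pk int PRIMARY KEY, v int)");
    // write rows [from, to) through node 1 acting as coordinator
    for (int i = from; i < to; i++)
        cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, v) VALUES (?, ?)",
                                       ConsistencyLevel.QUORUM, i, i);
}

public static Map<Integer, Long> count(Cluster cluster)
{
    // count the rows each node holds locally, keyed by node id
    Map<Integer, Long> counts = new HashMap<>();
    for (int i = 1; i <= cluster.size(); i++)
        counts.put(i, (Long) cluster.get(i).executeInternal("SELECT count(*) FROM " + KEYSPACE + ".tbl")[0][0]);
    return counts;
}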
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class CommunicationDuringDecommissionTest, method internodeConnectionsDuringDecom.
@Test
public void internodeConnectionsDuringDecom() throws Throwable
{
    try (Cluster cluster = builder().withNodes(4)
                                    .withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL))
                                    .start())
    {
        BootstrapTest.populate(cluster, 0, 100);
        cluster.run(decommission(), 1);
        cluster.filters().allVerbs().from(1).messagesMatching((from, to, message) -> {
            throw new AssertionError("Decommissioned node should not send any messages");
        }).drop();
        Map<Integer, Long> connectionAttempts = new HashMap<>();
        long deadline = currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
        // Wait 10 seconds and check that no new connection attempts are made to the decommissioned node
        while (currentTimeMillis() <= deadline)
        {
            for (int i = 2; i <= cluster.size(); i++)
            {
                Object[][] res = cluster.get(i).executeInternal("SELECT active_connections, connection_attempts FROM system_views.internode_outbound WHERE address = '127.0.0.1' AND port = 7012");
                Assert.assertEquals(1, res.length);
                Assert.assertEquals(0L, ((Long) res[0][0]).longValue());
                long attempts = ((Long) res[0][1]).longValue();
                if (connectionAttempts.get(i) == null)
                    connectionAttempts.put(i, attempts);
                else
                    Assert.assertEquals(connectionAttempts.get(i), (Long) attempts);
            }
            LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
        }
    }
}
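The decommission() action passed to cluster.run comes from the test helpers (likely GossipHelper) and is not shown here. A plausible sketch, assuming it simply runs nodetool decommission inside the target instance; the real helper may take other arguments or use internal APIs instead:

// Hypothetical sketch of the decommission() helper used above.
public static IIsolatedExecutor.SerializableConsumer<IInvokableInstance> decommission()
{
    // invoke `nodetool decommission` on the target in-JVM instance and fail fast on error
    return instance -> instance.nodetoolResult("decommission").asserts().success();
}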
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class StreamingTest, method testStreaming.
private void testStreaming(int nodes, int replicationFactor, int rowCount, String compactionStrategy) throws Throwable
{
    // this test expects there to be only a single sstable to stream (with ddirs = 3, we get 3 sstables)
    try (Cluster cluster = builder().withNodes(nodes)
                                    .withDataDirCount(1)
                                    .withConfig(config -> config.with(NETWORK))
                                    .start())
    {
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + replicationFactor + "};");
        cluster.schemaChange(String.format("CREATE TABLE %s.cf (k text, c1 text, c2 text, PRIMARY KEY (k)) WITH compaction = {'class': '%s', 'enabled': 'true'}", KEYSPACE, compactionStrategy));
        // populate every node except the last, which will be rebuilt via streaming
        for (int i = 0; i < rowCount; ++i)
        {
            for (int n = 1; n < nodes; ++n)
                cluster.get(n).executeInternal(String.format("INSERT INTO %s.cf (k, c1, c2) VALUES (?, 'value1', 'value2');", KEYSPACE), Integer.toString(i));
        }
        cluster.get(nodes).executeInternal("TRUNCATE system.available_ranges;");
        {
            Object[][] results = cluster.get(nodes).executeInternal(String.format("SELECT k, c1, c2 FROM %s.cf;", KEYSPACE));
            Assert.assertEquals(0, results.length);
        }
        // collect message and state
        registerSink(cluster, nodes);
        cluster.get(nodes).runOnInstance(() -> StorageService.instance.rebuild(null, KEYSPACE, null, null));
        {
            Object[][] results = cluster.get(nodes).executeInternal(String.format("SELECT k, c1, c2 FROM %s.cf;", KEYSPACE));
            Assert.assertEquals(rowCount, results.length);
            Arrays.sort(results, Comparator.comparingInt(a -> Integer.parseInt((String) a[0])));
            for (int i = 0; i < results.length; ++i)
            {
                Assert.assertEquals(Integer.toString(i), results[i][0]);
                Assert.assertEquals("value1", results[i][1]);
                Assert.assertEquals("value2", results[i][2]);
            }
        }
    }
}
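testStreaming is a private helper, so the @Test entry points that drive it are not shown on this page. A hypothetical caller, under the assumption that the node count, row count, and compaction strategy are free to vary:

// Hypothetical caller; the actual @Test methods in StreamingTest may use
// different parameters.
@Test
public void simpleStreamingTest() throws Throwable
{
    // 2 nodes, RF 2, 1000 rows, leveled compaction
    testStreaming(2, 2, 1000, "LeveledCompactionStrategy");
}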
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class RestartTest, method test.
@Test
public void test() throws Exception
{
    try (Cluster cluster = init(Cluster.build(2).withDataDirCount(1).start()))
    {
        // stop both nodes, then bring them back up in the opposite order
        FBUtilities.waitOnFuture(cluster.get(2).shutdown());
        FBUtilities.waitOnFuture(cluster.get(1).shutdown());
        cluster.get(1).startup();
        cluster.get(2).startup();
    }
}
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class SSTableSkippingReadTest, method skippedSSTableWithPartitionDeletionShadowingDataOnAnotherNode2.
@Test
public void skippedSSTableWithPartitionDeletionShadowingDataOnAnotherNode2() throws Throwable
{
    try (Cluster cluster = init(Cluster.create(2)))
    {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v int, PRIMARY KEY(pk, ck))"));
        // insert a partition tombstone on node 1; the deletion timestamp should end up being the sstable's minTimestamp
        cluster.get(1).executeInternal(withKeyspace("DELETE FROM %s.tbl USING TIMESTAMP 1 WHERE pk = 0"));
        // and a row from a different partition, to provide the sstable's min/max clusterings
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (1, 1, 1) USING TIMESTAMP 3"));
        cluster.get(1).flush(KEYSPACE);
        // sstable 1 has minTimestamp == maxTimestamp == 1 and is skipped due to its min/max clusterings. Now we
        // insert a row which is not shadowed by the partition delete and flush to a second sstable. The first
        // sstable has a maxTimestamp greater than the min timestamp of all sstables, so it is a candidate for
        // reinclusion in the merge. However, the second sstable's minTimestamp is greater than the partition
        // delete's timestamp. This would cause the first sstable not to be reincluded in the merge input, but we
        // can't really make that decision, as we don't know what data and/or tombstones are present on other nodes.
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (0, 6, 6) USING TIMESTAMP 2"));
        cluster.get(1).flush(KEYSPACE);
        // on node 2, add a row for the deleted partition with an older timestamp than the deletion, so it should be shadowed
        cluster.get(2).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (0, 10, 10) USING TIMESTAMP 0"));
        Object[][] rows = cluster.coordinator(1).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=0 AND ck > 5"), ALL);
        // we expect the row from node 2 (0, 10, 10) to be shadowed by the partition delete, while the row from
        // node 1 (0, 6, 6) is not
        assertRows(rows, new Object[] { 0, 6, 6 });
    }
}
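The withKeyspace helper used throughout this example comes from the dtest base class; a minimal sketch, assuming it just substitutes the test keyspace into the %s placeholder of the query template:

// Sketch of the withKeyspace helper; the real dtest base class method should
// be equivalent in effect.
protected static String withKeyspace(String query)
{
    return String.format(query, KEYSPACE);
}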