Example usage of org.apache.cassandra.distributed.Cluster from the Apache Cassandra project: the getCluster method of class UnableToParseClientMessageFromBlockedSubnetTest.
/**
 * Returns the shared test cluster, (re)creating it when no cluster exists yet or when the
 * cached cluster was built with a different subnet-exclusion list than the one currently
 * requested via {@code excludeSubnets}.
 *
 * @return an initialized single-node {@code Cluster} configured with the requested
 *         {@code client_error_reporting_exclusions} subnets
 * @throws UncheckedIOException if cluster startup fails with an {@link IOException}
 */
private Cluster getCluster() {
// Compare by value, not by reference: the original `!=` check rebuilt the cluster even when
// an equal-but-distinct exclusion list was supplied. Objects.equals is also null-safe.
if (CLUSTER == null || !java.util.Objects.equals(CLUSTER_EXCLUDED_SUBNETS, excludeSubnets)) {
if (CLUSTER != null) {
// Dispose of the stale cluster before building a replacement.
CLUSTER.close();
CLUSTER = null;
}
try {
CLUSTER = init(Cluster.build(1).withConfig(c -> c.with(Feature.values()).set("client_error_reporting_exclusions", ImmutableMap.of("subnets", excludeSubnets))).start());
// Remember which exclusions this cluster was built with so later calls can reuse it.
CLUSTER_EXCLUDED_SUBNETS = excludeSubnets;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
return CLUSTER;
}
Example usage of org.apache.cassandra.distributed.Cluster from the Apache Cassandra project: the doTest method of class ResourceLeakTest.
/**
 * Repeatedly builds, exercises, and tears down a cluster so that resources leaked across
 * iterations can be observed via the periodic dumps.
 *
 * @param numClusterNodes number of nodes in each cluster instance
 * @param updater         per-instance configuration hook applied to every node
 * @throws Throwable if cluster construction outside the per-loop try block fails
 */
void doTest(int numClusterNodes, Consumer<IInstanceConfig> updater) throws Throwable {
for (int iteration = 0; iteration < numTestLoops; iteration++) {
String startBanner = String.format("========== Starting loop %03d ========", iteration);
System.out.println(startBanner);
try (Cluster cluster = (Cluster) builder().withNodes(numClusterNodes).withConfig(updater).start()) {
init(cluster);
// A fresh table per iteration keeps schema state from earlier loops out of the picture.
String tableName = "tbl" + iteration;
cluster.schemaChange("CREATE TABLE " + KEYSPACE + "." + tableName + " (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + "." + tableName + "(pk,ck,v) VALUES (0,0,0)", ConsistencyLevel.ALL);
cluster.get(1).flush(KEYSPACE);
if (dumpEveryLoop)
dumpResources(String.format("loop%03d", iteration));
} catch (Throwable t) {
// Deliberately swallow after dumping: later iterations should still run so the
// leak pattern across loops remains visible.
System.out.println("Dumping resources for exception: " + t.getMessage());
t.printStackTrace();
dumpResources("exception");
}
if (forceCollection) {
// Encourage the JVM to release per-loop garbage before the next iteration starts.
System.runFinalization();
System.gc();
}
String endBanner = String.format("========== Completed loop %03d ========", iteration);
System.out.println(endBanner);
}
}
Example usage of org.apache.cassandra.distributed.Cluster from the Apache Cassandra project: the writeWithInconsequentialSchemaDisagreement method of class SchemaDisagreementTest.
/**
 * If a node isn't aware of a column, but receives a mutation without that column, the write
 * should succeed: a schema disagreement that the mutation never touches is inconsequential.
 */
@Test
public void writeWithInconsequentialSchemaDisagreement() throws Throwable {
try (Cluster cluster = init(builder().withNodes(3).withConfig(config -> config.with(NETWORK)).start())) {
cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v1 int, PRIMARY KEY (pk, ck))"));
// Seed the same row on every replica directly, bypassing the coordinator path.
for (int node = 1; node <= 3; node++)
cluster.get(node).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v1) VALUES (1, 1, 1)"));
// Introduce schema disagreement: only node 1 learns about the new column.
cluster.schemaChange(withKeyspace("ALTER TABLE %s.tbl ADD v2 int"), 1);
// This write shouldn't cause any problems because it doesn't write to the new column.
cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v1) VALUES (2, 2, 2)"), ALL);
}
}
Example usage of org.apache.cassandra.distributed.Cluster from the Apache Cassandra project: the mutationsForwardedToAllReplicasTest method of class MessageForwardingTest.
@Test
public void mutationsForwardedToAllReplicasTest() {
// Shrink the tracing-event wait so the trace query below doesn't stall the test;
// the original timeout is restored in the finally block.
String originalTraceTimeout = TracingUtil.setWaitForTracingEventTimeoutSecs("1");
final int numInserts = 100;
// Per-node counters keyed by broadcast address: forwards observed / commitlog appends.
Map<InetAddress, Integer> forwardFromCounts = new HashMap<>();
Map<InetAddress, Integer> commitCounts = new HashMap<>();
// Two DCs so that cross-DC writes are forwarded through a single node in the remote DC.
try (Cluster cluster = (Cluster) init(builder().withDC("dc0", 1).withDC("dc1", 3).start())) {
cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v text, PRIMARY KEY (pk, ck))");
cluster.forEach(instance -> commitCounts.put(instance.broadcastAddress().getAddress(), 0));
// All inserts share one tracing session so a single trace query covers them all.
final UUID sessionId = UUIDGen.getTimeUUID();
Stream<Future<Object[][]>> inserts = IntStream.range(0, numInserts).mapToObj((idx) -> {
return cluster.coordinator(1).asyncExecuteWithTracing(sessionId, "INSERT INTO " + KEYSPACE + ".tbl(pk,ck,v) VALUES (1, 1, 'x')", ConsistencyLevel.ALL);
});
// Wait for each of the futures to complete before checking the traces, don't care
// about the result so
// noinspection ResultOfMethodCallIgnored
inserts.map(IsolatedExecutor::waitOn).collect(Collectors.toList());
// Only dc1 nodes can act as forwarders for writes coordinated from dc0.
cluster.stream("dc1").forEach(instance -> forwardFromCounts.put(instance.broadcastAddress().getAddress(), 0));
// NOTE(review): commitCounts was already zeroed above before the inserts ran;
// this second pass appears redundant — confirm it isn't masking a reset intent.
cluster.forEach(instance -> commitCounts.put(instance.broadcastAddress().getAddress(), 0));
List<TracingUtil.TraceEntry> traces = TracingUtil.getTrace(cluster, sessionId, ConsistencyLevel.ALL);
// Classify each trace entry by its activity text and attribute it to its source node.
traces.forEach(traceEntry -> {
if (traceEntry.activity.contains("Appending to commitlog")) {
commitCounts.compute(traceEntry.source, (k, v) -> (v != null ? v : 0) + 1);
} else if (traceEntry.activity.contains("Enqueuing forwarded write to ")) {
forwardFromCounts.compute(traceEntry.source, (k, v) -> (v != null ? v : 0) + 1);
}
});
// Check that each node in dc1 was the forwarder at least once. There is a (1/3)^numInserts chance
// that the same node will be picked, but the odds of that are ~2e-48.
forwardFromCounts.forEach((source, count) -> Assert.assertTrue(source + " should have been randomized to forward messages", count > 0));
// Check that each node received the forwarded messages once (and only once)
commitCounts.forEach((source, count) -> Assert.assertEquals(source + " appending to commitlog traces", (long) numInserts, (long) count));
} catch (IOException e) {
Assert.fail("Threw exception: " + e);
} finally {
// Restore the tracing timeout regardless of outcome so other tests are unaffected.
TracingUtil.setWaitForTracingEventTimeoutSecs(originalTraceTimeout);
}
}
Example usage of org.apache.cassandra.distributed.Cluster from the Apache Cassandra project: the explicitEndpointIgnore method of class MigrationCoordinatorTest.
/**
 * Verifies that a bootstrapping node can be told, via the
 * {@code MigrationCoordinator.IGNORED_ENDPOINTS_PROP} system property, to ignore a down
 * endpoint when pulling schema, allowing bootstrap to proceed with node 2 shut down.
 */
@Test
public void explicitEndpointIgnore() throws Throwable {
try (Cluster cluster = Cluster.build(2).withTokenSupplier(TokenSupplier.evenlyDistributedTokens(3)).withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(3, "dc0", "rack0")).withConfig(config -> config.with(NETWORK, GOSSIP)).start()) {
cluster.schemaChange("CREATE KEYSPACE ks with replication={'class':'SimpleStrategy', 'replication_factor':2}");
InetAddress ignoredEndpoint = cluster.get(2).broadcastAddress().getAddress();
// Take node 2 down so the bootstrapping node cannot pull schema from it.
cluster.get(2).shutdown(false);
cluster.schemaChangeIgnoringStoppedInstances("CREATE TABLE ks.tbl (k int primary key, v int)");
IInstanceConfig config = cluster.newInstanceConfig();
config.set("auto_bootstrap", true);
// Save the previous property values so they can be restored: leaving these set would
// leak state into every later test running in the same JVM.
String previousIgnored = System.getProperty(MigrationCoordinator.IGNORED_ENDPOINTS_PROP);
String previousRangeMovement = System.getProperty("cassandra.consistent.rangemovement");
System.setProperty(MigrationCoordinator.IGNORED_ENDPOINTS_PROP, ignoredEndpoint.getHostAddress());
System.setProperty("cassandra.consistent.rangemovement", "false");
try {
cluster.bootstrap(config).startup();
} finally {
restoreProperty(MigrationCoordinator.IGNORED_ENDPOINTS_PROP, previousIgnored);
restoreProperty("cassandra.consistent.rangemovement", previousRangeMovement);
}
}
}

/** Restores a system property to its prior value, clearing it if it was previously unset. */
private static void restoreProperty(String key, String previousValue) {
if (previousValue == null)
System.clearProperty(key);
else
System.setProperty(key, previousValue);
}
Aggregations