Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class MigrationCoordinatorTest, method replaceNode.
/**
 * We shouldn't wait on schema versions that are only available from a node being replaced;
 * see CASSANDRA-
*/
@Test
public void replaceNode() throws Throwable {
try (Cluster cluster = Cluster.build(2)
                              .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(3))
                              .withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(3, "dc0", "rack0"))
                              .withConfig(config -> config.with(NETWORK, GOSSIP))
                              .start()) {
cluster.schemaChange("CREATE KEYSPACE ks with replication={'class':'SimpleStrategy', 'replication_factor':2}");
InetAddress replacementAddress = cluster.get(2).broadcastAddress().getAddress();
cluster.get(2).shutdown(false);
cluster.schemaChangeIgnoringStoppedInstances("CREATE TABLE ks.tbl (k int primary key, v int)");
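// configure a new node that bootstraps as a replacement for the stopped node 2 (via cassandra.replace_address)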
IInstanceConfig config = cluster.newInstanceConfig();
config.set("auto_bootstrap", true);
System.setProperty("cassandra.replace_address", replacementAddress.getHostAddress());
cluster.bootstrap(config).startup();
}
}
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class NativeMixedVersionTest, method v4ConnectionCleansUpThreadLocalState.
@Test
public void v4ConnectionCleansUpThreadLocalState() throws IOException {
// limit the netty event loop thread pool to size 1; this makes the test deterministic
// since all work will happen on the single thread.
System.setProperty("io.netty.eventLoopThreads", "1");
try (Cluster cluster = Cluster.build(1)
                              .withConfig(c -> c.with(Feature.values())
                                                .set("track_warnings", ImmutableMap.of("enabled", true,
                                                                                       "local_read_size", ImmutableMap.of("warn_threshold_kb", 1))))
                              .start()) {
init(cluster);
cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck1 int, value blob, PRIMARY KEY (pk, ck1))"));
IInvokableInstance node = cluster.get(1);
ByteBuffer blob = ByteBuffer.wrap("This is just some large string to get some number of bytes".getBytes(StandardCharsets.UTF_8));
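// insert enough data that a full-table read exceeds the 1 KB local_read_size warn threshold configured above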
for (int i = 0; i < 100; i++)
    node.executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck1, value) VALUES (?, ?, ?)"), 0, i, blob);
// query with a protocol version that supports warnings (V5) and expect the warning to be returned
try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V5);
Session session = driver.connect()) {
ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isNotEmpty();
}
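// V3 does not support warnings, so none should be returned to the client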
try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V3);
Session session = driver.connect()) {
ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
}
// the server logs when warnings are attached to a message whose protocol version cannot carry them;
// this should not happen here, so make sure no such log lines are found
List<String> result = node.logs().grep("Warnings present in message with version less than").getResult();
Assertions.assertThat(result).isEmpty();
} finally {
System.clearProperty("io.netty.eventLoopThreads");
}
}
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class NativeTransportEncryptionOptionsTest, method optionalTlsConnectionAllowedWithKeystoreTest.
@Test
public void optionalTlsConnectionAllowedWithKeystoreTest() throws Throwable {
try (Cluster cluster = builder().withNodes(1).withConfig(c -> {
c.with(Feature.NATIVE_PROTOCOL);
c.set("client_encryption_options", validKeystore);
}).createWithoutStarting()) {
InetAddress address = cluster.get(1).config().broadcastAddress().getAddress();
int port = (int) cluster.get(1).config().get("native_transport_port");
TlsConnection tlsConnection = new TlsConnection(address.getHostAddress(), port);
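// the node has not been started yet, so connecting must fail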
tlsConnection.assertCannotConnect();
cluster.startup();
Assert.assertEquals("TLS native connection should be possible with keystore by default", ConnectResult.NEGOTIATED, tlsConnection.connect());
}
}
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class NativeTransportEncryptionOptionsTest, method optionalTlsConnectionAllowedToRegularPortTest.
@Test
public void optionalTlsConnectionAllowedToRegularPortTest() throws Throwable {
try (Cluster cluster = builder().withNodes(1).withConfig(c -> {
c.with(Feature.NATIVE_PROTOCOL);
c.set("native_transport_port_ssl", 9043);
c.set("client_encryption_options", ImmutableMap.builder().putAll(validKeystore).put("enabled", false).put("optional", true).build());
}).createWithoutStarting()) {
InetAddress address = cluster.get(1).config().broadcastAddress().getAddress();
int unencrypted_port = (int) cluster.get(1).config().get("native_transport_port");
int ssl_port = (int) cluster.get(1).config().get("native_transport_port_ssl");
// Create the connections and prove they cannot connect before server start
TlsConnection connectionToUnencryptedPort = new TlsConnection(address.getHostAddress(), unencrypted_port);
connectionToUnencryptedPort.assertCannotConnect();
TlsConnection connectionToEncryptedPort = new TlsConnection(address.getHostAddress(), ssl_port);
connectionToEncryptedPort.assertCannotConnect();
cluster.startup();
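// once the node is up, TLS should negotiate only on the dedicated SSL port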
Assert.assertEquals("TLS native connection should be possible to native_transport_port_ssl", ConnectResult.NEGOTIATED, connectionToEncryptedPort.connect());
Assert.assertEquals("TLS native connection should not be possible on the regular port if an SSL port is specified", ConnectResult.FAILED_TO_NEGOTIATE, // but did connect
connectionToUnencryptedPort.connect());
}
}
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class PreviewRepairTest, method testStartNonIntersectingPreviewRepair.
/**
* Makes sure we can start a non-intersecting preview repair while there are other pending sstables on disk
*/
@Test
public void testStartNonIntersectingPreviewRepair() throws IOException, InterruptedException, ExecutionException {
ExecutorService es = Executors.newSingleThreadExecutor();
try (Cluster cluster = init(Cluster.build(2)
                                   .withConfig(config -> config.with(GOSSIP).with(NETWORK))
                                   .start())) {
cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
insert(cluster.coordinator(1), 0, 100);
cluster.forEach((node) -> node.flush(KEYSPACE));
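// an initial incremental repair marks the first batch of sstables as repaired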
cluster.get(1).nodetoolResult("repair", KEYSPACE, "tbl").asserts().success();
insert(cluster.coordinator(1), 100, 100);
cluster.forEach((node) -> node.flush(KEYSPACE));
// pause inc repair validation messages on node2 until node1 has finished
Condition incRepairStarted = newOneTimeCondition();
Condition continueIncRepair = newOneTimeCondition();
DelayFirstRepairTypeMessageFilter filter = DelayFirstRepairTypeMessageFilter.validationRequest(incRepairStarted, continueIncRepair);
cluster.filters().outbound().verbs(Verb.VALIDATION_REQ.id).from(1).to(2).messagesMatching(filter).drop();
// get local ranges to repair two separate ranges:
List<String> localRanges = cluster.get(1).callOnInstance(() -> {
List<String> res = new ArrayList<>();
for (Range<Token> r : StorageService.instance.getLocalReplicas(KEYSPACE).ranges()) res.add(r.left.getTokenValue() + ":" + r.right.getTokenValue());
return res;
});
assertEquals(2, localRanges.size());
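// incrementally repair one local range while previewing the other, non-intersecting range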
String[] previewedRange = localRanges.get(0).split(":");
String[] repairedRange = localRanges.get(1).split(":");
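// start the incremental repair on another thread; the message filter pauses it at the first validation request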
Future<NodeToolResult> repairStatusFuture = es.submit(() -> cluster.get(1).nodetoolResult("repair", "-st", repairedRange[0], "-et", repairedRange[1], KEYSPACE, "tbl"));
// wait for node1 to start validation compaction
incRepairStarted.await();
// now we have pending sstables in range "repairedRange", make sure we can preview "previewedRange"
cluster.get(1).nodetoolResult("repair", "-vd", "-st", previewedRange[0], "-et", previewedRange[1], KEYSPACE, "tbl").asserts().success().notificationContains("Repaired data is in sync");
continueIncRepair.signalAll();
repairStatusFuture.get().asserts().success();
} finally {
es.shutdown();
}
}