Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From the class GroupByTest, method groupByWithDeletesAndSrpOnPartitions:
@Test
public void groupByWithDeletesAndSrpOnPartitions() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(2)
                                         .withConfig(cfg -> cfg.set("user_defined_functions_enabled", "true"))
                                         .start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, PRIMARY KEY (pk, ck))"));
        initFunctions(cluster);

        // Diverge the replicas: node 1 holds live rows for pk=1 and pk=2 plus a delete for pk=0,
        // node 2 holds the opposite, so every partition is fully shadowed once both replicas are merged.
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (1, '1') USING TIMESTAMP 0"));
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (2, '2') USING TIMESTAMP 0"));
        cluster.get(1).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=0 AND ck='0'"));
        cluster.get(2).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (0, '0') USING TIMESTAMP 0"));
        cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=1 AND ck='1'"));
        cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=2 AND ck='2'"));

        // Every combination of limit clause and page size must yield an empty result at CL ALL,
        // exercising short-read protection across partitions for GROUP BY queries.
        for (String limitClause : new String[] { "", "LIMIT 1", "LIMIT 10", "PER PARTITION LIMIT 1", "PER PARTITION LIMIT 10" }) {
            String query = withKeyspace("SELECT concat(ck) FROM %s.tbl GROUP BY pk " + limitClause);
            for (int pageSize = 1; pageSize <= 4; pageSize++) {
                Iterator<Object[]> rows = cluster.coordinator(2).executeWithPaging(query, ConsistencyLevel.ALL, pageSize);
                assertRows(Iterators.toArray(rows, Object[].class));
            }
        }
    }
}
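The concat(ck) aggregate used in the query is created by initFunctions, whose body is not shown in this listing. A minimal sketch of what such a helper could look like, assuming a plain string-concatenating state function wrapped in a user-defined aggregate (the function names and bodies here are illustrative, not the project's actual code):

// Hypothetical helper: registers a simple concat user-defined aggregate for the tests.
private static void initFunctions(Cluster cluster) {
    // Assumed state function: appends each clustering value to the running state string.
    cluster.schemaChange(withKeyspace("CREATE FUNCTION %s.concat_fn (state text, val text) " +
                                      "CALLED ON NULL INPUT RETURNS text LANGUAGE java " +
                                      "AS 'return state + val;'"));
    // Assumed aggregate wrapping the state function, so SELECT concat(ck) ... GROUP BY pk is valid CQL.
    cluster.schemaChange(withKeyspace("CREATE AGGREGATE %s.concat (text) " +
                                      "SFUNC concat_fn STYPE text INITCOND ''"));
}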
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From the class GroupByTest, method groupByWithDeletesAndSrpOnRows:
@Test
public void groupByWithDeletesAndSrpOnRows() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(2)
                                         .withConfig(cfg -> cfg.set("user_defined_functions_enabled", "true"))
                                         .start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, PRIMARY KEY (pk, ck))"));
        initFunctions(cluster);

        // Same idea as above, but all rows share partition pk=0: node 1 holds live rows '1' and '2'
        // plus a delete for '0', node 2 holds the opposite, so the merged partition has no live rows.
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (0, '1') USING TIMESTAMP 0"));
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (0, '2') USING TIMESTAMP 0"));
        cluster.get(1).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=0 AND ck='0'"));
        cluster.get(2).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (0, '0') USING TIMESTAMP 0"));
        cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=0 AND ck='1'"));
        cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=0 AND ck='2'"));

        // Every combination of limit clause and page size must yield an empty result at CL ALL,
        // exercising short-read protection on rows within a single partition.
        for (String limitClause : new String[] { "", "LIMIT 1", "LIMIT 10", "PER PARTITION LIMIT 1", "PER PARTITION LIMIT 10" }) {
            String query = withKeyspace("SELECT concat(ck) FROM %s.tbl GROUP BY pk " + limitClause);
            for (int pageSize = 1; pageSize <= 4; pageSize++) {
                Iterator<Object[]> rows = cluster.coordinator(2).executeWithPaging(query, ConsistencyLevel.ALL, pageSize);
                assertRows(Iterators.toArray(rows, Object[].class));
            }
        }
    }
}
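The %s placeholder in each statement is filled in by the test base's withKeyspace helper. A rough sketch of how such a helper is commonly implemented in these distributed tests, assuming the shared keyspace name lives in a KEYSPACE constant (the constant name and value are assumptions, not taken from this listing):

// Assumed helper: substitutes the shared distributed-test keyspace into a query template.
protected static String withKeyspace(String queryTemplate) {
    return String.format(queryTemplate, KEYSPACE); // e.g. KEYSPACE = "distributed_test_keyspace"
}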
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From the class GroupByTest, method testGroupWithDeletesAndPaging:
@Test
public void testGroupWithDeletesAndPaging() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(2)
                                         .withConfig(cfg -> cfg.with(Feature.GOSSIP, Feature.NETWORK, Feature.NATIVE_PROTOCOL))
                                         .start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, PRIMARY KEY (pk, ck))"));
        ICoordinator coordinator = cluster.coordinator(1);

        // Write both rows to both replicas, then delete one row on each node individually,
        // so at CL ALL both deletes are visible and no live rows remain.
        coordinator.execute(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (0, 0)"), ConsistencyLevel.ALL);
        coordinator.execute(withKeyspace("INSERT INTO %s.tbl (pk, ck) VALUES (1, 1)"), ConsistencyLevel.ALL);
        cluster.get(1).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=0 AND ck=0"));
        cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl WHERE pk=1 AND ck=1"));

        // A paged GROUP BY read at CL ALL merges the deletes back in, so the result is empty.
        String query = withKeyspace("SELECT * FROM %s.tbl GROUP BY pk");
        Iterator<Object[]> rows = coordinator.executeWithPaging(query, ConsistencyLevel.ALL, 1);
        assertRows(Iterators.toArray(rows, Object[].class));

        // Repeat the check over the native protocol with the Java driver and a fetch size of 1.
        try (com.datastax.driver.core.Cluster c = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = c.connect()) {
            SimpleStatement stmt = new SimpleStatement(withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 GROUP BY pk"));
            stmt.setFetchSize(1);
            Iterator<Row> rs = session.execute(stmt).iterator();
            Assert.assertFalse(rs.hasNext());
        }
    }
}
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From the class InternodeEncryptionOptionsTest, method optionalTlsConnectionAllowedWithKeystoreTest:
@Test
public void optionalTlsConnectionAllowedWithKeystoreTest() throws Throwable {
    try (Cluster cluster = builder().withNodes(1).withConfig(c -> {
        c.with(Feature.NETWORK);
        c.set("server_encryption_options", validKeystore);
    }).createWithoutStarting()) {
        InetAddress address = cluster.get(1).config().broadcastAddress().getAddress();
        int port = cluster.get(1).config().broadcastAddress().getPort();
        TlsConnection tlsConnection = new TlsConnection(address.getHostAddress(), port);

        // Nothing is listening before the node starts, so the handshake must fail.
        tlsConnection.assertCannotConnect();

        cluster.startup();

        // With a keystore configured, optional TLS is accepted on the storage port by default.
        Assert.assertEquals("TLS connection should be possible with keystore by default",
                            ConnectResult.NEGOTIATED, tlsConnection.connect());
    }
}
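validKeystore is a field defined elsewhere in the test class; conceptually it is a server_encryption_options map pointing at a keystore and truststore shipped with the test resources. A sketch of roughly what such a map might contain (the paths and passwords below are placeholders, not values confirmed by this listing):

// Assumed shape of the validKeystore options map; the real paths and passwords live in the test resources.
static final Map<String, Object> validKeystore = ImmutableMap.of(
        "keystore", "test/conf/test.keystore",
        "keystore_password", "changeit",
        "truststore", "test/conf/test.truststore",
        "truststore_password", "changeit");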
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
From the class InternodeEncryptionOptionsTest, method tlsConnectionRejectedWhenUnencrypted:
@Test
public void tlsConnectionRejectedWhenUnencrypted() throws Throwable {
    try (Cluster cluster = builder().withNodes(1).withConfig(c -> {
        c.with(Feature.NETWORK);
        c.set("server_encryption_options", ImmutableMap.builder()
                                                       .putAll(validKeystore)
                                                       .put("internode_encryption", "none")
                                                       .put("optional", false)
                                                       .build());
    }).createWithoutStarting()) {
        InetAddress address = cluster.get(1).config().broadcastAddress().getAddress();
        int regular_port = (int) cluster.get(1).config().get("storage_port");

        // Create the connection and prove it cannot connect before server start.
        TlsConnection connection = new TlsConnection(address.getHostAddress(), regular_port);
        connection.assertCannotConnect();

        cluster.startup();

        // With internode_encryption=none and optional=false the storage port speaks plain TCP only,
        // so a TLS handshake must fail to negotiate.
        Assert.assertEquals("TLS connection should not be negotiated when internode encryption is disabled and not optional",
                            ConnectResult.FAILED_TO_NEGOTIATE, connection.connect());
    }
}
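TlsConnection is a small probe helper from the same test package; it attempts a TLS handshake against the given address and port and reports whether negotiation succeeded. A simplified sketch of the core check such a probe could perform, assuming a truststore that trusts the node's certificate is already installed (names and structure are illustrative, not the actual helper):

// Simplified probe: try a TLS handshake against host:port and report the outcome.
// Uses javax.net.ssl.SSLSocket / SSLSocketFactory and java.io.IOException.
static boolean canNegotiateTls(String host, int port) {
    try (SSLSocket socket = (SSLSocket) SSLSocketFactory.getDefault().createSocket(host, port)) {
        socket.startHandshake(); // throws if the peer refuses or cannot negotiate TLS
        return true;
    } catch (IOException e) {
        return false;
    }
}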