Use of org.apache.cassandra.distributed.api.ICluster in project cassandra by apache.
Class LargeColumnTest, method testLargeColumns.
private void testLargeColumns(int nodes, int columnSize, int rowCount) throws Throwable {
    Random random = new Random();
    long seed = ThreadLocalRandom.current().nextLong();
    logger.info("Using seed {}", seed);
    try (ICluster cluster = init(builder()
            .withNodes(nodes)
            .withConfig(config -> config
                .set("commitlog_segment_size", String.format("%dMiB", (columnSize * 3) >> 20))
                .set("internode_application_send_queue_reserve_endpoint_capacity", String.format("%dB", (columnSize * 2)))
                .set("internode_application_send_queue_reserve_global_capacity", String.format("%dB", (columnSize * 3)))
                .set("write_request_timeout", "30s")
                .set("read_request_timeout", "30s")
                .set("memtable_heap_space", "1024MiB"))
            .start())) {
        cluster.schemaChange(String.format("CREATE TABLE %s.cf (k int, c text, PRIMARY KEY (k))", KEYSPACE));
        for (int i = 0; i < rowCount; ++i)
            cluster.coordinator(1).execute(String.format("INSERT INTO %s.cf (k, c) VALUES (?, ?);", KEYSPACE), ConsistencyLevel.ALL, i, str(columnSize, random, seed | i));
        for (int i = 0; i < rowCount; ++i) {
            Object[][] results = cluster.coordinator(1).execute(String.format("SELECT k, c FROM %s.cf WHERE k = ?;", KEYSPACE), ConsistencyLevel.ALL, i);
            Assert.assertTrue(str(columnSize, random, seed | i).equals(results[0][1]));
        }
    }
}
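The str(...) helper used to build the column payload is defined elsewhere in LargeColumnTest and is not part of this snippet. A minimal sketch of such a helper, assuming it only needs to produce the same pseudo-random string for a given seed on both the write and the verification path, might look like this (the body below is an assumption, not the project's actual implementation):

    // Hypothetical sketch of a deterministic payload generator keyed by seed.
    // Reseeding the shared Random makes the write and the later read check agree.
    private static String str(int length, Random random, long seed) {
        random.setSeed(seed);
        char[] chars = new char[length];
        for (int i = 0; i < length; i++)
            chars[i] = (char) ('a' + random.nextInt(26));
        return new String(chars);
    }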
Use of org.apache.cassandra.distributed.api.ICluster in project cassandra by apache.
Class MessageFiltersTest, method testMessageMatching.
@Test
public void testMessageMatching() throws Throwable {
    String read = "SELECT * FROM " + KEYSPACE + ".tbl";
    String write = "INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1)";
    try (ICluster<IInvokableInstance> cluster = builder()
            .withNodes(2)
            .withConfig(c -> c.set("range_request_timeout", "2000ms"))
            .start()) {
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + cluster.size() + "};");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
        AtomicInteger counter = new AtomicInteger();
        Set<Integer> verbs = Sets.newHashSet(Arrays.asList(Verb.RANGE_REQ.id, Verb.RANGE_RSP.id, Verb.MUTATION_REQ.id, Verb.MUTATION_RSP.id));
        for (boolean inbound : Arrays.asList(true, false)) {
            counter.set(0);
            // Reads and writes are going to time out in both directions
            IMessageFilters.Filter filter = cluster.filters().allVerbs().inbound(inbound).from(1).to(2).messagesMatching((from, to, msg) -> {
                // Decode and verify the message on the instance; return the result back here
                Integer id = cluster.get(1).callsOnInstance((IIsolatedExecutor.SerializableCallable<Integer>) () -> {
                    Message decoded = Instance.deserializeMessage(msg);
                    return (Integer) decoded.verb().id;
                }).call();
                Assert.assertTrue(verbs.contains(id));
                counter.incrementAndGet();
                return false;
            }).drop();
            for (int i : new int[] { 1, 2 })
                cluster.coordinator(i).execute(read, ConsistencyLevel.ALL);
            for (int i : new int[] { 1, 2 })
                cluster.coordinator(i).execute(write, ConsistencyLevel.ALL);
            filter.off();
            Assert.assertEquals(4, counter.get());
        }
    }
}
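The same filters() API can also be used without inspecting message contents. A minimal sketch, assuming only the entry points already shown above (allVerbs(), from(), to(), drop(), off()), might drop all traffic from node 1 to node 2 for the duration of a single query:

    // Minimal sketch (assumption): drop all node1 -> node2 messages, then restore them.
    IMessageFilters.Filter drop = cluster.filters().allVerbs().from(1).to(2).drop();
    try {
        // Queries issued here will not see node 2 respond to node 1.
        cluster.coordinator(1).execute(read, ConsistencyLevel.ONE);
    } finally {
        drop.off();  // always remove the filter so later operations see a healthy cluster
    }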
Use of org.apache.cassandra.distributed.api.ICluster in project cassandra by apache.
Class NativeProtocolTest, method withClientRequests.
@Test
public void withClientRequests() throws Throwable {
    try (ICluster ignored = init(builder()
            .withNodes(3)
            .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
            .start())) {
        try (com.datastax.driver.core.Cluster cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect()) {
            session.execute("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck));");
            session.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) values (1,1,1);");
            Statement select = new SimpleStatement("select * from " + KEYSPACE + ".tbl;").setConsistencyLevel(ConsistencyLevel.ALL);
            final ResultSet resultSet = session.execute(select);
            assertRows(RowUtil.toObjects(resultSet), row(1, 1, 1));
            Assert.assertEquals(3, cluster.getMetadata().getAllHosts().size());
        }
    }
}
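The same driver session can exercise the native protocol with prepared statements as well. A small sketch, assuming the cluster and table created above and the Java driver's PreparedStatement/BoundStatement API, might read:

    // Minimal sketch (assumption): prepared insert and read-back through the driver.
    PreparedStatement insert = session.prepare("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (?, ?, ?)");
    session.execute(insert.bind(2, 2, 2).setConsistencyLevel(ConsistencyLevel.ALL));
    ResultSet rs = session.execute(new SimpleStatement("SELECT v FROM " + KEYSPACE + ".tbl WHERE pk = 2 AND ck = 2")
            .setConsistencyLevel(ConsistencyLevel.ALL));
    Assert.assertEquals(2, rs.one().getInt("v"));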
Use of org.apache.cassandra.distributed.api.ICluster in project cassandra by apache.
Class NodeToolTest, method testSetCacheCapacityWhenDisabled.
@Test
public void testSetCacheCapacityWhenDisabled() throws Throwable {
    try (ICluster cluster = init(builder().withNodes(1).withConfig(c -> c.set("row_cache_size", "0MiB")).start())) {
        NodeToolResult ringResult = cluster.get(1).nodetoolResult("setcachecapacity", "1", "1", "1");
        ringResult.asserts().stderrContains("is not permitted as this cache is disabled");
    }
}
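The nodetoolResult(...) entry point is not limited to asserting on stderr. A minimal sketch, assuming NodeToolResult's assertion helper also exposes a success() check and that a harmless command such as status succeeds on a healthy single-node cluster, could look like this:

    // Minimal sketch (assumption): assert that a benign nodetool command completes successfully.
    NodeToolResult status = cluster.get(1).nodetoolResult("status");
    status.asserts().success();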
Use of org.apache.cassandra.distributed.api.ICluster in project cassandra by apache.
Class PrepareBatchStatementsTest, method testPreparedBatch.
@Test
public void testPreparedBatch() throws Exception {
    try (ICluster<IInvokableInstance> c = init(builder()
            .withNodes(1)
            .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
            .start())) {
        try (com.datastax.driver.core.Cluster cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
             Session s = cluster.connect()) {
            c.schemaChange(withKeyspace("CREATE KEYSPACE ks1 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
            c.schemaChange(withKeyspace("CREATE TABLE ks1.tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck));"));
            c.schemaChange(withKeyspace("CREATE KEYSPACE ks2 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
            c.schemaChange(withKeyspace("CREATE TABLE ks2.tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck));"));
            String batch1 = "BEGIN BATCH\n" +
                            "UPDATE ks1.tbl SET v = ? where pk = ? and ck = ?;\n" +
                            "UPDATE ks2.tbl SET v = ? where pk = ? and ck = ?;\n" +
                            "APPLY BATCH;";
            String batch2 = "BEGIN BATCH\n" +
                            "INSERT INTO ks1.tbl (pk, ck, v) VALUES (?, ?, ?);\n" +
                            "INSERT INTO tbl (pk, ck, v) VALUES (?, ?, ?);\n" +
                            "APPLY BATCH;";
            PreparedStatement prepared;

            prepared = s.prepare(batch1);
            s.execute(prepared.bind(1, 1, 1, 1, 1, 1));
            c.get(1).runOnInstance(() -> {
                // No USE here, only a fully qualified batch - it should get stored ONCE
                List<String> stmts = StorageService.instance.getPreparedStatements().stream().map(p -> p.right).collect(Collectors.toList());
                assertEquals(Lists.newArrayList(batch1), stmts);
                QueryProcessor.clearPreparedStatements(false);
            });

            s.execute("use ks2");
            prepared = s.prepare(batch1);
            s.execute(prepared.bind(1, 1, 1, 1, 1, 1));
            c.get(1).runOnInstance(() -> {
                // After USE, still fully qualified - it should get stored twice: once with a null keyspace (new behaviour), once with the ks2 keyspace (old behaviour)
                List<String> stmts = StorageService.instance.getPreparedStatements().stream().map(p -> p.right).collect(Collectors.toList());
                assertEquals(Lists.newArrayList(batch1, batch1), stmts);
                QueryProcessor.clearPreparedStatements(false);
            });

            prepared = s.prepare(batch2);
            s.execute(prepared.bind(1, 1, 1, 1, 1, 1));
            c.get(1).runOnInstance(() -> {
                // After USE, it should get stored twice: once with the keyspace, once without
                List<String> stmts = StorageService.instance.getPreparedStatements().stream().map(p -> p.right).collect(Collectors.toList());
                assertEquals(Lists.newArrayList(batch2, batch2), stmts);
                QueryProcessor.clearPreparedStatements(false);
            });
        }
    }
}
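The StorageService.instance.getPreparedStatements() call used inside runOnInstance(...) returns pairs whose right element is the prepared query string. A minimal sketch, assuming only the count of cached statements matters rather than their exact text, could assert on the cache size instead:

    // Minimal sketch (assumption): verify only how many prepared statements remain cached.
    c.get(1).runOnInstance(() -> {
        int cached = StorageService.instance.getPreparedStatements().size();
        assertEquals(0, cached);  // nothing should remain after clearPreparedStatements(false)
    });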