Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class MessageFiltersTest, method outboundBeforeInbound.
@Test
public void outboundBeforeInbound() throws Throwable {
    // Verifies that for an ECHO request/response round trip between two nodes,
    // the outbound filter for a verb always fires before the inbound filter.
    try (Cluster cluster = Cluster.create(2)) {
        InetAddressAndPort other = InetAddressAndPort.getByAddressOverrideDefaults(cluster.get(2).broadcastAddress().getAddress(), cluster.get(2).broadcastAddress().getPort());
        CountDownLatch waitForIt = new CountDownLatch(1);
        // Filter callbacks run on messaging threads, not the test thread; use
        // concurrent sets so additions are neither lost nor invisible to the
        // assertions performed on the test thread below.
        Set<Integer> outboundMessagesSeen = java.util.concurrent.ConcurrentHashMap.newKeySet();
        Set<Integer> inboundMessagesSeen = java.util.concurrent.ConcurrentHashMap.newKeySet();
        AtomicBoolean outboundAfterInbound = new AtomicBoolean(false);
        cluster.filters().outbound().verbs(Verb.ECHO_REQ.id, Verb.ECHO_RSP.id).messagesMatching((from, to, msg) -> {
            outboundMessagesSeen.add(msg.verb());
            // If the inbound filter already recorded this verb, ordering is broken.
            if (inboundMessagesSeen.contains(msg.verb()))
                outboundAfterInbound.set(true);
            return false;
        }).drop();
        cluster.filters().inbound().verbs(Verb.ECHO_REQ.id, Verb.ECHO_RSP.id).messagesMatching((from, to, msg) -> {
            inboundMessagesSeen.add(msg.verb());
            return false;
        }).drop();
        cluster.filters().inbound().verbs(Verb.ECHO_RSP.id).messagesMatching((from, to, msg) -> {
            // The response arriving back marks the end of the round trip.
            waitForIt.countDown();
            return false;
        }).drop();
        cluster.get(1).runOnInstance(() -> {
            MessagingService.instance().send(Message.out(Verb.ECHO_REQ, NoPayload.noPayload), other);
        });
        // Bounded wait: a lost message should fail the test, not hang it forever.
        Assert.assertTrue("Timed out waiting for ECHO_RSP",
                          waitForIt.await(1, java.util.concurrent.TimeUnit.MINUTES));
        Assert.assertEquals(outboundMessagesSeen, inboundMessagesSeen);
        // since both are equal, only need to confirm the size of one
        Assert.assertEquals(2, outboundMessagesSeen.size());
        Assert.assertFalse("outbound message seen after inbound", outboundAfterInbound.get());
    }
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class NativeTransportEncryptionOptionsTest, method connectionCannotAgreeOnClientAndServerTest.
@Test
public void connectionCannotAgreeOnClientAndServerTest() throws Throwable {
    // The server restricts TLSv1.2 to the AES-256 cipher suite while the client
    // offers only an AES-128 suite, so the handshake must fail to negotiate.
    try (Cluster cluster = builder().withNodes(1).withConfig(config -> {
        config.with(Feature.NATIVE_PROTOCOL);
        config.set("client_encryption_options",
                   ImmutableMap.builder()
                               .putAll(validKeystore)
                               .put("enabled", true)
                               .put("accepted_protocols", Collections.singletonList("TLSv1.2"))
                               .put("cipher_suites", Collections.singletonList("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"))
                               .build());
    }).start()) {
        InetAddress serverAddress = cluster.get(1).config().broadcastAddress().getAddress();
        int nativePort = (int) cluster.get(1).config().get("native_transport_port");
        // Client side: same protocol version, but a cipher the server does not accept.
        TlsConnection client = new TlsConnection(serverAddress.getHostAddress(),
                                                 nativePort,
                                                 Collections.singletonList("TLSv1.2"),
                                                 Collections.singletonList("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"));
        Assert.assertEquals("Should not be possible to establish a TLSv1.2 connection with different ciphers",
                            ConnectResult.FAILED_TO_NEGOTIATE, client.connect());
        client.assertReceivedHandshakeException();
    }
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class NetworkTopologyTest, method noWarningForNetworkTopologyStategyConfigOnRestart.
@Test
public void noWarningForNetworkTopologyStategyConfigOnRestart() throws Exception {
    // A NetworkTopologyStrategy keyspace spanning two DCs must not produce the
    // "Unrecognized strategy option" warning when a node is bounced.
    final int nodesPerDc = 2;
    try (Cluster cluster = builder().withConfig(c -> c.with(GOSSIP, NETWORK)).withRacks(2, 1, nodesPerDc).start()) {
        cluster.schemaChange(String.format(
            "CREATE KEYSPACE %s WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : %d, 'datacenter2' : %d };",
            KEYSPACE, nodesPerDc, nodesPerDc));
        cluster.get(2).nodetool("flush");
        // Bounce node 2 in datacenter 1: stop it, then bring it back up.
        cluster.get(2).shutdown().get();
        cluster.get(2).startup();
        List<String> ignoredOptionWarnings =
            cluster.get(2).logs().grep("Ignoring Unrecognized strategy option \\{datacenter2\\}").getResult();
        Assert.assertTrue("Not expected to see the warning about unrecognized option",
                          ignoredOptionWarnings.isEmpty());
    }
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class PagingTest, method testPaging.
@Test
public void testPaging() throws Throwable {
    // Compares paged reads on a 3-node cluster against unpaged reads on a
    // single-node reference cluster: for every statement and page size the
    // two must return identical rows.
    try (Cluster cluster = init(builder().withNodes(3).start());
         Cluster singleNode = init(builder().withNodes(1).withSubnet(1).start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))"));
        singleNode.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))"));
        for (int i = 0; i < 10; i++) {
            for (int j = 0; j < 10; j++) {
                // Three bound values (i, j, i + i) require three placeholders; binding
                // pk to i also spreads rows across partitions, which the DISTINCT-pk
                // and "pk IN (3,5,8,10)" statements below depend on.
                cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (?, ?, ?)"), QUORUM, i, j, i + i);
                singleNode.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (?, ?, ?)"), QUORUM, i, j, i + i);
            }
        }
        int[] pageSizes = new int[] { 1, 2, 3, 5, 10, 20, 50, Integer.MAX_VALUE };
        String[] statements = new String[] { withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck >= 5"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5 AND ck <= 10"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5 LIMIT 3"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck >= 5 LIMIT 2"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5 AND ck <= 10 LIMIT 2"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5 ORDER BY ck DESC"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck >= 5 ORDER BY ck DESC"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5 AND ck <= 10 ORDER BY ck DESC"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5 ORDER BY ck DESC LIMIT 3"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck >= 5 ORDER BY ck DESC LIMIT 2"), withKeyspace("SELECT * FROM %s.tbl WHERE pk = 1 AND ck > 5 AND ck <= 10 ORDER BY ck DESC LIMIT 2"), withKeyspace("SELECT DISTINCT pk FROM %s.tbl LIMIT 3"), withKeyspace("SELECT DISTINCT pk FROM %s.tbl WHERE pk IN (3,5,8,10)"), withKeyspace("SELECT DISTINCT pk FROM %s.tbl WHERE pk IN (3,5,8,10) LIMIT 2") };
        for (String statement : statements) {
            // Ground truth: the unpaged result from the single-node cluster.
            Object[][] noPagingRows = singleNode.coordinator(1).execute(statement, QUORUM);
            for (int pageSize : pageSizes) {
                Iterator<Object[]> pagingRows = cluster.coordinator(1).executeWithPaging(statement, QUORUM, pageSize);
                assertRows(Iterators.toArray(pagingRows, Object[].class), noPagingRows);
            }
        }
    }
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class ReadFailureTest, method testSpecExecRace.
/**
 * This test attempts to create a race condition with speculative executions that would previously cause an AssertionError.
 * N=2, RF=2, read ONE
 * The read will fail on the local node due to tombstone read threshold. At the same time, a spec exec is triggered
 * reading from the other node.
 * <p>
 * See CASSANDRA-16097 for further details.
 */
@Test
public void testSpecExecRace() throws Throwable {
    try (Cluster cluster = init(Cluster.build().withNodes(2).withConfig(config -> config.set("tombstone_failure_threshold", TOMBSTONE_FAIL_THRESHOLD)).start())) {
        // Create a table with the spec exec policy set to a low percentile so it's more likely to produce a spec exec racing with the local request.
        // Not using 'Always' because that actually uses a different class/mechanism and doesn't exercise the bug
        // we're trying to produce.
        cluster.schemaChange(String.format("CREATE TABLE %s.%s (k int, c int, v int, PRIMARY KEY (k,c)) WITH speculative_retry = '5p';", KEYSPACE, TABLE));
        // Create a partition with enough tombstones to create a read failure according to the configured threshold.
        // Use the TABLE constant (not a hard-coded table name) so this stays in sync with the CREATE TABLE above.
        for (int i = 0; i <= TOMBSTONE_FAIL_THRESHOLD; ++i)
            cluster.coordinator(1).execute(String.format("DELETE FROM %s.%s WHERE k=%d AND c=%d", KEYSPACE, TABLE, TOMBSTONE_FAIL_KEY, i), ConsistencyLevel.TWO);
        // Create a bunch of latency samples for this failed operation.
        loopFailStatement(cluster, 5000);
        // Update the spec exec threshold based on the above samples.
        // This would normally be done by the periodic task CassandraDaemon.SPECULATION_THRESHOLD_UPDATER.
        cluster.get(1).runOnInstance(() -> {
            ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE);
            cfs.updateSpeculationThreshold();
        });
        // Run the request a bunch of times under racy conditions.
        loopFailStatement(cluster, 5000);
    }
}
Aggregations