Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project:
method testCompactAndNonCompactTableWithRowOnOneNodeAndRowDeletionOnTheOther of class SinglePartitionReadCommandTest.
@Test
public void testCompactAndNonCompactTableWithRowOnOneNodeAndRowDeletionOnTheOther() throws Throwable {
    // Run the same scenario for both a COMPACT STORAGE table and a regular table.
    for (String options : new String[] { "WITH COMPACT STORAGE", "" }) {
        try (Cluster cluster = init(builder().withNodes(2).start())) {
            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, v int, PRIMARY KEY (pk, ck)) " + options));

            // Node 1, first sstable: one row per partition.
            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (1, '1', 1) USING TIMESTAMP 1000"));
            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (2, '1', 1) USING TIMESTAMP 1001"));
            cluster.get(1).flush(KEYSPACE);

            // Node 1, second sstable: an update of (1,'1') and a brand-new row (2,'2').
            cluster.get(1).executeInternal(withKeyspace("UPDATE %s.tbl USING TIMESTAMP 2000 SET v = 2 WHERE pk = 1 AND ck = '1'"));
            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (2, '2', 2) USING TIMESTAMP 3001"));
            cluster.get(1).flush(KEYSPACE);

            // Node 2: row deletions that shadow node 1's writes for (2,'1') (ts 2001 > 1001)
            // and for (1,'1') (ts 3000 > 2000), so both of those rows must resolve to empty.
            cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl USING TIMESTAMP 2001 WHERE pk=2 AND ck='1'"));
            cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl USING TIMESTAMP 3000 WHERE pk=1 AND ck='1'"));
            cluster.get(2).flush(KEYSPACE);

            // (1,'1') was row-deleted at ts 3000: reads at ALL must merge to nothing.
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL));
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL));

            // Fix: the surviving row is (2,'2',2) from ts 3001 — nothing deletes it — so the
            // queries must target ck='2'. The original queried ck='1', which was row-deleted
            // at ts 2001 and therefore could never return the expected row(2, "2", 2).
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=2 AND ck='2'"), ConsistencyLevel.ALL), row(2, "2", 2));
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=2 AND ck='2'"), ConsistencyLevel.ALL), row(2));
        }
    }
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project:
method testNonCompactTableWithOnlyUpdatedColumnOnOneNodeAndColumnDeletionOnTheOther of class SinglePartitionReadCommandTest.
@Test
public void testNonCompactTableWithOnlyUpdatedColumnOnOneNodeAndColumnDeletionOnTheOther() throws Throwable {
    try (Cluster twoNodes = init(builder().withNodes(2).start())) {
        // read_repair='NONE' so the coordinator's merge logic alone decides the result.
        twoNodes.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, v1 int, v2 int, PRIMARY KEY (pk, ck)) WITH read_repair='NONE'"));

        // Node 1 only ever sees an UPDATE, which writes cells but no row marker.
        twoNodes.get(1).executeInternal(withKeyspace("UPDATE %s.tbl USING TIMESTAMP 2000 SET v1 = 1, v2 = 2 WHERE pk = 1 AND ck = '1'"));
        twoNodes.get(1).flush(KEYSPACE);

        // Node 2 deletes v1 with a newer timestamp (3000 > 2000), shadowing node 1's v1 cell.
        twoNodes.get(2).executeInternal(withKeyspace("DELETE v1 FROM %s.tbl USING TIMESTAMP 3000 WHERE pk=1 AND ck='1'"));
        twoNodes.get(2).flush(KEYSPACE);

        // v2 is still live, so the row exists with v1 merged away to null.
        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL), row(1, "1", null, 2));
        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT v1 FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL), row((Integer) null));
        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT v2 FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL), row((Integer) 2));

        // Now delete v2 as well: with both cells dead and no row marker, the row disappears.
        twoNodes.get(2).executeInternal(withKeyspace("DELETE v2 FROM %s.tbl USING TIMESTAMP 4000 WHERE pk=1 AND ck='1'"));
        twoNodes.get(2).flush(KEYSPACE);

        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL));
        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT v1 FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL));
        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT v2 FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL));
    }
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project:
method testNonCompactTableWithRowOnOneNodeAndColumnDeletionOnTheOther of class SinglePartitionReadCommandTest.
@Test
public void testNonCompactTableWithRowOnOneNodeAndColumnDeletionOnTheOther() throws Throwable {
    try (Cluster twoNodes = init(builder().withNodes(2).start())) {
        twoNodes.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, v int, PRIMARY KEY (pk, ck))"));

        // Node 1, sstable 1: an INSERT, which — unlike an UPDATE — also writes a row marker.
        twoNodes.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (1, '1', 1) USING TIMESTAMP 1000"));
        twoNodes.get(1).flush(KEYSPACE);

        // Node 1, sstable 2: a newer cell for v.
        twoNodes.get(1).executeInternal(withKeyspace("UPDATE %s.tbl USING TIMESTAMP 2000 SET v = 2 WHERE pk = 1 AND ck = '1'"));
        twoNodes.get(1).flush(KEYSPACE);

        // Node 2: a column deletion of v with the newest timestamp (3000), shadowing both cells.
        twoNodes.get(2).executeInternal(withKeyspace("DELETE v FROM %s.tbl USING TIMESTAMP 3000 WHERE pk=1 AND ck='1'"));
        twoNodes.get(2).flush(KEYSPACE);

        // The INSERT's row marker (ts 1000) is untouched by the column deletion,
        // so the row must still surface — with v merged to null.
        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL), row(1, "1", null));
        assertRows(twoNodes.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL), row((Integer) null));
    }
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project:
method streamPrepareFailTest of class StreamPrepareFailTest.
@Test
// Verifies that a failure injected into the stream-prepare phase (via StreamFailHelper,
// installed on every instance by withInstanceInitializer) both surfaces on the remote
// node and propagates to the caller of rebuild() as a "Stream failed" RuntimeException.
public void streamPrepareFailTest() throws Throwable {
// NETWORK and GOSSIP are required so the two instances actually stream over the wire.
try (Cluster cluster = init(Cluster.build(2).withInstanceInitializer(StreamFailHelper::install).withConfig(config -> config.with(NETWORK, GOSSIP)).start())) {
cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
try {
// rebuild(null) streams from all datacenters; the injected failure should abort it.
cluster.get(1).runOnInstance(() -> StorageService.instance.rebuild(null));
// Reached only if rebuild unexpectedly succeeded.
fail("rebuild should throw exception");
} catch (RuntimeException e) {
// The helper on node 2 records that it actually threw (not just that node 1 errored).
cluster.get(2).runOnInstance(() -> assertTrue(StreamFailHelper.thrown.get()));
assertTrue(e.getMessage().contains("Stream failed"));
}
}
}
Example use of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project:
method testRestartNode of class TopologyChangeTest.
@Test
// Restarts node 3 of a three-node in-JVM cluster and checks that an external
// java-driver client observes the topology change: host counts via driver metadata,
// and a DOWN followed by an UP event via a registered EventStateListener.
// Note: `control` is the in-JVM dtest Cluster; `cluster` is the DataStax driver's
// com.datastax.driver.core.Cluster connected to it — two different types.
public void testRestartNode() throws Throwable {
try (Cluster control = init(Cluster.build().withNodes(3).withNodeProvisionStrategy(strategy).withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL)).start());
com.datastax.driver.core.Cluster cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
Session session = cluster.connect()) {
// Listener must be registered before the shutdown so the Down event is captured.
EventStateListener eventStateListener = new EventStateListener();
session.getCluster().register(eventStateListener);
control.get(3).shutdown().get();
// Driver metadata should soon report only 2 of 3 hosts up.
await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> Assert.assertEquals(2, cluster.getMetadata().getAllHosts().stream().filter(h -> h.isUp()).count()));
control.get(3).startup();
// Startup is slower than shutdown — allow up to 30s for all 3 hosts to be up again.
await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> Assert.assertEquals(3, cluster.getMetadata().getAllHosts().stream().filter(h -> h.isUp()).count()));
// DOWN UP can also be seen if the jvm is slow and connections are closed, but make sure it at least happens once
// given the node restarts
assertThat(eventStateListener.events).containsSequence(new Event(Down, control.get(3)), new Event(Up, control.get(3)));
}
}
Aggregations