Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class SchemaTest, method readRepairWithCompaction.
@Test
public void readRepairWithCompaction() throws Throwable {
    try (Cluster cluster = init(Cluster.build(2).start())) {
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, primary key (pk, ck))");
        String name = "v10";
        // Add the new column on node 1 only, so the two nodes disagree on the schema.
        cluster.get(1).schemaChangeInternal("ALTER TABLE " + KEYSPACE + ".tbl ADD " + name + " list<int>");
        cluster.get(1).executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1, v2) values (?,1,1,1)", 1);
        // Read at CL ALL to trigger read repair while node 2 does not yet know the column.
        selectSilent(cluster, name);
        cluster.get(2).flush(KEYSPACE);
        // Bring node 2 up to date on the schema and write to the new column there.
        cluster.get(2).schemaChangeInternal("ALTER TABLE " + KEYSPACE + ".tbl ADD " + name + " list<int>");
        cluster.get(2).executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1, v2, " + name + ") values (?,1,1,1,[1])", 1);
        cluster.get(2).flush(KEYSPACE);
        // Compact, restart and compact again to make sure the read-repaired data survives.
        cluster.get(2).forceCompact(KEYSPACE, "tbl");
        cluster.get(2).shutdown().get();
        cluster.get(2).startup();
        cluster.get(2).forceCompact(KEYSPACE, "tbl");
    }
}
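Both SchemaTest methods call a selectSilent helper that is not included in this listing. A minimal sketch of what it is assumed to do, based on how it is used above: read the newly added column at CL ALL so that node 2, which does not yet know the column, has to participate in the read, and swallow the resulting failure. The real helper in SchemaTest may inspect the failure more precisely.

// Hypothetical reconstruction of selectSilent; the original implementation may differ.
private void selectSilent(Cluster cluster, String name) {
    try {
        // Query the new column at CL ALL so the schema-disagreeing replica is contacted.
        cluster.coordinator(1).execute("SELECT " + name + " FROM " + KEYSPACE + ".tbl WHERE pk = ?",
                                       ConsistencyLevel.ALL, 1);
    } catch (Exception e) {
        // Expected while node 2 does not yet know the column; ignored here.
    }
}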
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class SchemaTest, method readRepair.
@Test
public void readRepair() throws Throwable {
    try (Cluster cluster = init(Cluster.build(2).start())) {
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, primary key (pk, ck))");
        String name = "aaa";
        // Add the new column on node 1 only, creating a schema disagreement.
        cluster.get(1).schemaChangeInternal("ALTER TABLE " + KEYSPACE + ".tbl ADD " + name + " list<int>");
        cluster.get(1).executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1, v2) values (?,1,1,1)", 1);
        // Read at CL ALL to trigger read repair against node 2.
        selectSilent(cluster, name);
        cluster.get(2).flush(KEYSPACE);
        // Catch node 2 up on the schema, then restart it and compact the read-repaired data.
        cluster.get(2).schemaChangeInternal("ALTER TABLE " + KEYSPACE + ".tbl ADD " + name + " list<int>");
        cluster.get(2).shutdown().get();
        cluster.get(2).startup();
        cluster.get(2).forceCompact(KEYSPACE, "tbl");
    }
}
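The SchemaTest snippets above assume the usual in-JVM dtest scaffolding around them. A rough sketch of that surrounding class, assuming the shared dtest base class (TestBaseImpl) supplies KEYSPACE and init(); treat the imports and the base-class name as assumptions rather than a copy of the original file:

import org.junit.Test;

import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.ConsistencyLevel;

// TestBaseImpl is assumed to provide KEYSPACE and init(cluster), which creates
// the shared distributed test keyspace used by the statements above.
public class SchemaTest extends TestBaseImpl {
    // readRepairWithCompaction(), readRepair() and selectSilent(...) from above go here.
}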
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class SinglePartitionReadCommandTest, method testNonCompactTableWithRowOnOneNodeMissingAColumnAndColumnDeletionOnTheOther.
@Test
public void testNonCompactTableWithRowOnOneNodeMissingAColumnAndColumnDeletionOnTheOther() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(2).start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, v1 int, v2 int, PRIMARY KEY (pk, ck))"));
        // Node 1: a full row at timestamp 1000, then a newer v1 at timestamp 2000.
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v1, v2) VALUES (1, '1', 1, 1) USING TIMESTAMP 1000"));
        cluster.get(1).flush(KEYSPACE);
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v1) VALUES (1, '1', 2) USING TIMESTAMP 2000"));
        cluster.get(1).flush(KEYSPACE);
        // Node 2: an even newer deletion of v1 at timestamp 3000.
        cluster.get(2).executeInternal(withKeyspace("DELETE v1 FROM %s.tbl USING TIMESTAMP 3000 WHERE pk=1 AND ck='1'"));
        cluster.get(2).flush(KEYSPACE);
        // At CL ALL the deletion wins for v1, while v2 from timestamp 1000 survives.
        assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL),
                   row(1, "1", null, 1));
        assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v1 FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL),
                   row((Integer) null));
        assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v2 FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL),
                   row(1));
    }
}
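The SinglePartitionReadCommandTest snippets use a few helpers from the dtest framework: withKeyspace(...) to substitute the shared test keyspace into the %s placeholder, and assertRows / row, static assertion utilities that compare the coordinator's Object[][] result against expected rows. A minimal sketch of the assumed behaviour of withKeyspace:

// Assumed behaviour: format the shared test keyspace into the CQL string.
protected static String withKeyspace(String query) {
    return String.format(query, KEYSPACE);
}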
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class SinglePartitionReadCommandTest, method testCompactAndNonCompactTableWithRowOnOneNodeAndRangeDeletionOnTheOther.
@Test
public void testCompactAndNonCompactTableWithRowOnOneNodeAndRangeDeletionOnTheOther() throws Throwable {
    for (String options : new String[] { "WITH COMPACT STORAGE", "" }) {
        try (Cluster cluster = init(builder().withNodes(2).start())) {
            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, v int, PRIMARY KEY (pk, ck)) " + options));
            // Node 1: rows in both partitions, plus a later update and a later insert.
            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (1, '1', 1) USING TIMESTAMP 1000"));
            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (1, '2', 2) USING TIMESTAMP 1001"));
            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (2, '1', 1) USING TIMESTAMP 1001"));
            cluster.get(1).flush(KEYSPACE);
            cluster.get(1).executeInternal(withKeyspace("UPDATE %s.tbl USING TIMESTAMP 2000 SET v = 2 WHERE pk = 1 AND ck = '1'"));
            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (2, '2', 2) USING TIMESTAMP 3001"));
            cluster.get(1).flush(KEYSPACE);
            // Node 2: range tombstones that shadow (2, '1') and (1, '1') but cover neither (1, '2') nor (2, '2').
            cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl USING TIMESTAMP 2001 WHERE pk=2 AND ck >= '1' AND ck < '2'"));
            cluster.get(2).executeInternal(withKeyspace("DELETE FROM %s.tbl USING TIMESTAMP 3000 WHERE pk=1 AND ck >= '1' AND ck < '2'"));
            cluster.get(2).flush(KEYSPACE);
            // (1, '1'): the update at timestamp 2000 loses to the tombstone at timestamp 3000.
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL));
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL));
            // (1, '2') and (2, '2') fall outside the tombstone ranges (or are newer) and survive.
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='2'"), ConsistencyLevel.ALL), row(1, "2", 2));
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=1 AND ck='2'"), ConsistencyLevel.ALL), row(2));
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=2 AND ck='2'"), ConsistencyLevel.ALL), row(2, "2", 2));
            assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=2 AND ck='2'"), ConsistencyLevel.ALL), row(2));
        }
    }
}
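Note that the writes above use executeInternal, which applies a mutation only on the targeted node and bypasses the coordinator and replication; that is what puts the two replicas into conflicting states before the CL ALL reads reconcile them. As an illustration only, not part of the original test, the divergence for (2, '1') could be observed with local reads such as these:

// Hypothetical local checks before the CL ALL reads: node 1 still sees the row,
// node 2 only has the covering range tombstone, so its local read returns nothing.
assertRows(cluster.get(1).executeInternal(withKeyspace("SELECT v FROM %s.tbl WHERE pk=2 AND ck='1'")),
           row(1));
assertRows(cluster.get(2).executeInternal(withKeyspace("SELECT v FROM %s.tbl WHERE pk=2 AND ck='1'")));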
Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.
The class SinglePartitionReadCommandTest, method testNonCompactTableWithEmptyRowOnBothNodes.
@Test
public void testNonCompactTableWithEmptyRowOnBothNodes() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(2).start())) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck text, v int, PRIMARY KEY (pk, ck))"));
        cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (1, '1', 1) USING TIMESTAMP 1000"), ConsistencyLevel.ALL);
        cluster.get(1).flush(KEYSPACE);
        cluster.coordinator(1).execute(withKeyspace("DELETE v FROM %s.tbl USING TIMESTAMP 2000 WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL);
        cluster.get(1).flush(KEYSPACE);
        cluster.get(2).flush(KEYSPACE);
        assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL), row(1, "1", null));
        assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=1 AND ck='1'"), ConsistencyLevel.ALL), row((Integer) null));
    }
}
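A small point about the assertions in these tests: row(...) simply wraps the expected column values in an Object[], and the cast in row((Integer) null) keeps a single null from being taken as a null varargs array. A sketch of that usage, assuming the assertRows/row helpers from the dtest assertion utilities:

// One expected column whose value is null; without the cast, row(null) would
// pass a null Object[] to the varargs parameter instead of an array containing null.
Object[] expectedRow = row((Integer) null);
assertRows(cluster.coordinator(2).execute(withKeyspace("SELECT v FROM %s.tbl WHERE pk=1 AND ck='1'"),
                                          ConsistencyLevel.ALL),
           expectedRow);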