Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.
From class ViewComplexDeletionsPartialTest, method testPartialDeleteUnselectedColumn.
// Exercises materialized-view liveness when mutations touch only columns that are
// NOT selected by the view (a and b are unselected; the view selects only k, c).
// The view row must stay visible while any unselected column is live and disappear
// once the last one is deleted.
// @param flush when true, flush after each mutation so reads merge across sstables
private void testPartialDeleteUnselectedColumn(boolean flush) throws Throwable {
createTable("CREATE TABLE %s (k int, c int, a int, b int, PRIMARY KEY (k, c))");
// View keys mirror the base PK; columns a and b are deliberately left unselected.
createView("CREATE MATERIALIZED VIEW %s AS " + "SELECT k,c FROM %s WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (k,c)");
Keyspace ks = Keyspace.open(keyspace());
// Disable compaction so flushed sstables survive and the merge path is exercised.
ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
// Writing only the unselected column b still materializes the view row.
updateView("UPDATE %s USING TIMESTAMP 10 SET b=1 WHERE k=1 AND c=1");
if (flush)
Util.flush(ks);
assertRows(execute("SELECT * from %s"), row(1, 1, null, 1));
assertRows(executeView("SELECT * FROM %s"), row(1, 1));
// Deleting the only live column at a newer timestamp (11 > 10) removes the base
// row, and the view row must disappear with it.
updateView("DELETE b FROM %s USING TIMESTAMP 11 WHERE k=1 AND c=1");
if (flush)
Util.flush(ks);
assertEmpty(execute("SELECT * from %s"));
assertEmpty(executeView("SELECT * FROM %s"));
// The TS-11 deletion covered only column b, so this older write (TS=1) to a
// different column a is still live and brings the row back in base and view.
updateView("UPDATE %s USING TIMESTAMP 1 SET a=1 WHERE k=1 AND c=1");
if (flush)
Util.flush(ks);
assertRows(execute("SELECT * from %s"), row(1, 1, 1, null));
assertRows(executeView("SELECT * FROM %s"), row(1, 1));
execute("truncate %s;");
// removal generated by unselected column should not shadow PK update with smaller timestamp
updateViewWithFlush("UPDATE %s USING TIMESTAMP 18 SET a=1 WHERE k=1 AND c=1", flush);
assertRows(execute("SELECT * from %s"), row(1, 1, 1, null));
assertRows(executeView("SELECT * FROM %s"), row(1, 1));
// Setting a=null at TS=20 deletes the last live column: the row vanishes everywhere.
updateViewWithFlush("UPDATE %s USING TIMESTAMP 20 SET a=null WHERE k=1 AND c=1", flush);
assertRows(execute("SELECT * from %s"));
assertRows(executeView("SELECT * FROM %s"));
// The INSERT adds primary-key liveness at TS=15; although 15 < 20, the TS-20
// removal targeted only column a, so the row stays alive (see comment above).
updateViewWithFlush("INSERT INTO %s(k,c) VALUES(1,1) USING TIMESTAMP 15", flush);
assertRows(execute("SELECT * from %s"), row(1, 1, null, null));
assertRows(executeView("SELECT * FROM %s"), row(1, 1));
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.
From class ViewComplexDeletionsTest, method complexTimestampWithbasePKColumnsInViewPKDeletionTest.
// Verifies timestamp semantics when all base PK columns form the view PK:
// after a partition deletion, a PK-only re-insert must leave regular columns
// dead, and the result must survive flushes and a major compaction.
// @param flush when true, flush after each mutation so merges happen at read time
private void complexTimestampWithbasePKColumnsInViewPKDeletionTest(boolean flush) throws Throwable {
createTable("create table %s (p1 int, p2 int, v1 int, v2 int, primary key(p1, p2))");
Keyspace ks = Keyspace.open(keyspace());
// View reverses the base primary-key order: (p2, p1).
createView("create materialized view %s as select * from %s " + "where p1 is not null and p2 is not null primary key (p2, p1)");
// Keep individual sstables until the explicit compaction below.
ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
// Set initial values TS=1
updateView("Insert into %s (p1, p2, v1, v2) values (1, 2, 3, 4) using timestamp 1;");
if (flush)
Util.flush(ks);
assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2), row(3, 4, 1L));
// remove row/mv TS=2
updateView("Delete from %s using timestamp 2 where p1 = 1 and p2 = 2;");
if (flush)
Util.flush(ks);
// view are empty
assertRowsIgnoringOrder(executeView("SELECT * FROM %s"));
// insert PK with TS=3
updateView("Insert into %s (p1, p2) values (1, 2) using timestamp 3;");
if (flush)
Util.flush(ks);
// deleted column in MV remained dead
assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(2, 1, null, null));
// A major compaction must not change the visible result.
ks.getColumnFamilyStore(currentView()).forceMajorCompaction();
assertRowsIgnoringOrder(executeView("SELECT * FROM %s"), row(2, 1, null, null));
// reset values
updateView("Insert into %s (p1, p2, v1, v2) values (1, 2, 3, 4) using timestamp 10;");
if (flush)
Util.flush(ks);
assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2), row(3, 4, 10L));
// Overwrite v2 with a newer cell (TS=20).
updateView("UPDATE %s using timestamp 20 SET v2 = 5 WHERE p1 = 1 and p2 = 2");
if (flush)
Util.flush(ks);
assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2), row(3, 5, 20L));
// Row deletion at TS=10 shadows the TS=10 insert (v1 goes dead) but not the
// newer TS=20 cell, so only v2=5 remains visible.
updateView("DELETE FROM %s using timestamp 10 WHERE p1 = 1 and p2 = 2");
if (flush)
Util.flush(ks);
assertRowsIgnoringOrder(executeView("SELECT v1, v2, WRITETIME(v2) from %s WHERE p1 = ? AND p2 = ?", 1, 2), row(null, 5, 20L));
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.
From class ViewComplexDeletionsTest, method testCommutativeRowDeletion.
/**
 * CASSANDRA-13409: a new update must not resurrect previously deleted data in
 * a materialized view. Moves the row between view keys (by updating v1) around
 * a row deletion and checks the same state is observed whether mutations are
 * merged in the memtable or across separately flushed/compacted sstables.
 *
 * @param flush when true, flush after every mutation so each lands in its own
 *              sstable (sstable-1 .. sstable-5), exercising the compaction path
 */
private void testCommutativeRowDeletion(boolean flush) throws Throwable {
    // CASSANDRA-13409 new update should not resurrect previous deleted data in view
    createTable("create table %s (p int primary key, v1 int, v2 int)");
    Keyspace ks = Keyspace.open(keyspace());
    // Regular base column v1 is promoted to the view's partition key.
    createView("create materialized view %s as select * from %s " + "where p is not null and v1 is not null primary key (v1, p)");
    // Keep each flushed sstable intact so the user-defined compaction below can pick specific ones.
    ks.getColumnFamilyStore(currentView()).disableAutoCompaction();
    // sstable-1, Set initial values TS=1
    updateView("Insert into %s (p, v1, v2) values (3, 1, 3) using timestamp 1;");
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(executeView("SELECT v2, WRITETIME(v2) from %s WHERE v1 = ? AND p = ?", 1, 3), row(3, 1L));
    // sstable-2: row deletion at TS=2 empties the view
    updateView("Delete from %s using timestamp 2 where p = 3;");
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"));
    // sstable-3: re-insert the PK at TS=3; v2 must stay dead (deleted at TS=2 > 1)
    updateView("Insert into %s (p, v1) values (3, 1) using timestamp 3;");
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
    // sstable-4: changing v1 moves the row to view key 2; the v1=1 view row disappears
    updateView("UPdate %s using timestamp 4 set v1 = 2 where p = 3;");
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(2, 3, null, null));
    // sstable-5: moving v1 back must revive view key 1 without resurrecting the deleted v2
    updateView("UPdate %s using timestamp 5 set v1 = 1 where p = 3;");
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
    if (flush) {
        // compact sstable 2 and 4, 5;
        ColumnFamilyStore cfs = ks.getColumnFamilyStore(currentView());
        List<String> sstables = cfs.getLiveSSTables().stream().sorted(SSTableReader.idComparator).map(SSTableReader::getFilename).collect(Collectors.toList());
        // String.join is varargs; no need to wrap the filenames in a List first.
        String dataFiles = String.join(",", sstables.get(1), sstables.get(3), sstables.get(4));
        CompactionManager.instance.forceUserDefinedCompaction(dataFiles);
        // 5 sstables, 3 of them compacted into 1 => 3 remain
        assertEquals(3, cfs.getLiveSSTables().size());
    }
    // regular tombstone should be retained after compaction
    assertRowsIgnoringOrder(executeView("SELECT v1, p, v2, WRITETIME(v2) from %s"), row(1, 3, null, null));
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.
From class ViewComplexLivenessLimitTest, method testExpiredLivenessLimit.
// Verifies LIMIT and paging over view rows carrying expired liveness info:
// 100 rows are inserted into two views keyed on column a, then a is deleted
// for every key except 50 and 100, so only those two rows stay visible.
// Paging and LIMIT must skip the expired rows rather than count them.
// @param flush when true, flush both views before querying
private void testExpiredLivenessLimit(boolean flush) throws Throwable {
createTable("CREATE TABLE %s (k int PRIMARY KEY, a int, b int);");
Keyspace ks = Keyspace.open(keyspace());
// Two views over the same columns, with opposite primary-key order.
String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " + "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)");
String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " + "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)");
ks.getColumnFamilyStore(mv1).disableAutoCompaction();
ks.getColumnFamilyStore(mv2).disableAutoCompaction();
for (int i = 1; i <= 100; i++) updateView("INSERT INTO %s(k, a, b) VALUES (?, ?, ?);", i, i, i);
for (int i = 1; i <= 100; i++) {
// Keep rows 50 and 100 fully live.
if (i % 50 == 0)
continue;
// create expired liveness
updateView("DELETE a FROM %s WHERE k = ?;", i);
}
if (flush) {
Util.flushTable(ks, mv1);
Util.flushTable(ks, mv2);
}
for (String view : Arrays.asList(mv1, mv2)) {
// paging
assertEquals(1, executeNetWithPaging(String.format("SELECT k,a,b FROM %s limit 1", view), 1).all().size());
assertEquals(2, executeNetWithPaging(String.format("SELECT k,a,b FROM %s limit 2", view), 1).all().size());
assertEquals(2, executeNetWithPaging(String.format("SELECT k,a,b FROM %s", view), 1).all().size());
assertRowsNet(executeNetWithPaging(String.format("SELECT k,a,b FROM %s ", view), 1), row(50, 50, 50), row(100, 100, 100));
// limit
assertEquals(1, execute(String.format("SELECT k,a,b FROM %s limit 1", view)).size());
assertRowsIgnoringOrder(execute(String.format("SELECT k,a,b FROM %s limit 2", view)), row(50, 50, 50), row(100, 100, 100));
}
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.
From class RangeCommandIteratorTest, method testRangeQueried.
/**
 * Checks how many round trips RangeCommandIterator issues over a token ring
 * split into five vnode ranges, under different concurrency-factor settings
 * and with/without the range merger.
 */
@Test
public void testRangeQueried() {
    List<Token> tokens = setTokens(100, 200, 300, 400);
    // n tokens divide token ring into n+1 ranges
    int vnodeCount = tokens.size() + 1;

    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = ks.getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();

    // Populate 100 single-clustering partitions keyed "0".."99".
    int rowCount = 100;
    for (int key = 0; key < rowCount; key++) {
        RowUpdateBuilder builder = new RowUpdateBuilder(store.metadata(), 10, String.valueOf(key));
        builder.clustering("c");
        builder.add("val", String.valueOf(key));
        builder.build().applyUnsafe();
    }
    Util.flush(store);

    PartitionRangeReadCommand readCommand = (PartitionRangeReadCommand) Util.cmd(store).build();
    AbstractBounds<PartitionPosition> bounds = readCommand.dataRange().keyRange();

    // No range merger: 1st batch covers 1 range, 2nd batch the remaining ranges => 2 batches.
    CloseableIterator<ReplicaPlan.ForRangeRead> plans = replicaPlanIterator(bounds, ks, false);
    RangeCommandIterator iterator = new RangeCommandIterator(plans, readCommand, 1, 1000, vnodeCount, nanoTime());
    verifyRangeCommandIterator(iterator, rowCount, 2, vnodeCount);

    // No range merger, initial concurrency factor 5: all 5 vnode ranges fit in a single batch.
    plans = replicaPlanIterator(bounds, ks, false);
    iterator = new RangeCommandIterator(plans, readCommand, vnodeCount, 1000, vnodeCount, nanoTime());
    verifyRangeCommandIterator(iterator, rowCount, 1, vnodeCount);

    // No range merger, max concurrency factor 1: one vnode range per batch => 5 batches.
    plans = replicaPlanIterator(bounds, ks, false);
    iterator = new RangeCommandIterator(plans, readCommand, 1, 1, vnodeCount, nanoTime());
    verifyRangeCommandIterator(iterator, rowCount, vnodeCount, vnodeCount);

    // Range merger on: all ranges share the same replica (localhost) => 1 batch.
    plans = replicaPlanIterator(bounds, ks, true);
    iterator = new RangeCommandIterator(plans, readCommand, 1, 1000, vnodeCount, nanoTime());
    verifyRangeCommandIterator(iterator, rowCount, 1, vnodeCount);

    // Range merger on, max concurrency factor 1: still a single merged batch.
    plans = replicaPlanIterator(bounds, ks, true);
    iterator = new RangeCommandIterator(plans, readCommand, 1, 1, vnodeCount, nanoTime());
    verifyRangeCommandIterator(iterator, rowCount, 1, vnodeCount);
}
Aggregations