use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
the class ColumnFamilyStoreTest method testDeleteStandardRowSticksAfterFlush.
@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable {
    // test to make sure flushing after a delete doesn't resurrect deleted cols.
    String keyspaceName = KEYSPACE1;
    String cfName = CF_STANDARD1;
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    ByteBuffer col = ByteBufferUtil.bytes("val");
    ByteBuffer val = ByteBufferUtil.bytes("val1");
    // insert; newCol builds ColumnMetadata for a regular column via the factory method,
    // though it is not referenced again in this test
    ColumnMetadata newCol = ColumnMetadata.regularColumn(cfs.metadata(), ByteBufferUtil.bytes("val2"), AsciiType.instance);
    new RowUpdateBuilder(cfs.metadata(), 0, "key1").clustering("Column1").add("val", "val1").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), 0, "key2").clustering("Column1").add("val", "val1").build().applyUnsafe();
    assertRangeCount(cfs, col, val, 2);
    // flush
    cfs.forceBlockingFlush();
    // insert, don't flush
    new RowUpdateBuilder(cfs.metadata(), 1, "key3").clustering("Column1").add("val", "val1").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), 1, "key4").clustering("Column1").add("val", "val1").build().applyUnsafe();
    assertRangeCount(cfs, col, val, 4);
    // delete (from sstable and memtable)
    RowUpdateBuilder.deleteRow(cfs.metadata(), 5, "key1", "Column1").applyUnsafe();
    RowUpdateBuilder.deleteRow(cfs.metadata(), 5, "key3", "Column1").applyUnsafe();
    // verify delete
    assertRangeCount(cfs, col, val, 2);
    // flush
    cfs.forceBlockingFlush();
    // re-verify delete; the first breakage was right here, because of CASSANDRA-1837
    assertRangeCount(cfs, col, val, 2);
    // simulate a 'late' insertion that gets put in after the deletion: it should get inserted, but fail on read
    new RowUpdateBuilder(cfs.metadata(), 2, "key1").clustering("Column1").add("val", "val1").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), 2, "key3").clustering("Column1").add("val", "val1").build().applyUnsafe();
    // should still be nothing there because we deleted these rows; the 2nd breakage, undetected because of CASSANDRA-1837
    assertRangeCount(cfs, col, val, 2);
    // make sure that new writes are recognized
    new RowUpdateBuilder(cfs.metadata(), 10, "key5").clustering("Column1").add("val", "val1").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), 10, "key6").clustering("Column1").add("val", "val1").build().applyUnsafe();
    assertRangeCount(cfs, col, val, 4);
    // and it remains so after flush (this wasn't failing before, but it's good to check)
    cfs.forceBlockingFlush();
    assertRangeCount(cfs, col, val, 4);
}
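The assertRangeCount helper used throughout this test is defined elsewhere in ColumnFamilyStoreTest and is not shown on this page. A minimal sketch of what it plausibly does, assuming the same Util test harness plus the org.apache.cassandra.cql3.Operator enum and a Util.getAll(ReadCommand) accessor (this is a hypothetical reconstruction for illustration, not the project's exact code):
    // Hypothetical reconstruction: filter a full-range read on the given column,
    // then count rows whose cell for that column equals the expected value.
    private static void assertRangeCount(ColumnFamilyStore cfs, ByteBuffer col, ByteBuffer val, int expected) {
        ColumnMetadata column = cfs.metadata().getColumn(col);
        int found = 0;
        for (FilteredPartition partition : Util.getAll(Util.cmd(cfs).filterOn(column.name.toString(), Operator.EQ, val).build())) {
            Iterator<Row> rows = partition.iterator();
            while (rows.hasNext()) {
                if (rows.next().getCell(column).value().equals(val))
                    found++;
            }
        }
        assertEquals(expected, found);
    }
Note how the ByteBuffer column name is resolved to its ColumnMetadata up front; every cell access in these snippets goes through a ColumnMetadata handle rather than a raw name.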
use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
the class CounterMutationTest method testBatch.
@Test
public void testBatch() throws WriteTimeoutException {
    ColumnFamilyStore cfsOne = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF1);
    ColumnFamilyStore cfsTwo = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF2);
    cfsOne.truncateBlocking();
    cfsTwo.truncateBlocking();
    // Do the update (+1, -1), (+2, -2)
    Mutation batch = new Mutation(KEYSPACE1, Util.dk("key1"));
    batch.add(new RowUpdateBuilder(cfsOne.metadata(), 5, "key1").clustering("cc")
        .add("val", 1L).add("val2", -1L).build().get(cfsOne.metadata()));
    batch.add(new RowUpdateBuilder(cfsTwo.metadata(), 5, "key1").clustering("cc")
        .add("val", 2L).add("val2", -2L).build().get(cfsTwo.metadata()));
    new CounterMutation(batch, ConsistencyLevel.ONE).apply();
    // Read back the first table's counters
    ColumnMetadata c1cfs1 = cfsOne.metadata().getColumn(ByteBufferUtil.bytes("val"));
    ColumnMetadata c2cfs1 = cfsOne.metadata().getColumn(ByteBufferUtil.bytes("val2"));
    Row row = Util.getOnlyRow(Util.cmd(cfsOne).includeRow("cc").columns("val", "val2").build());
    assertEquals(1L, CounterContext.instance().total(row.getCell(c1cfs1).value()));
    assertEquals(-1L, CounterContext.instance().total(row.getCell(c2cfs1).value()));
    // Read back the second table's counters
    ColumnMetadata c1cfs2 = cfsTwo.metadata().getColumn(ByteBufferUtil.bytes("val"));
    ColumnMetadata c2cfs2 = cfsTwo.metadata().getColumn(ByteBufferUtil.bytes("val2"));
    row = Util.getOnlyRow(Util.cmd(cfsTwo).includeRow("cc").columns("val", "val2").build());
    assertEquals(2L, CounterContext.instance().total(row.getCell(c1cfs2).value()));
    assertEquals(-2L, CounterContext.instance().total(row.getCell(c2cfs2).value()));
    // Check the caches, separately
    CBuilder cb = CBuilder.create(cfsOne.metadata().comparator);
    cb.add("cc");
    assertEquals(1L, cfsOne.getCachedCounter(Util.dk("key1").getKey(), cb.build(), c1cfs1, null).count);
    assertEquals(-1L, cfsOne.getCachedCounter(Util.dk("key1").getKey(), cb.build(), c2cfs1, null).count);
    assertEquals(2L, cfsTwo.getCachedCounter(Util.dk("key1").getKey(), cb.build(), c1cfs2, null).count);
    assertEquals(-2L, cfsTwo.getCachedCounter(Util.dk("key1").getKey(), cb.build(), c2cfs2, null).count);
}
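The read-back pattern above (resolve a ColumnMetadata by name, fetch the only row, total the counter context) repeats four times. Using only the calls already shown in these snippets, it could be factored into a small helper; the following is an illustrative sketch, not part of the original test:
    // Illustrative helper: look up the counter column's ColumnMetadata, read the
    // single row at clustering "cc", and assert the counter's accumulated total.
    private static void assertCounterTotal(ColumnFamilyStore cfs, String name, long expected) {
        ColumnMetadata column = cfs.metadata().getColumn(ByteBufferUtil.bytes(name));
        Row row = Util.getOnlyRow(Util.cmd(cfs).includeRow("cc").columns(name).build());
        assertEquals(expected, CounterContext.instance().total(row.getCell(column).value()));
    }
With it, the four cell assertions reduce to calls such as assertCounterTotal(cfsOne, "val", 1L) and assertCounterTotal(cfsTwo, "val2", -2L).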
use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
the class CounterMutationTest method testDeletes.
@Test
public void testDeletes() throws WriteTimeoutException {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF1);
    cfs.truncateBlocking();
    ColumnMetadata cOne = cfs.metadata().getColumn(ByteBufferUtil.bytes("val"));
    ColumnMetadata cTwo = cfs.metadata().getColumn(ByteBufferUtil.bytes("val2"));
    // Do the initial update (+1, -1)
    new CounterMutation(new RowUpdateBuilder(cfs.metadata(), 5, "key1").clustering("cc")
        .add("val", 1L).add("val2", -1L).build(), ConsistencyLevel.ONE).apply();
    Row row = Util.getOnlyRow(Util.cmd(cfs).includeRow("cc").columns("val", "val2").build());
    assertEquals(1L, CounterContext.instance().total(row.getCell(cOne).value()));
    assertEquals(-1L, CounterContext.instance().total(row.getCell(cTwo).value()));
    // Remove the first counter, increment the second counter
    new CounterMutation(new RowUpdateBuilder(cfs.metadata(), 5, "key1").clustering("cc")
        .delete(cOne).add("val2", -5L).build(), ConsistencyLevel.ONE).apply();
    row = Util.getOnlyRow(Util.cmd(cfs).includeRow("cc").columns("val", "val2").build());
    assertEquals(null, row.getCell(cOne));
    assertEquals(-6L, CounterContext.instance().total(row.getCell(cTwo).value()));
    // Increment the first counter, make sure it's still shadowed by the tombstone
    new CounterMutation(new RowUpdateBuilder(cfs.metadata(), 5, "key1").clustering("cc")
        .add("val", 1L).build(), ConsistencyLevel.ONE).apply();
    row = Util.getOnlyRow(Util.cmd(cfs).includeRow("cc").columns("val", "val2").build());
    assertEquals(null, row.getCell(cOne));
    // Get rid of the entire row
    RowUpdateBuilder.deleteRow(cfs.metadata(), 6, "key1", "cc").applyUnsafe();
    Util.assertEmpty(Util.cmd(cfs).includeRow("cc").columns("val", "val2").build());
    // Increment both counters, ensure that both stay dead
    new CounterMutation(new RowUpdateBuilder(cfs.metadata(), 6, "key1").clustering("cc")
        .add("val", 1L).add("val2", 1L).build(), ConsistencyLevel.ONE).apply();
    Util.assertEmpty(Util.cmd(cfs).includeRow("cc").columns("val", "val2").build());
}
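Note the two deletion granularities the test mixes: delete(cOne) tombstones a single cell, addressed through its ColumnMetadata, while RowUpdateBuilder.deleteRow tombstones the whole ("key1", "cc") row. Condensed side by side for contrast (the timestamps 7 and 8 here are arbitrary illustrations, not values from the test):
    // Cell-level delete: only the "val" cell is tombstoned, addressed via ColumnMetadata
    new CounterMutation(new RowUpdateBuilder(cfs.metadata(), 7, "key1").clustering("cc")
        .delete(cOne).build(), ConsistencyLevel.ONE).apply();
    // Row-level delete: the entire row at clustering "cc" is tombstoned in one mutation
    RowUpdateBuilder.deleteRow(cfs.metadata(), 8, "key1", "cc").applyUnsafe();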
use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
the class CounterMutationTest method addTwoAndCheck.
// Applies one counter batch (+addOne to "val", +addTwo to "val2") against "key1"/"cc",
// then reads the row back and verifies both accumulated totals.
private void addTwoAndCheck(ColumnFamilyStore cfs, long addOne, long expectedOne, long addTwo, long expectedTwo) {
    ColumnMetadata cDefOne = cfs.metadata().getColumn(ByteBufferUtil.bytes("val"));
    ColumnMetadata cDefTwo = cfs.metadata().getColumn(ByteBufferUtil.bytes("val2"));
    Mutation m = new RowUpdateBuilder(cfs.metadata(), 5, "key1").clustering("cc")
        .add("val", addOne).add("val2", addTwo).build();
    new CounterMutation(m, ConsistencyLevel.ONE).apply();
    Row row = Util.getOnlyRow(Util.cmd(cfs).includeRow("cc").columns("val", "val2").build());
    assertEquals(expectedOne, CounterContext.instance().total(row.getCell(cDefOne).value()));
    assertEquals(expectedTwo, CounterContext.instance().total(row.getCell(cDefTwo).value()));
}
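A caller of this helper chains increments with the expected running totals. A hypothetical driver in the style of the surrounding tests (the method name and values are illustrative, not the project's exact test):
    @Test
    public void testTwoCounters() throws WriteTimeoutException {
        ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF1);
        cfs.truncateBlocking();
        addTwoAndCheck(cfs, 1L, 1L, -1L, -1L);  // initial update: totals become (+1, -1)
        addTwoAndCheck(cfs, 2L, 3L, -2L, -3L);  // second increment: totals become (+3, -3)
        addTwoAndCheck(cfs, -3L, 0L, 3L, 0L);   // decrement both back to zero
    }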
use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
the class DeletePartitionTest method testDeletePartition.
public void testDeletePartition(DecoratedKey key, boolean flushBeforeRemove, boolean flushAfterRemove) {
    ColumnFamilyStore store = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    ColumnMetadata column = store.metadata().getColumn(ByteBufferUtil.bytes("val"));
    // write
    new RowUpdateBuilder(store.metadata(), 0, key.getKey()).clustering("Column1").add("val", "asdf").build().applyUnsafe();
    // validate that the data was written
    FilteredPartition partition = Util.getOnlyPartition(Util.cmd(store, key).build());
    assertTrue(partition.rowCount() > 0);
    Row r = partition.iterator().next();
    assertTrue(r.getCell(column).value().equals(ByteBufferUtil.bytes("asdf")));
    if (flushBeforeRemove)
        store.forceBlockingFlush();
    // delete the partition
    new Mutation(KEYSPACE1, key).add(PartitionUpdate.fullPartitionDelete(store.metadata(), key, 0, FBUtilities.nowInSeconds())).applyUnsafe();
    if (flushAfterRemove)
        store.forceBlockingFlush();
    // validate removal
    ImmutableBTreePartition partitionUnfiltered = Util.getOnlyPartitionUnfiltered(Util.cmd(store, key).build());
    assertFalse(partitionUnfiltered.partitionLevelDeletion().isLive());
    assertFalse(partitionUnfiltered.iterator().hasNext());
}
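Since the helper parameterizes both flush points, a wrapper can cover all four flush orderings. A hypothetical driver (the method name and keys are illustrative; distinct keys keep an earlier run's partition tombstone, written at the same timestamp as the insert, from shadowing a later run's write):
    @Test
    public void testDeletePartitionWithAndWithoutFlush() {
        testDeletePartition(Util.dk("key1"), true, true);   // flush before and after the delete
        testDeletePartition(Util.dk("key2"), true, false);  // flush only before
        testDeletePartition(Util.dk("key3"), false, true);  // flush only after
        testDeletePartition(Util.dk("key4"), false, false); // memtable-only path
    }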