Usage of org.apache.cassandra.db.ColumnFamily in project eiger by wlloyd: class RowResolverTest, method testResolveSupersetNullTwo.
@Test
public void testResolveSupersetNullTwo() {
    // Resolving a populated column family against a null entry must yield the populated contents.
    ColumnFamily populated = ColumnFamily.create("Keyspace1", "Standard1");
    populated.addColumn(column("c1", "v1", 0));

    ColumnFamily merged = RowRepairResolver.resolveSuperset(Arrays.asList(populated, null));
    assertColumns(merged, "c1");

    // The populated replica already matches the merged result, so no repair diff is produced...
    assertNull(ColumnFamily.diff(populated, merged));
    // ...while the null replica would need "c1" sent to it.
    assertColumns(ColumnFamily.diff(null, merged), "c1");
}
Usage of org.apache.cassandra.db.ColumnFamily in project eiger by wlloyd: class RowResolverTest, method testResolveMultipleDeleted.
@Test
public void testResolveMultipleDeleted() {
    // Interleave row deletions with column writes and hand them to the resolver
    // out of timestamp order; only data newer than the latest deletion survives.
    ColumnFamily deletedAtZero = ColumnFamily.create("Keyspace1", "Standard1");
    deletedAtZero.delete((int) (System.currentTimeMillis() / 1000), 0);

    // Written after the first deletion (timestamp 1 > 0).
    ColumnFamily writtenAtOne = ColumnFamily.create("Keyspace1", "Standard1");
    writtenAtOne.addColumn(column("one", "A", 1));
    writtenAtOne.addColumn(column("two", "A", 1));

    // Written after the second deletion (timestamp 3 > 2).
    ColumnFamily writtenAtThree = ColumnFamily.create("Keyspace1", "Standard1");
    writtenAtThree.addColumn(column("two", "B", 3));

    ColumnFamily deletedAtTwo = ColumnFamily.create("Keyspace1", "Standard1");
    deletedAtTwo.delete((int) (System.currentTimeMillis() / 1000), 2);

    ColumnFamily resolved = RowRepairResolver.resolveSuperset(
            Arrays.asList(deletedAtZero, writtenAtOne, writtenAtThree, deletedAtTwo));

    // Expect the deletion marker plus the single column written after the latest delete.
    assertColumns(resolved, "two");
    assertColumn(resolved, "two", "B", 3);
    assertTrue(resolved.isMarkedForDelete());
    assertEquals(2, resolved.getMarkedForDeleteAt());

    // Repeat the same scenario with a super column family.
    ColumnFamily superDeletedAtZero = ColumnFamily.create("Keyspace1", "Super1");
    superDeletedAtZero.delete((int) (System.currentTimeMillis() / 1000), 0);

    // Subcolumns written after the first deletion.
    ColumnFamily superWrittenAtOne = ColumnFamily.create("Keyspace1", "Super1");
    superWrittenAtOne.addColumn(superColumn(superWrittenAtOne, "super1", column("one", "A", 1), column("two", "A", 1)));

    // Subcolumns written after the second deletion.
    ColumnFamily superWrittenAtThree = ColumnFamily.create("Keyspace1", "Super1");
    superWrittenAtThree.addColumn(superColumn(superWrittenAtThree, "super1", column("two", "B", 3)));
    superWrittenAtThree.addColumn(superColumn(superWrittenAtThree, "super2", column("three", "A", 3), column("four", "A", 3)));

    ColumnFamily superDeletedAtTwo = ColumnFamily.create("Keyspace1", "Super1");
    superDeletedAtTwo.delete((int) (System.currentTimeMillis() / 1000), 2);

    ColumnFamily superResolved = RowRepairResolver.resolveSuperset(
            Arrays.asList(superDeletedAtZero, superWrittenAtOne, superWrittenAtThree, superDeletedAtTwo));

    // Expect the deletion marker plus the two super columns whose subcolumns postdate the latest delete.
    assertColumns(superResolved, "super1", "super2");
    assertSubColumns(superResolved, "super1", "two");
    assertSubColumn(superResolved, "super1", "two", "B", 3);
    assertSubColumns(superResolved, "super2", "four", "three");
    assertSubColumn(superResolved, "super2", "three", "A", 3);
    assertSubColumn(superResolved, "super2", "four", "A", 3);
    assertTrue(superResolved.isMarkedForDelete());
    assertEquals(2, superResolved.getMarkedForDeleteAt());
}
Usage of org.apache.cassandra.db.ColumnFamily in project eiger by wlloyd: class RowResolverTest, method testResolveSupersetNullOne.
@Test
public void testResolveSupersetNullOne() {
    // Mirror of the null-second case: the null entry comes first in the list.
    ColumnFamily populated = ColumnFamily.create("Keyspace1", "Standard1");
    populated.addColumn(column("c2", "v2", 1));

    ColumnFamily merged = RowRepairResolver.resolveSuperset(Arrays.asList(null, populated));
    assertColumns(merged, "c2");

    // The null replica would need "c2" repaired onto it...
    assertColumns(ColumnFamily.diff(null, merged), "c2");
    // ...while the populated replica already matches the merged result.
    assertNull(ColumnFamily.diff(populated, merged));
}
Usage of org.apache.cassandra.db.ColumnFamily in project eiger by wlloyd: class CompactionsPurgeTest, method testCompactionPurgeCachedRow.
@Test
public void testCompactionPurgeCachedRow() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();

    String tableName = "RowCacheSpace";
    String cfName = "CachedCF";
    Table table = Table.open(tableName);
    ColumnFamilyStore store = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");

    // Write ten columns at timestamp 0.
    RowMutation insert = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        insert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    insert.apply();

    // Read the row so it gets pulled into the row cache.
    store.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));

    // Delete the whole row at timestamp 1, shadowing every column above.
    RowMutation deletion = new RowMutation(tableName, key.key);
    deletion.delete(new QueryPath(cfName, null, null), 1);
    deletion.apply();

    // Flush and major-compact so the row tombstone is purged.
    store.forceBlockingFlush();
    Util.compactAll(store).get();

    // Re-insert at timestamp 0, which is below the (now purged) deletion timestamp.
    RowMutation reinsert = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        reinsert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    reinsert.apply();

    // Verify the re-insert is visible: all ten columns present, none shadowed by a tombstone.
    ColumnFamily result = store.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertEquals(10, result.getColumnCount());
    for (IColumn c : result) {
        assert !c.isMarkedForDelete();
    }
}
Usage of org.apache.cassandra.db.ColumnFamily in project eiger by wlloyd: class CompactionsPurgeTest, method testCompactionPurgeTombstonedRow.
@Test
public void testCompactionPurgeTombstonedRow() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();

    String tableName = "Keyspace1";
    String cfName = "Standard1";
    Table table = Table.open(tableName);
    ColumnFamilyStore store = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");

    // Write ten columns with ascending timestamps 0..9.
    RowMutation insert = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        insert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    insert.apply();

    // Delete the row at timestamp 4 so only some of the columns are shadowed.
    RowMutation deletion = new RowMutation(tableName, key.key);
    deletion.delete(new QueryPath(cfName, null, null), 4);
    deletion.apply();

    // Flush and major-compact, purging the row tombstone.
    store.forceBlockingFlush();
    Util.compactAll(store).get();

    // Re-insert the first five columns at timestamps 0..4, below the (purged) deletion timestamp.
    RowMutation reinsert = new RowMutation(tableName, key.key);
    for (int i = 0; i < 5; i++) {
        reinsert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    reinsert.apply();

    // Verify the re-insert is visible: all ten columns present, none shadowed by a tombstone.
    ColumnFamily result = store.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertEquals(10, result.getColumnCount());
    for (IColumn c : result) {
        assert !c.isMarkedForDelete();
    }
}
Aggregations