use of org.apache.cassandra.db.SuperColumn in project brisk by riptano.
In the class CassandraStorage, the method putNext:
/**
 * Writes one Pig tuple as a row of Cassandra mutations.
 * Expected input shape: (key, {(column, value)...}) for a standard ColumnFamily,
 * or (key, {supercolumn:{(column, value)...}...}) for a SuperColumnFamily.
 *
 * A null value (or an empty subcolumn bag) is translated into a Deletion.
 *
 * @param t tuple whose first field is the row key and second field is a bag of pairs
 * @throws ExecException if a tuple field cannot be read
 * @throws IOException if the tuple shape is wrong or the underlying write is interrupted
 */
public void putNext(Tuple t) throws ExecException, IOException {
    ByteBuffer key = objToBB(t.get(0));
    DefaultDataBag pairs = (DefaultDataBag) t.get(1);
    ArrayList<Mutation> mutationList = new ArrayList<Mutation>();
    CfDef cfDef = getCfDef();
    List<AbstractType> marshallers = getDefaultMarshallers(cfDef);
    Map<ByteBuffer, AbstractType> validators = getValidatorMap(cfDef);
    try {
        for (Tuple pair : pairs) {
            Mutation mutation = new Mutation();
            // Microsecond-resolution timestamp, captured once per mutation so every
            // column written for this pair carries a consistent timestamp.
            long timestamp = System.currentTimeMillis() * 1000;
            if (DataType.findType(pair.get(1)) == DataType.BAG) {
                // supercolumn: second field is a bag of (subcolumn, value) tuples
                org.apache.cassandra.thrift.SuperColumn sc = new org.apache.cassandra.thrift.SuperColumn();
                sc.name = objToBB(pair.get(0));
                ArrayList<org.apache.cassandra.thrift.Column> columns = new ArrayList<org.apache.cassandra.thrift.Column>();
                for (Tuple subcol : (DefaultDataBag) pair.get(1)) {
                    org.apache.cassandra.thrift.Column column = new org.apache.cassandra.thrift.Column();
                    column.name = objToBB(subcol.get(0));
                    column.value = objToBB(subcol.get(1));
                    column.setTimestamp(timestamp);
                    columns.add(column);
                }
                if (columns.isEmpty()) {
                    // an empty bag means "delete the whole supercolumn"
                    mutation.deletion = new Deletion();
                    mutation.deletion.super_column = objToBB(pair.get(0));
                    mutation.deletion.setTimestamp(timestamp);
                } else {
                    sc.columns = columns;
                    mutation.column_or_supercolumn = new ColumnOrSuperColumn();
                    mutation.column_or_supercolumn.super_column = sc;
                }
            } else {
                // assume column since it couldn't be anything else
                if (pair.get(1) == null) {
                    // null value means "delete this column"
                    mutation.deletion = new Deletion();
                    mutation.deletion.predicate = new org.apache.cassandra.thrift.SlicePredicate();
                    mutation.deletion.predicate.column_names = Arrays.asList(objToBB(pair.get(0)));
                    mutation.deletion.setTimestamp(timestamp);
                } else {
                    org.apache.cassandra.thrift.Column column = new org.apache.cassandra.thrift.Column();
                    column.name = marshallers.get(0).decompose((pair.get(0)));
                    AbstractType validator = validators.get(column.name);
                    if (validator == null) {
                        // Have to special case BytesType to convert DataByteArray into ByteBuffer
                        if (marshallers.get(1) instanceof BytesType) {
                            column.value = objToBB(pair.get(1));
                        } else {
                            column.value = marshallers.get(1).decompose(pair.get(1));
                        }
                    } else {
                        column.value = validator.decompose(pair.get(1));
                    }
                    column.setTimestamp(timestamp);
                    mutation.column_or_supercolumn = new ColumnOrSuperColumn();
                    mutation.column_or_supercolumn.column = column;
                }
            }
            mutationList.add(mutation);
        }
    } catch (ClassCastException e) {
        // Chain the cause so the original stack trace is preserved (the old code
        // string-concatenated the exception, losing it).
        throw new IOException("Output must be (key, {(column,value)...}) for ColumnFamily or (key, {supercolumn:{(column,value)...}...}) for SuperColumnFamily", e);
    }
    try {
        writer.write(key, mutationList);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}
use of org.apache.cassandra.db.SuperColumn in project eiger by wlloyd.
In the class RowResolverTest, the method testResolveDeletedSuper:
@Test
public void testResolveDeletedSuper() {
    // A subcolumn that outlives its parent supercolumn's tombstone,
    // but is still older than the row-level deletion.
    ColumnFamily withSuper = ColumnFamily.create("Keyspace1", "Super1");
    SuperColumn superCol = superColumn(withSuper, "super-foo", column("one", "A", 1));
    superCol.delete((int) (System.currentTimeMillis() / 1000), 0);
    withSuper.addColumn(superCol);

    ColumnFamily rowDeleted = ColumnFamily.create("Keyspace1", "Super1");
    rowDeleted.delete((int) (System.currentTimeMillis() / 1000), 2);

    ColumnFamily resolved = RowRepairResolver.resolveSuperset(Arrays.asList(withSuper, rowDeleted));

    // The resolved cf should carry the row deletion and no surviving columns.
    assertColumns(resolved);
    assertTrue(resolved.isMarkedForDelete());
    assertEquals(2, resolved.getMarkedForDeleteAt());
}
use of org.apache.cassandra.db.SuperColumn in project eiger by wlloyd.
In the class CompactionsPurgeTest, the method testCompactionPurgeTombstonedSuperColumn:
@Test
public void testCompactionPurgeTombstonedSuperColumn() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    String keyspaceName = "Keyspace1";
    String columnFamily = "Super5";
    ColumnFamilyStore store = Table.open(keyspaceName).getColumnFamilyStore(columnFamily);
    DecoratedKey rowKey = Util.dk("key5");
    ByteBuffer superName = ByteBufferUtil.bytes("sc");

    // Insert ten subcolumns with timestamps 0..9.
    RowMutation initialInsert = new RowMutation(keyspaceName, rowKey.key);
    for (int ts = 0; ts < 10; ts++) {
        initialInsert.add(new QueryPath(columnFamily, superName, ByteBuffer.wrap(String.valueOf(ts).getBytes())), ByteBufferUtil.EMPTY_BYTE_BUFFER, ts);
    }
    initialInsert.apply();

    // Tombstone the supercolumn at timestamp 4, so only some subcolumns are shadowed.
    RowMutation deletion = new RowMutation(keyspaceName, rowKey.key);
    deletion.delete(new QueryPath(columnFamily, superName, null), 4);
    deletion.apply();

    // Flush and run a major compaction.
    store.forceBlockingFlush();
    Util.compactAll(store).get();

    // Re-insert five subcolumns whose timestamps (0..4) fall below the deletion.
    RowMutation reinsert = new RowMutation(keyspaceName, rowKey.key);
    for (int ts = 0; ts < 5; ts++) {
        reinsert.add(new QueryPath(columnFamily, superName, ByteBuffer.wrap(String.valueOf(ts).getBytes())), ByteBufferUtil.EMPTY_BYTE_BUFFER, ts);
    }
    reinsert.apply();

    // Verify the second insert went in: the supercolumn holds all ten subcolumns.
    ColumnFamily cf = store.getColumnFamily(QueryFilter.getIdentityFilter(rowKey, new QueryPath(columnFamily)));
    SuperColumn sc = (SuperColumn) cf.getColumn(superName);
    assert sc != null;
    assertEquals(10, sc.getColumnCount());
}
Aggregations