Usage of org.apache.cassandra.db.marshal.BytesType in the project eiger (by wlloyd):
class DefsTest, method testUpdateColumnFamilyNoIndexes.
/**
 * Exercises UpdateColumnFamily migrations against a freshly created keyspace.
 * Mutable fields (comment, read repair chance, gc grace seconds, default
 * validator, compaction thresholds) must be applied, while immutable fields
 * (id, name, keyspace, column family type, comparator) and an invalid
 * min/max threshold pair must be rejected with ConfigurationException.
 */
@Test
public void testUpdateColumnFamilyNoIndexes() throws ConfigurationException, IOException, ExecutionException, InterruptedException {
    // create a keyspace with a cf to update.
    CFMetaData cf = addTestCF("UpdatedCfKs", "Standard1added", "A new cf that will be updated");
    KSMetaData ksm = KSMetaData.testMetadata(cf.ksName, SimpleStrategy.class, KSMetaData.optsWithRF(1), cf);
    new AddKeyspace(ksm).apply();
    assert Schema.instance.getTableDefinition(cf.ksName) != null;
    assert Schema.instance.getTableDefinition(cf.ksName) == ksm;
    assert Schema.instance.getCFMetaData(cf.ksName, cf.cfName) != null;

    // updating certain fields should fail.
    org.apache.cassandra.db.migration.avro.CfDef cf_def = cf.toAvro();
    cf_def.column_metadata = new ArrayList<org.apache.cassandra.db.migration.avro.ColumnDef>();
    cf_def.default_validation_class = "BytesType";
    cf_def.min_compaction_threshold = 5;
    cf_def.max_compaction_threshold = 31;

    // test valid operations.
    cf_def.comment = "Modified comment";
    // doesn't get set back here.
    new UpdateColumnFamily(cf_def).apply();
    cf_def.read_repair_chance = 0.23;
    new UpdateColumnFamily(cf_def).apply();
    cf_def.gc_grace_seconds = 12;
    new UpdateColumnFamily(cf_def).apply();
    cf_def.default_validation_class = "UTF8Type";
    new UpdateColumnFamily(cf_def).apply();
    cf_def.min_compaction_threshold = 3;
    new UpdateColumnFamily(cf_def).apply();
    cf_def.max_compaction_threshold = 33;
    new UpdateColumnFamily(cf_def).apply();

    // check the cumulative effect.
    assert Schema.instance.getCFMetaData(cf.ksName, cf.cfName).getComment().equals(cf_def.comment);
    assert Schema.instance.getCFMetaData(cf.ksName, cf.cfName).getReadRepairChance() == cf_def.read_repair_chance;
    assert Schema.instance.getCFMetaData(cf.ksName, cf.cfName).getGcGraceSeconds() == cf_def.gc_grace_seconds;
    assert Schema.instance.getCFMetaData(cf.ksName, cf.cfName).getDefaultValidator() == UTF8Type.instance;

    // todo: we probably don't need to reset old values in the catches anymore.
    // make sure some invalid operations fail.
    int oldId = cf_def.id;
    try {
        cf_def.id++;
        cf.apply(cf_def);
        throw new AssertionError("Should have blown up when you used a different id.");
    } catch (ConfigurationException expected) {
        cf_def.id = oldId;
    }
    CharSequence oldStr = cf_def.name;
    try {
        cf_def.name = cf_def.name + "_renamed";
        cf.apply(cf_def);
        throw new AssertionError("Should have blown up when you used a different name.");
    } catch (ConfigurationException expected) {
        cf_def.name = oldStr;
    }
    oldStr = cf_def.keyspace;
    try {
        cf_def.keyspace = oldStr + "_renamed";
        cf.apply(cf_def);
        throw new AssertionError("Should have blown up when you used a different keyspace.");
    } catch (ConfigurationException expected) {
        cf_def.keyspace = oldStr;
    }
    try {
        cf_def.column_type = ColumnFamilyType.Super.name();
        cf.apply(cf_def);
        // fixed typo in the message ("blwon" -> "blown")
        throw new AssertionError("Should have blown up when you used a different cf type.");
    } catch (ConfigurationException expected) {
        cf_def.column_type = ColumnFamilyType.Standard.name();
    }
    oldStr = cf_def.comparator_type;
    try {
        cf_def.comparator_type = BytesType.class.getSimpleName();
        cf.apply(cf_def);
        throw new AssertionError("Should have blown up when you used a different comparator.");
    } catch (ConfigurationException expected) {
        cf_def.comparator_type = UTF8Type.class.getSimpleName();
    }
    try {
        cf_def.min_compaction_threshold = 34;
        cf.apply(cf_def);
        throw new AssertionError("Should have blown up when min > max.");
    } catch (ConfigurationException expected) {
        cf_def.min_compaction_threshold = 3;
    }
    try {
        cf_def.max_compaction_threshold = 2;
        cf.apply(cf_def);
        // message corrected: this scenario sets max (2) below min (3)
        throw new AssertionError("Should have blown up when max < min.");
    } catch (ConfigurationException expected) {
        cf_def.max_compaction_threshold = 33;
    }
}
Usage of org.apache.cassandra.db.marshal.BytesType in the project brisk (by riptano):
class CassandraStorage, method putNext.
/**
 * Writes one Pig tuple as a Cassandra row mutation. The tuple must have the
 * shape (key, {(column, value)...}) for a standard column family or
 * (key, {supercolumn:{(column, value)...}...}) for a super column family.
 * A bag entry with an empty sub-column bag, or a null column value, is
 * emitted as a deletion.
 *
 * @param t the Pig tuple to store
 * @throws IOException on a shape mismatch (ClassCastException) or if the
 *         underlying writer is interrupted
 */
public void putNext(Tuple t) throws ExecException, IOException {
    ByteBuffer key = objToBB(t.get(0));
    DefaultDataBag pairs = (DefaultDataBag) t.get(1);
    ArrayList<Mutation> mutationList = new ArrayList<Mutation>();
    CfDef cfDef = getCfDef();
    List<AbstractType> marshallers = getDefaultMarshallers(cfDef);
    Map<ByteBuffer, AbstractType> validators = getValidatorMap(cfDef);
    try {
        for (Tuple pair : pairs) {
            Mutation mutation = new Mutation();
            if (DataType.findType(pair.get(1)) == DataType.BAG) {
                // supercolumn
                org.apache.cassandra.thrift.SuperColumn sc = new org.apache.cassandra.thrift.SuperColumn();
                sc.name = objToBB(pair.get(0));
                ArrayList<org.apache.cassandra.thrift.Column> columns = new ArrayList<org.apache.cassandra.thrift.Column>();
                for (Tuple subcol : (DefaultDataBag) pair.get(1)) {
                    org.apache.cassandra.thrift.Column column = new org.apache.cassandra.thrift.Column();
                    column.name = objToBB(subcol.get(0));
                    column.value = objToBB(subcol.get(1));
                    // microsecond-style timestamp derived from wall-clock millis
                    column.setTimestamp(System.currentTimeMillis() * 1000);
                    columns.add(column);
                }
                if (columns.isEmpty()) {
                    // an empty sub-column bag means: delete the whole supercolumn
                    mutation.deletion = new Deletion();
                    mutation.deletion.super_column = objToBB(pair.get(0));
                    mutation.deletion.setTimestamp(System.currentTimeMillis() * 1000);
                } else {
                    sc.columns = columns;
                    mutation.column_or_supercolumn = new ColumnOrSuperColumn();
                    mutation.column_or_supercolumn.super_column = sc;
                }
            } else {
                // assume column since it couldn't be anything else
                if (pair.get(1) == null) {
                    // a null value means: delete this single column
                    mutation.deletion = new Deletion();
                    mutation.deletion.predicate = new org.apache.cassandra.thrift.SlicePredicate();
                    mutation.deletion.predicate.column_names = Arrays.asList(objToBB(pair.get(0)));
                    mutation.deletion.setTimestamp(System.currentTimeMillis() * 1000);
                } else {
                    org.apache.cassandra.thrift.Column column = new org.apache.cassandra.thrift.Column();
                    column.name = marshallers.get(0).decompose((pair.get(0)));
                    // look the validator up once instead of three times
                    AbstractType validator = validators.get(column.name);
                    if (validator == null) {
                        // Have to special case BytesType to convert DataByteArray into ByteBuffer
                        if (marshallers.get(1) instanceof BytesType) {
                            column.value = objToBB(pair.get(1));
                        } else {
                            column.value = marshallers.get(1).decompose(pair.get(1));
                        }
                    } else {
                        column.value = validator.decompose(pair.get(1));
                    }
                    column.setTimestamp(System.currentTimeMillis() * 1000);
                    mutation.column_or_supercolumn = new ColumnOrSuperColumn();
                    mutation.column_or_supercolumn.column = column;
                }
            }
            mutationList.add(mutation);
        }
    } catch (ClassCastException e) {
        throw new IOException(e + " Output must be (key, {(column,value)...}) for ColumnFamily or (key, {supercolumn:{(column,value)...}...}) for SuperColumnFamily");
    }
    try {
        writer.write(key, mutationList);
    } catch (InterruptedException e) {
        // restore the interrupt flag before surfacing the failure
        Thread.currentThread().interrupt();
        throw new IOException(e);
    }
}
Usage of org.apache.cassandra.db.marshal.BytesType in the project brisk (by riptano):
class CassandraStorage, method columnToTuple.
/**
 * Converts a Cassandra column (or supercolumn) into a two-element Pig tuple
 * of (name, value). Standard columns are decoded with the default
 * marshallers, or a per-column validator when one is configured; BytesType
 * values are special-cased because compose() returns a ByteBuffer rather
 * than the DataByteArray Pig expects. For a supercolumn, element 1 is a bag
 * of the recursively converted sub-columns.
 */
private Tuple columnToTuple(ByteBuffer name, IColumn col, CfDef cfDef) throws IOException {
    Tuple pair = TupleFactory.getInstance().newTuple(2);
    List<AbstractType> marshallers = getDefaultMarshallers(cfDef);
    Map<ByteBuffer, AbstractType> validators = getValidatorMap(cfDef);
    if (col instanceof Column) {
        // standard column: decode the name with the comparator marshaller
        pair.set(0, marshallers.get(0).compose(name));
        // look the validator up once instead of twice
        AbstractType validator = validators.get(name);
        if (validator == null) {
            // Have to special case BytesType because compose returns a ByteBuffer
            if (marshallers.get(1) instanceof BytesType) {
                pair.set(1, new DataByteArray(ByteBufferUtil.getArray(col.value())));
            } else {
                pair.set(1, marshallers.get(1).compose(col.value()));
            }
        } else {
            pair.set(1, validator.compose(col.value()));
        }
        return pair;
    }
    // supercolumn: element 1 is a bag of the converted sub-columns.
    // NOTE(review): element 0 (the supercolumn name) is never set in this
    // branch and stays null — confirm whether callers rely on that.
    ArrayList<Tuple> subcols = new ArrayList<Tuple>();
    for (IColumn subcol : col.getSubColumns())
        subcols.add(columnToTuple(subcol.name(), subcol, cfDef));
    pair.set(1, new DefaultDataBag(subcols));
    return pair;
}
Aggregations