Use of org.apache.cassandra.config.CFMetaData in project eiger by wlloyd.
The class ThriftValidationTest, method testColumnNameEqualToKeyAlias.
@Test
public void testColumnNameEqualToKeyAlias() {
    CFMetaData metaData = Schema.instance.getCFMetaData("Keyspace1", "Standard1");
    CfDef newMetadata = metaData.toThrift();
    boolean gotException = false;
    // set key_alias = "id"; on its own this should not throw an InvalidRequestException
    newMetadata.setKey_alias(AsciiType.instance.decompose("id"));
    try {
        ThriftValidation.validateCfDef(newMetadata, metaData);
    } catch (InvalidRequestException e) {
        gotException = true;
    }
    assert !gotException : "got unexpected InvalidRequestException";
    // now add a column also named "id"; the clash with the key alias must be rejected
    newMetadata.addToColumn_metadata(new ColumnDef(UTF8Type.instance.decompose("id"), "org.apache.cassandra.db.marshal.UTF8Type"));
    gotException = false;
    try {
        ThriftValidation.validateCfDef(newMetadata, metaData);
    } catch (InvalidRequestException e) {
        gotException = true;
    }
    assert gotException : "expected InvalidRequestException but none was thrown";
}
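For readers adapting this check, a minimal sketch of the same second phase using JUnit 4's fail() in place of the boolean flag follows; it assumes the same classes and imports as the test above, and the method name is invented.

import static org.junit.Assert.fail;

@Test
public void keyAliasClashSketch() throws Exception {
    CFMetaData metaData = Schema.instance.getCFMetaData("Keyspace1", "Standard1");
    CfDef def = metaData.toThrift();
    def.setKey_alias(AsciiType.instance.decompose("id"));
    // declaring a column whose name equals the key alias must be rejected
    def.addToColumn_metadata(new ColumnDef(UTF8Type.instance.decompose("id"), "org.apache.cassandra.db.marshal.UTF8Type"));
    try {
        ThriftValidation.validateCfDef(def, metaData);
        fail("expected InvalidRequestException for column name equal to key alias");
    } catch (InvalidRequestException expected) {
        // expected: the key alias and a declared column may not share a name
    }
}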
Use of org.apache.cassandra.config.CFMetaData in project brisk by riptano.
The class SchemaManagerService, method buildTable.
private Table buildTable(CfDef cfDef) {
    Table table = new Table();
    table.setDbName(cfDef.keyspace);
    table.setTableName(cfDef.name);
    table.setTableType(TableType.EXTERNAL_TABLE.toString());
    table.putToParameters("EXTERNAL", "TRUE");
    table.putToParameters("cassandra.ks.name", cfDef.keyspace);
    table.putToParameters("cassandra.cf.name", cfDef.name);
    table.putToParameters("cassandra.slice.predicate.size", "100");
    table.putToParameters("storage_handler", "org.apache.hadoop.hive.cassandra.CassandraStorageHandler");
    table.setPartitionKeys(new ArrayList<FieldSchema>());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setInputFormat("org.apache.hadoop.hive.cassandra.input.HiveCassandraStandardColumnInputFormat");
    sd.setOutputFormat("org.apache.hadoop.hive.cassandra.output.HiveCassandraOutputFormat");
    sd.setParameters(new HashMap<String, String>());
    try {
        sd.setLocation(warehouse.getDefaultTablePath(cfDef.keyspace, cfDef.name).toString());
    } catch (MetaException me) {
        log.error("could not build path information correctly", me);
    }
    SerDeInfo serde = new SerDeInfo();
    serde.setSerializationLib("org.apache.hadoop.hive.cassandra.serde.CassandraColumnSerDe");
    serde.putToParameters("serialization.format", "1");
    // build the cassandra.columns.mapping and validator strings from the CF definition
    StringBuilder mapping = new StringBuilder();
    StringBuilder validator = new StringBuilder();
    try {
        // converting to CFMetaData doubles as validation of the CfDef; failures surface below
        CFMetaData cfm = CFMetaData.fromThrift(cfDef);
        AbstractType keyValidator = cfDef.key_validation_class != null ? TypeParser.parse(cfDef.key_validation_class) : BytesType.instance;
        addTypeToStorageDescriptor(sd, ByteBufferUtil.bytes("row_key"), keyValidator, keyValidator);
        mapping.append(":key");
        validator.append(keyValidator.toString());
        for (ColumnDef column : cfDef.getColumn_metadata()) {
            addTypeToStorageDescriptor(sd, column.name, TypeParser.parse(cfDef.comparator_type), TypeParser.parse(column.getValidation_class()));
            try {
                mapping.append(",");
                mapping.append(ByteBufferUtil.string(column.name));
                validator.append(",");
                validator.append(column.getValidation_class());
            } catch (CharacterCodingException e) {
                log.error("could not build column mapping correctly", e);
            }
        }
        serde.putToParameters("cassandra.columns.mapping", mapping.toString());
        serde.putToParameters("cassandra.cf.validatorType", validator.toString());
        sd.setSerdeInfo(serde);
    } catch (ConfigurationException ce) {
        throw new CassandraHiveMetaStoreException("Problem converting comparator type: " + cfDef.comparator_type, ce);
    } catch (InvalidRequestException ire) {
        throw new CassandraHiveMetaStoreException("Problem parsing CfDef: " + cfDef.name, ire);
    }
    table.setSd(sd);
    if (log.isDebugEnabled())
        log.debug("constructed table for CF:{} {}", cfDef.name, table.toString());
    return table;
}
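To make the mapping format concrete, here is a hedged, self-contained sketch of the strings the loop above produces for a hypothetical column family with two declared columns; the column names and validator class choices are invented for illustration.

// the row key always maps to ":key" with its validator first in the list
StringBuilder mapping = new StringBuilder(":key");
StringBuilder validator = new StringBuilder("org.apache.cassandra.db.marshal.BytesType");
String[][] columns = {
    { "first_name", "org.apache.cassandra.db.marshal.UTF8Type" },
    { "age", "org.apache.cassandra.db.marshal.Int32Type" }
};
for (String[] col : columns) {
    mapping.append(",").append(col[0]);
    validator.append(",").append(col[1]);
}
// mapping   -> ":key,first_name,age"
// validator -> "org.apache.cassandra.db.marshal.BytesType,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type"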
Use of org.apache.cassandra.config.CFMetaData in project brisk by riptano.
The class BriskErrorServer, method describe_keyspace.
public KsDef describe_keyspace(String table) throws NotFoundException, InvalidRequestException, TException {
    // "table" is the keyspace name; the Thrift API historically used "table" for keyspaces
    KSMetaData ksm = DatabaseDescriptor.getTableDefinition(table);
    if (ksm == null)
        throw new NotFoundException();
    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : ksm.cfMetaData().values())
        cfDefs.add(CFMetaData.convertToThrift(cfm));
    KsDef ksdef = new KsDef(ksm.name, ksm.strategyClass.getName(), cfDefs);
    ksdef.setStrategy_options(ksm.strategyOptions);
    return ksdef;
}
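On the client side, the same information is reachable through the standard Thrift API. A hedged fragment, assuming a Cassandra node listening on localhost:9160 with framed transport (host, port, and keyspace name are invented, and imports for the Thrift classes are omitted):

TTransport transport = new TFramedTransport(new TSocket("localhost", 9160));
transport.open();
Cassandra.Client client = new Cassandra.Client(new TBinaryProtocol(transport));
// returns the KsDef built by describe_keyspace above
KsDef ksDef = client.describe_keyspace("Keyspace1");
for (CfDef cfDef : ksDef.getCf_defs())
    System.out.println(cfDef.getName() + " -> " + cfDef.getComparator_type());
transport.close();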
Use of org.apache.cassandra.config.CFMetaData in project titan by thinkaurelius.
The class CassandraEmbeddedKeyColumnValueStore, method getKeySlice.
/**
* Create a RangeSliceCommand and run it against the StorageProxy.
* <p>
* To match the behavior of the standard Cassandra thrift API endpoint, the
* {@code nowMillis} argument should be the number of milliseconds since the
* UNIX Epoch (e.g. System.currentTimeMillis() or equivalent obtained
* through a {@link TimestampProvider}). This is per
* {@link org.apache.cassandra.thrift.CassandraServer#get_range_slices(ColumnParent, SlicePredicate, KeyRange, ConsistencyLevel)},
* which passes the server's System.currentTimeMillis() to the
* {@code RangeSliceCommand} constructor.
*/
private List<Row> getKeySlice(Token start, Token end, @Nullable SliceQuery sliceQuery, int pageSize, long nowMillis) throws BackendException {
    IPartitioner partitioner = StorageService.getPartitioner();
    SliceRange columnSlice = new SliceRange();
    if (sliceQuery == null) {
        columnSlice.setStart(ArrayUtils.EMPTY_BYTE_ARRAY).setFinish(ArrayUtils.EMPTY_BYTE_ARRAY).setCount(5);
    } else {
        columnSlice.setStart(sliceQuery.getSliceStart().asByteBuffer()).setFinish(sliceQuery.getSliceEnd().asByteBuffer()).setCount(sliceQuery.hasLimit() ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }
    /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
    SlicePredicate predicate = new SlicePredicate().setSlice_range(columnSlice);
    RowPosition startPosition = start.minKeyBound(partitioner);
    RowPosition endPosition = end.minKeyBound(partitioner);
    List<Row> rows;
    try {
        CFMetaData cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
        IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, cfm, null);
        RangeSliceCommand cmd = new RangeSliceCommand(keyspace, columnFamily, nowMillis, filter, new Bounds<RowPosition>(startPosition, endPosition), pageSize);
        rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }
    return rows;
}
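A minimal usage sketch for the helper above, called from inside the same class; startToken and endToken are assumed to come from the partitioner (for example via partitioner.getToken(keyByteBuffer)), and the page size of 100 is an invented illustration value:

long nowMillis = System.currentTimeMillis(); // or a TimestampProvider, per the Javadoc
List<Row> page = getKeySlice(startToken, endToken, null, 100, nowMillis);
for (Row row : page) {
    if (row.cf == null)
        continue; // skip empty rows ("range ghosts") that carry no column data
    // process row.key and row.cf here
}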
Use of org.apache.cassandra.config.CFMetaData in project eiger by wlloyd.
The class SSTableImport, method addColumnsToCF.
/**
* Add columns to a column family.
*
* @param row the columns associated with a row
* @param superName name of the super column if any
* @param cfamily the column family to add columns to
*/
private static void addColumnsToCF(List<?> row, ByteBuffer superName, ColumnFamily cfamily) {
    CFMetaData cfm = cfamily.metadata();
    assert cfm != null;
    for (Object c : row) {
        JsonColumn col = new JsonColumn<List>((List) c, cfm, (superName != null));
        QueryPath path = new QueryPath(cfm.cfName, superName, col.getName());
        if (col.isExpiring()) {
            cfamily.addColumn(null, new ExpiringColumn(col.getName(), col.getValue(), col.timestamp, col.ttl, col.localExpirationTime));
        } else if (col.isCounter()) {
            cfamily.addColumn(null, new CounterColumn(col.getName(), col.getValue(), col.timestamp, col.timestampOfLastDelete));
        } else if (col.isDeleted()) {
            cfamily.addTombstone(path, col.getValue(), col.timestamp);
        } else {
            cfamily.addColumn(path, col.getValue(), col.timestamp);
        }
    }
}
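For context, a hedged sketch of the kind of row data this method consumes, following the json2sstable column format of [name, value, timestamp, optional flag and extras]; the hex strings, timestamps, TTL, and expiration time below are invented, and the "e" flag marking an expiring column reflects my reading of the SSTableExport/SSTableImport format. Assumes java.util.Arrays is imported and the call is made from inside SSTableImport.

// a regular column: [name, value, timestamp], with name and value hex-encoded
List<Object> regular = Arrays.asList("6e616d65", "76616c7565", 1294532915068L);
// an expiring column: [name, value, timestamp, "e", ttl, localExpirationTime]
List<Object> expiring = Arrays.asList("747466", "76616c7565", 1294532915068L, "e", 3600, 1294536515);
addColumnsToCF(Arrays.asList(regular, expiring), null, cfamily); // null superName: a standard (non-super) CF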