Use of org.apache.cassandra.db.marshal.AbstractType in the project eiger by wlloyd.
The class ThriftValidation, method validateColumnData.
/**
* Validates the data part of the column (everything in the Column object but the name, which is assumed to be valid)
*/
/**
 * Validates the data part of the column (everything in the Column object but the name, which is assumed to be valid).
 *
 * @param metadata    column family metadata used to look up the value validator and comparators
 * @param column      the Thrift column whose value, timestamp and TTL are checked
 * @param isSubColumn whether the column name should be rendered with the subcolumn comparator in error messages
 * @throws InvalidRequestException if the value or timestamp is unset, the value fails validation,
 *         or an indexed value exceeds the 64K limit
 */
public static void validateColumnData(CFMetaData metadata, Column column, boolean isSubColumn) throws InvalidRequestException {
    validateTtl(column);
    if (!column.isSetValue()) {
        throw new InvalidRequestException("Column value is required");
    }
    if (!column.isSetTimestamp()) {
        throw new InvalidRequestException("Column timestamp is required");
    }
    ColumnDefinition definition = metadata.getColumnDefinition(column.name);
    try {
        AbstractType valueValidator = metadata.getValueValidator(definition);
        if (valueValidator != null) {
            valueValidator.validate(column.value);
        }
    } catch (MarshalException cause) {
        if (logger.isDebugEnabled()) {
            logger.debug("rejecting invalid value " + ByteBufferUtil.bytesToHex(summarize(column.value)));
        }
        // Render the column name with the comparator appropriate to its nesting level.
        AbstractType nameComparator = isSubColumn ? metadata.subcolumnComparator : metadata.comparator;
        throw new InvalidRequestException(String.format("(%s) [%s][%s][%s] failed validation", cause.getMessage(), metadata.ksName, metadata.cfName, nameComparator.getString(column.name)));
    }
    // Indexed column values cannot be larger than 64K. See CASSANDRA-3057 for more details
    if (definition != null && definition.getIndexType() != null && column.value.remaining() > FBUtilities.MAX_UNSIGNED_SHORT) {
        throw new InvalidRequestException(String.format("Can't index column value of size %d for index %s in CF %s of KS %s", column.value.remaining(), definition.getIndexName(), metadata.cfName, metadata.ksName));
    }
}
Use of org.apache.cassandra.db.marshal.AbstractType in the project eiger by wlloyd.
The class SSTableImport, method addToSuperCF.
/**
* Add super columns to a column family.
*
* @param row the super columns associated with a row
* @param cfamily the column family to add columns to
*/
/**
 * Add super columns to a column family.
 *
 * @param row the super columns associated with a row, keyed by super column name
 * @param cfamily the column family to add columns to
 */
private static void addToSuperCF(Map<?, ?> row, ColumnFamily cfamily) {
    CFMetaData metaData = cfamily.metadata();
    assert metaData != null;
    // The comparator turns the JSON string key back into a binary super column name.
    AbstractType nameComparator = metaData.comparator;
    for (Map.Entry<?, ?> superColumnEntry : row.entrySet()) {
        Map<?, ?> superColumnData = (Map<?, ?>) superColumnEntry.getValue();
        ByteBuffer superName = stringAsType((String) superColumnEntry.getKey(), nameComparator);
        addColumnsToCF((List<?>) superColumnData.get("subColumns"), superName, cfamily);
        // *WARNING* markForDeleteAt has been DEPRECATED at Cassandra side
        // BigInteger deletedAt = (BigInteger) superColumnData.get("deletedAt");
        // SuperColumn superColumn = (SuperColumn) cfamily.getColumn(superName);
        // superColumn.markForDeleteAt((int) (System.currentTimeMillis()/1000), deletedAt);
    }
}
Use of org.apache.cassandra.db.marshal.AbstractType in the project eiger by wlloyd.
The class SSTableNamesIterator, method readIndexedColumns.
/**
 * Reads only the requested named columns from an indexed on-disk row, using the
 * row's column index to skip index blocks that cannot contain any of the names.
 * Matching columns are added to {@code cf} as a side effect.
 *
 * @param metadata            column family metadata supplying the column-name comparator
 * @param file                input positioned at the serialized column data (just before the column count)
 * @param columnNames         the full requested set, checked against each deserialized column
 * @param filteredColumnNames the requested names narrowed to those possibly present in this row
 * @param indexList           the row's column index entries, in comparator order
 * @throws IOException on any read failure
 */
private void readIndexedColumns(CFMetaData metadata, FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<IndexHelper.IndexInfo> indexList) throws IOException {
// column count (not needed here; we navigate by index blocks instead)
file.readInt();
/* get the various column ranges we have to read */
AbstractType comparator = metadata.comparator;
// A sorted set keyed by the index comparator: collects each index block at most
// once even when several requested names fall inside the same block.
SortedSet<IndexHelper.IndexInfo> ranges = new TreeSet<IndexHelper.IndexInfo>(IndexHelper.getComparator(comparator, false));
for (ByteBuffer name : filteredColumnNames) {
int index = IndexHelper.indexFor(name, indexList, comparator, false);
// name sorts after the last index block: not present in this row
if (index == indexList.size())
continue;
IndexHelper.IndexInfo indexInfo = indexList.get(index);
// name sorts before the block's first column: falls in a gap, not present
if (comparator.compare(name, indexInfo.firstName) < 0)
continue;
ranges.add(indexInfo);
}
// Mark the start of the column data; each block's offset is relative to this mark.
FileMark mark = file.mark();
for (IndexHelper.IndexInfo indexInfo : ranges) {
// Rewind to the mark, then skip forward to this block's start. NOTE(review):
// blocks are visited in comparator order, so the reset+skip presumably only
// ever moves the cursor forward within the data file segment — confirm.
file.reset(mark);
FileUtils.skipBytesFully(file, indexInfo.offset);
// TODO only completely deserialize columns we are interested in
while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width) {
IColumn column = cf.getColumnSerializer().deserialize(file);
// we check vs the original Set, not the filtered List, for efficiency
if (columnNames.contains(column.name())) {
cf.addColumn(column);
}
}
}
}
Use of org.apache.cassandra.db.marshal.AbstractType in the project eiger by wlloyd.
The class ColumnDefinition, method fromAvro.
/**
 * Builds a {@code ColumnDefinition} from its Avro migration representation.
 *
 * @param cd the Avro column definition to convert; index type/name may be null
 * @return the equivalent ColumnDefinition with a cloned name buffer
 * @throws RuntimeException wrapping a ConfigurationException if the validation class cannot be parsed
 */
public static ColumnDefinition fromAvro(org.apache.cassandra.db.migration.avro.ColumnDef cd) {
    IndexType indexType = null;
    if (cd.index_type != null) {
        indexType = Enum.valueOf(IndexType.class, cd.index_type.name());
    }
    String indexName = null;
    if (cd.index_name != null) {
        // Avro strings are CharSequence; materialize as a plain String.
        indexName = cd.index_name.toString();
    }
    try {
        AbstractType validatorType = TypeParser.parse(cd.validation_class);
        // Clone the name so the definition does not share the Avro record's buffer.
        return new ColumnDefinition(ByteBufferUtil.clone(cd.name), validatorType, indexType, getStringMap(cd.index_options), indexName);
    } catch (ConfigurationException e) {
        throw new RuntimeException(e);
    }
}
Use of org.apache.cassandra.db.marshal.AbstractType in the project eiger by wlloyd.
The class SSTableExport, method serializeRow.
/**
* Get portion of the columns and serialize in loop while not more columns left in the row
* @param row SSTableIdentityIterator row representation with Column Family
* @param key Decorated Key for the required row
* @param out output stream
*/
/**
 * Get portion of the columns and serialize in loop while not more columns left in the row.
 * Super column families are emitted as a JSON object keyed by super column name; standard
 * column families are emitted as a JSON array of columns.
 *
 * @param row SSTableIdentityIterator row representation with Column Family
 * @param key Decorated Key for the required row
 * @param out output stream
 */
private static void serializeRow(SSTableIdentityIterator row, DecoratedKey key, PrintStream out) {
    ColumnFamily columnFamily = row.getColumnFamily();
    CFMetaData cfMetaData = columnFamily.metadata();
    AbstractType comparator = columnFamily.getComparator();
    writeKey(out, bytesToHex(key.key));
    if (columnFamily.isSuper()) {
        out.print("{");
        while (row.hasNext()) {
            IColumn superColumn = row.next();
            writeKey(out, comparator.getString(superColumn.name()));
            out.print("{");
            writeKey(out, "deletedAt");
            out.print(superColumn.getMarkedForDeleteAt());
            out.print(", ");
            writeKey(out, "subColumns");
            out.print("[");
            serializeColumns(superColumn.getSubColumns().iterator(), out, columnFamily.getSubComparator(), cfMetaData);
            out.print("]");
            out.print("}");
            // Separate super column entries, but emit no trailing comma.
            if (row.hasNext()) {
                out.print(", ");
            }
        }
        out.print("}");
    } else {
        out.print("[");
        serializeColumns(row, out, comparator, cfMetaData);
        out.print("]");
    }
}
Aggregations