use of org.apache.cassandra.db.IColumn in project brisk by riptano.
the class BriskDBUtil method validateAndGetColumn.
/**
* Validates that the result is not empty and gets the value for the <code>columnName</code> column.
* @param rows the raw result from StorageProxy.read(....)
* @param columnName column name
* @return the Column that was requested if it exists.
* @throws NotFoundException if the result doesn't exist (including if the value holds a tombstone)
*/
public static IColumn validateAndGetColumn(List<Row> rows, ByteBuffer columnName) throws NotFoundException {
    if (rows.isEmpty())
        throw new NotFoundException();
    if (rows.size() > 1)
        throw new RuntimeException("Block id returned more than one row");
    Row row = rows.get(0);
    if (row.cf == null)
        throw new NotFoundException();
    IColumn col = row.cf.getColumn(columnName);
    if (col == null || !col.isLive())
        throw new NotFoundException();
    return col;
}
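The pattern above — reject an empty result, reject a multi-row result, and treat a missing or deleted (tombstoned) column as not found — can be sketched independently of the Cassandra types. A minimal, hypothetical illustration; the Entry class and its isLive flag are stand-ins for IColumn, not Brisk APIs:

import java.util.List;
import java.util.NoSuchElementException;

public class SingleResultValidator {
    // Hypothetical stand-in for an IColumn: a value plus a liveness flag.
    static class Entry {
        final String value;
        final boolean isLive;
        Entry(String value, boolean isLive) { this.value = value; this.isLive = isLive; }
    }

    // Mirrors the shape of validateAndGetColumn: exactly one row must come
    // back, and the entry must exist and be live (i.e. not a tombstone).
    static Entry validateAndGet(List<Entry> rows) {
        if (rows.isEmpty())
            throw new NoSuchElementException("no rows");
        if (rows.size() > 1)
            throw new IllegalStateException("query returned more than one row");
        Entry e = rows.get(0);
        if (e == null || !e.isLive)
            throw new NoSuchElementException("entry missing or deleted");
        return e;
    }

    public static void main(String[] args) {
        System.out.println(validateAndGet(List.of(new Entry("block-42", true))).value);
    }
}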
use of org.apache.cassandra.db.IColumn in project brisk by riptano.
the class CassandraStorage method columnToTuple.
private Tuple columnToTuple(ByteBuffer name, IColumn col, CfDef cfDef) throws IOException {
    Tuple pair = TupleFactory.getInstance().newTuple(2);
    List<AbstractType> marshallers = getDefaultMarshallers(cfDef);
    Map<ByteBuffer, AbstractType> validators = getValidatorMap(cfDef);
    if (col instanceof Column) {
        // standard
        pair.set(0, marshallers.get(0).compose(name));
        if (validators.get(name) == null) {
            // Have to special case BytesType because compose returns a ByteBuffer
            if (marshallers.get(1) instanceof BytesType)
                pair.set(1, new DataByteArray(ByteBufferUtil.getArray(col.value())));
            else
                pair.set(1, marshallers.get(1).compose(col.value()));
        } else {
            pair.set(1, validators.get(name).compose(col.value()));
        }
        return pair;
    }
    // super
    ArrayList<Tuple> subcols = new ArrayList<Tuple>();
    for (IColumn subcol : col.getSubColumns())
        subcols.add(columnToTuple(subcol.name(), subcol, cfDef));
    pair.set(1, new DefaultDataBag(subcols));
    return pair;
}
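The interesting part of columnToTuple is the decoding order: a per-column validator wins, otherwise the column family's default value marshaller is used, with raw bytes special-cased because they need copying out rather than composing. A self-contained sketch of that fallback, using java.util.function decoders as hypothetical stand-ins for AbstractType:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class ValueDecoding {
    // Default marshaller for the raw-bytes case: copy the readable
    // region out to an array instead of composing a typed value.
    static final Function<ByteBuffer, Object> DEFAULT_BYTES = buf -> {
        byte[] out = new byte[buf.remaining()];
        buf.duplicate().get(out);
        return out;
    };

    // Per-column validator wins; otherwise fall back to the default marshaller.
    static Object decode(String name, ByteBuffer value,
                         Map<String, Function<ByteBuffer, Object>> validators) {
        Function<ByteBuffer, Object> v = validators.get(name);
        return (v != null) ? v.apply(value) : DEFAULT_BYTES.apply(value);
    }

    public static void main(String[] args) {
        Map<String, Function<ByteBuffer, Object>> validators = new HashMap<>();
        validators.put("age", buf -> buf.duplicate().getLong());
        ByteBuffer age = ByteBuffer.allocate(8).putLong(0, 42L);
        System.out.println(decode("age", age, validators)); // 42, via the validator
        Object raw = decode("name", ByteBuffer.wrap("x".getBytes(StandardCharsets.UTF_8)), validators);
        System.out.println(raw.getClass().getSimpleName()); // byte[], via the fallback
    }
}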
use of org.apache.cassandra.db.IColumn in project eiger by wlloyd.
the class CassandraStorage method getNext.
@Override
public Tuple getNext() throws IOException {
    try {
        // load the next pair
        if (!reader.nextKeyValue())
            return null;
        CfDef cfDef = getCfDef(loadSignature);
        ByteBuffer key = (ByteBuffer) reader.getCurrentKey();
        SortedMap<ByteBuffer, IColumn> cf = (SortedMap<ByteBuffer, IColumn>) reader.getCurrentValue();
        assert key != null && cf != null;
        // and wrap it in a tuple
        Tuple tuple = TupleFactory.getInstance().newTuple(2);
        ArrayList<Tuple> columns = new ArrayList<Tuple>();
        tuple.set(0, new DataByteArray(key.array(), key.position() + key.arrayOffset(), key.limit() + key.arrayOffset()));
        for (Map.Entry<ByteBuffer, IColumn> entry : cf.entrySet()) {
            columns.add(columnToTuple(entry.getKey(), entry.getValue(), cfDef));
        }
        tuple.set(1, new DefaultDataBag(columns));
        return tuple;
    } catch (InterruptedException e) {
        throw new IOException(e.getMessage());
    }
}
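The key handling in getNext deserves a note: reader.getCurrentKey() may return a ByteBuffer that is a slice of a larger shared array, so the copy into DataByteArray must offset both position and limit by arrayOffset(). A standalone sketch of that conversion in plain Java, with no Pig or Cassandra types:

import java.nio.ByteBuffer;
import java.util.Arrays;

public class ByteBufferRegion {
    // Copies exactly the readable region of a heap ByteBuffer, accounting for
    // slices whose backing array does not start at index 0.
    static byte[] readableBytes(ByteBuffer buf) {
        int start = buf.arrayOffset() + buf.position();
        int end = buf.arrayOffset() + buf.limit();
        return Arrays.copyOfRange(buf.array(), start, end);
    }

    public static void main(String[] args) {
        ByteBuffer whole = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5 });
        whole.position(2);
        ByteBuffer slice = whole.slice(); // arrayOffset() == 2, position() == 0
        System.out.println(Arrays.toString(readableBytes(slice))); // [3, 4, 5]
    }
}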
use of org.apache.cassandra.db.IColumn in project eiger by wlloyd.
the class SSTableNamesIterator method readSimpleColumns.
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames) throws IOException {
    int columns = file.readInt();
    int n = 0;
    for (int i = 0; i < columns; i++) {
        IColumn column = cf.getColumnSerializer().deserialize(file);
        if (columnNames.contains(column.name())) {
            cf.addColumn(column);
            if (n++ > filteredColumnNames.size())
                break;
        }
    }
}
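readSimpleColumns scans every serialized column in order but stops early once it has collected the requested names, so the cost is bounded by how far into the row the last match sits. The same shape in plain Java, slightly tightened to stop exactly when the filter set is satisfied; the length-prefixed-string format here is a hypothetical stand-in for the SSTable column serializer:

import java.io.*;
import java.util.*;

public class FilteredScan {
    // Reads `count` length-prefixed UTF strings, keeps those in `wanted`,
    // and stops as soon as every wanted name has been seen.
    static List<String> scan(DataInput in, int count, Set<String> wanted) throws IOException {
        List<String> found = new ArrayList<>();
        for (int i = 0; i < count && found.size() < wanted.size(); i++) {
            String name = in.readUTF();
            if (wanted.contains(name))
                found.add(name);
        }
        return found;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        for (String s : new String[] { "a", "b", "c", "d" }) out.writeUTF(s);
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(scan(in, 4, new HashSet<>(Arrays.asList("a", "c")))); // [a, c]
    }
}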
use of org.apache.cassandra.db.IColumn in project eiger by wlloyd.
the class SSTableNamesIterator method readIndexedColumns.
private void readIndexedColumns(CFMetaData metadata, FileDataInput file, SortedSet<ByteBuffer> columnNames, List<ByteBuffer> filteredColumnNames, List<IndexHelper.IndexInfo> indexList) throws IOException {
    // column count
    file.readInt();
    /* get the various column ranges we have to read */
    AbstractType comparator = metadata.comparator;
    SortedSet<IndexHelper.IndexInfo> ranges = new TreeSet<IndexHelper.IndexInfo>(IndexHelper.getComparator(comparator, false));
    for (ByteBuffer name : filteredColumnNames) {
        int index = IndexHelper.indexFor(name, indexList, comparator, false);
        if (index == indexList.size())
            continue;
        IndexHelper.IndexInfo indexInfo = indexList.get(index);
        if (comparator.compare(name, indexInfo.firstName) < 0)
            continue;
        ranges.add(indexInfo);
    }
    FileMark mark = file.mark();
    for (IndexHelper.IndexInfo indexInfo : ranges) {
        file.reset(mark);
        FileUtils.skipBytesFully(file, indexInfo.offset);
        // TODO only completely deserialize columns we are interested in
        while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width) {
            IColumn column = cf.getColumnSerializer().deserialize(file);
            // we check vs the original Set, not the filtered List, for efficiency
            if (columnNames.contains(column.name())) {
                cf.addColumn(column);
            }
        }
    }
}
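readIndexedColumns avoids deserializing the whole row by first mapping each requested name onto its index block (IndexHelper.indexFor is essentially a binary search over block boundaries) and then, per distinct block, resetting to a file mark and skipping to the block's offset. The block-selection half can be sketched with plain collections; the Block class mirrors the IndexInfo fields, and the binary search below is a hypothetical stand-in for IndexHelper.indexFor, not its actual implementation:

import java.util.*;

public class IndexBlockSelection {
    static class Block {
        final String firstName, lastName;
        final long offset, width;
        Block(String f, String l, long o, long w) { firstName = f; lastName = l; offset = o; width = w; }
    }

    // Returns the distinct blocks that may contain any of the wanted names,
    // keyed by offset so each block is read at most once, in file order.
    static SortedMap<Long, Block> blocksFor(List<Block> index, Collection<String> wanted) {
        SortedMap<Long, Block> ranges = new TreeMap<>();
        for (String name : wanted) {
            int i = search(index, name);
            if (i == index.size())
                continue; // name sorts past the last block
            Block b = index.get(i);
            if (name.compareTo(b.firstName) < 0)
                continue; // name falls in a gap before this block
            ranges.put(b.offset, b);
        }
        return ranges;
    }

    // Index of the first block whose lastName >= name.
    static int search(List<Block> index, String name) {
        int lo = 0, hi = index.size();
        while (lo < hi) {
            int mid = (lo + hi) >>> 1;
            if (index.get(mid).lastName.compareTo(name) < 0) lo = mid + 1;
            else hi = mid;
        }
        return lo;
    }

    public static void main(String[] args) {
        List<Block> index = Arrays.asList(
            new Block("a", "f", 0, 100), new Block("g", "m", 100, 80), new Block("n", "z", 180, 60));
        System.out.println(blocksFor(index, Arrays.asList("c", "q")).size()); // 2 blocks selected
    }
}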