Use of org.apache.cassandra.db.filter.QueryPath in project brisk by riptano:
class CleanupHelper, method insertData.
/**
 * Inserts {@code numberOfRows} single-column rows into the given column family.
 * Row i carries key "key"+i, column "col"+i, and value "val"+i, timestamped
 * with the current wall-clock time; mutations are applied without going
 * through the commit log ({@code applyUnsafe}).
 */
protected void insertData(String keyspace, String columnFamily, int offset, int numberOfRows) throws IOException {
    int end = offset + numberOfRows;
    for (int row = offset; row < end; row++) {
        ByteBuffer rowKey = ByteBufferUtil.bytes("key" + row);
        RowMutation mutation = new RowMutation(keyspace, rowKey);
        mutation.add(new QueryPath(columnFamily, null, ByteBufferUtil.bytes("col" + row)),
                ByteBufferUtil.bytes("val" + row),
                System.currentTimeMillis());
        mutation.applyUnsafe();
    }
}
Use of org.apache.cassandra.db.filter.QueryPath in project titan by thinkaurelius:
class CassandraEmbeddedKeyColumnValueStore, method getInternal.
/**
 * Reads a single column for the given key via an embedded by-name slice read.
 *
 * @return a cloned copy of the column's value buffer, or {@code null} when the
 *         row, column family, or column is absent or marked for deletion
 * @throws StorageException if the read yields more than one row for a single key
 */
static ByteBuffer getInternal(String keyspace, String columnFamily, ByteBuffer key, ByteBuffer column, org.apache.cassandra.db.ConsistencyLevel cl) throws StorageException {
    SliceByNamesReadCommand namesCmd = new SliceByNamesReadCommand(
            keyspace, key.duplicate(), new QueryPath(columnFamily), Arrays.asList(column.duplicate()));
    List<Row> rows = read(namesCmd, cl);
    if (rows == null || rows.isEmpty())
        return null;
    if (rows.size() > 1)
        throw new PermanentStorageException("Received " + rows.size() + " rows from a single-key-column cassandra read");
    assert rows.size() == 1;
    Row row = rows.get(0);
    if (row == null) {
        log.warn("Null Row object retrieved from Cassandra StorageProxy");
        return null;
    }
    ColumnFamily cf = row.cf;
    if (cf == null || cf.isMarkedForDelete())
        return null;
    IColumn col = cf.getColumn(column.duplicate());
    // Tombstoned columns came up during testing; treat them the same as absent.
    if (col == null || col.isMarkedForDelete())
        return null;
    return org.apache.cassandra.utils.ByteBufferUtil.clone(col.value());
}
Use of org.apache.cassandra.db.filter.QueryPath in project titan by thinkaurelius:
class CassandraEmbeddedKeyColumnValueStore, method containsKey.
@Override
public boolean containsKey(StaticBuffer key, StoreTransaction txh) throws StorageException {
    // Slice at most one column off the row; any live column proves the key exists.
    // TODO key.asByteBuffer() may entail an unnecessary buffer copy
    QueryPath slicePath = new QueryPath(columnFamily);
    ReadCommand sliceCmd = new SliceFromReadCommand(
            keyspace,                           // Keyspace name
            key.asByteBuffer(),                 // Row key
            slicePath,                          // ColumnFamily
            ByteBufferUtil.EMPTY_BYTE_BUFFER,   // Start column (empty = begin at first result)
            ByteBufferUtil.EMPTY_BYTE_BUFFER,   // End column (empty = max out the count)
            false,                              // Do not reverse results
            1);                                 // Max count of Columns to return
    List<Row> rows = read(sliceCmd, getTx(txh).getReadConsistencyLevel().getDBConsistency());
    if (rows == null || rows.isEmpty())
        return false;
    /*
     * Find at least one live column.
     *
     * The rows list may contain arbitrarily many marked-for-delete
     * elements, so we can't assume we're dealing with a singleton even
     * though the maximum column count was set to 1.
     */
    for (Row row : rows) {
        if (row == null || row.cf == null || row.cf.isMarkedForDelete())
            continue;
        for (IColumn column : row.cf) {
            if (!column.isMarkedForDelete())
                return true;
        }
    }
    return false;
}
Use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd:
class QueryProcessor, method getSlice.
/**
 * Translates a SELECT into one ReadCommand per key — a slice read when the
 * statement names a column range, otherwise a by-name read over an explicit
 * column list — and executes the batch through StorageProxy at the
 * statement's consistency level.
 *
 * @throws TimedOutException if the storage read times out
 * @throws RuntimeException  wrapping any IOException from the read path
 */
private static List<org.apache.cassandra.db.Row> getSlice(CFMetaData metadata, SelectStatement select, List<String> variables) throws InvalidRequestException, TimedOutException, UnavailableException {
    QueryPath path = new QueryPath(select.getColumnFamily());
    List<ReadCommand> commands = new ArrayList<ReadCommand>();
    if (select.isColumnRange()) {
        // ...a range (slice) of column names
        AbstractType<?> comparator = select.getComparator(metadata.ksName);
        ByteBuffer start = select.getColumnStart().getByteBuffer(comparator, variables);
        ByteBuffer finish = select.getColumnFinish().getByteBuffer(comparator, variables);
        for (Term rawKey : select.getKeys()) {
            ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator(), variables);
            validateKey(key);
            validateSliceRange(metadata, start, finish, select.isColumnsReversed());
            commands.add(new SliceFromReadCommand(metadata.ksName, key, path, start, finish,
                    select.isColumnsReversed(), select.getColumnsLimit()));
        }
    } else {
        // ...a list of column names
        Collection<ByteBuffer> columnNames = getColumnNames(select, metadata, variables);
        validateColumnNames(columnNames);
        for (Term rawKey : select.getKeys()) {
            ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator(), variables);
            validateKey(key);
            commands.add(new SliceByNamesReadCommand(metadata.ksName, key, path, columnNames));
        }
    }
    try {
        return StorageProxy.read(commands, select.getConsistencyLevel());
    } catch (TimeoutException e) {
        throw new TimedOutException();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd:
class CommitLogTest, method testDeleteIfNotDirty.
@Test
public void testDeleteIfNotDirty() throws Exception {
// Verifies that discardCompletedSegments removes only commit-log segments
// holding no un-flushed writes for other column families. Segment-count
// expectations depend on the configured segment size relative to the
// 32/64 MB mutations below.
CommitLog.instance.resetUnsafe();
// Roughly 32 MB mutation
RowMutation rm = new RowMutation("Keyspace1", bytes("k"));
rm.add(new QueryPath("Standard1", null, bytes("c1")), ByteBuffer.allocate(32 * 1024 * 1024), 0);
// Adding it twice (won't change segment)
CommitLog.instance.add(rm);
CommitLog.instance.add(rm);
assert CommitLog.instance.activeSegments() == 1 : "Expecting 1 segment, got " + CommitLog.instance.activeSegments();
// "Flush": this won't delete anything
int cfid1 = rm.getColumnFamilyIds().iterator().next();
CommitLog.instance.discardCompletedSegments(cfid1, CommitLog.instance.getContext());
assert CommitLog.instance.activeSegments() == 1 : "Expecting 1 segment, got " + CommitLog.instance.activeSegments();
// Adding new mutation on another CF, large enough (including CL entry overhead) that a new segment is created
RowMutation rm2 = new RowMutation("Keyspace1", bytes("k"));
rm2.add(new QueryPath("Standard2", null, bytes("c1")), ByteBuffer.allocate(64 * 1024 * 1024), 0);
CommitLog.instance.add(rm2);
// also forces a new segment, since each entry-with-overhead is just over half the CL size
CommitLog.instance.add(rm2);
assert CommitLog.instance.activeSegments() == 3 : "Expecting 3 segments, got " + CommitLog.instance.activeSegments();
// "Flush" second cf: The first segment should be deleted since we
// didn't write anything on cf1 since last flush (and we flush cf2)
int cfid2 = rm2.getColumnFamilyIds().iterator().next();
CommitLog.instance.discardCompletedSegments(cfid2, CommitLog.instance.getContext());
// NOTE(review): an earlier comment here claimed "we still have both our
// segment", but the assertion expects a SINGLE remaining segment after
// the discard — the stale ones should be gone. Confirm intent upstream.
assert CommitLog.instance.activeSegments() == 1 : "Expecting 1 segment, got " + CommitLog.instance.activeSegments();
}
Aggregations