use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.
The class CassandraServer, method internal_insert.
/**
 * Validates and applies a single column insert on behalf of a Thrift client.
 * Performs permission and schema validation, assigns a Lamport-clock timestamp
 * when the client left it unset, converts the client-visible Dep set into
 * internal Dependency objects, and hands the resulting RowMutation to doInsert.
 *
 * @throws InvalidRequestException on validation failure or unmarshalable data
 */
private void internal_insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level, Set<Dep> deps) throws InvalidRequestException, UnavailableException, TimedOutException {
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.WRITE);

    CFMetaData cfm = ThriftValidation.validateColumnFamily(state().getKeyspace(), column_parent.column_family, false);
    ThriftValidation.validateKey(cfm, key);
    ThriftValidation.validateColumnParent(cfm, column_parent);

    // The super-column field is normally optional, but an insert into a super
    // column family must name the super column explicitly.
    boolean isSuperCF = (cfm.cfType == ColumnFamilyType.Super);
    if (isSuperCF && column_parent.super_column == null) {
        throw new InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
    }
    ThriftValidation.validateColumnNames(cfm, column_parent, Arrays.asList(column.name));
    ThriftValidation.validateColumnData(cfm, column, column_parent.super_column != null);

    // A timestamp of 0 signals "unset by the client"; fill it from the node's
    // Lamport clock so ordering stays consistent across replicas.
    if (column.timestamp == 0) {
        column.timestamp = LamportClock.getVersion();
        logger.debug("Setting timestamp to {}", column.timestamp);
    }

    // Translate Thrift-level Dep objects into internal Dependency instances.
    Set<Dependency> converted = new HashSet<Dependency>();
    for (Dep d : deps) {
        converted.add(new Dependency(d));
    }

    RowMutation mutation = new RowMutation(state().getKeyspace(), key, converted);
    try {
        mutation.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
    } catch (MarshalException me) {
        throw new InvalidRequestException(me.getMessage());
    }
    doInsert(consistency_level, Arrays.asList(mutation));
}
use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.
The class Migration, method getLastMigrationId.
/**
 * Reads the UUID of the most recently applied schema migration from the
 * system table's schema column family.
 *
 * @return the last migration's UUID, or {@code null} when no migration has
 *         ever been recorded
 */
public static UUID getLastMigrationId() {
    DecoratedKey<?> key = StorageService.getPartitioner().decorateKey(LAST_MIGRATION_KEY);
    Table systemTable = Table.open(Table.SYSTEM_TABLE);
    ColumnFamilyStore schemaStore = systemTable.getColumnFamilyStore(SCHEMA_CF);

    // Fetch only the single LAST_MIGRATION_KEY column from the schema row.
    QueryFilter nameFilter = QueryFilter.getNamesFilter(key, new QueryPath(SCHEMA_CF), LAST_MIGRATION_KEY);
    ColumnFamily result = schemaStore.getColumnFamily(nameFilter);

    if (result == null || result.getColumnNames().isEmpty()) {
        return null;
    }
    return UUIDGen.getUUID(result.getColumn(LAST_MIGRATION_KEY).value());
}
use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.
The class SSTableExportTest, method testEnumeratekeys.
/**
 * Writes an SSTable with two rows and verifies that SSTableExport.enumeratekeys
 * emits both row keys, hex-encoded, one per line.
 *
 * Fixes relative to the previous version: the PrintStream and FileReader are
 * now closed (the export output was never flushed/closed, and the unclosed
 * reader leaked a file handle), and the file is read in a loop because a
 * single Reader.read(char[]) call may legally return fewer chars than
 * requested, which could silently truncate the compared output.
 */
@Test
public void testEnumeratekeys() throws IOException {
    File tempSS = tempSSTableFile("Keyspace1", "Standard1");
    ColumnFamily cfamily = ColumnFamily.create("Keyspace1", "Standard1");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
    // Add rowA
    cfamily.addColumn(new QueryPath("Standard1", null, ByteBufferUtil.bytes("colA")), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
    writer.append(Util.dk("rowA"), cfamily);
    cfamily.clear();
    // Add rowB
    cfamily.addColumn(new QueryPath("Standard1", null, ByteBufferUtil.bytes("colB")), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
    writer.append(Util.dk("rowB"), cfamily);
    cfamily.clear();
    writer.closeAndOpenReader();
    // Enumerate the keys into a temp file; close the stream so output is flushed.
    File temp = File.createTempFile("Standard1", ".txt");
    PrintStream out = new PrintStream(temp.getPath());
    try {
        SSTableExport.enumeratekeys(writer.getFilename(), out);
    } finally {
        out.close();
    }
    // Read the whole file; read() may return short, so loop until buf is full
    // or EOF is reached.
    char[] buf = new char[(int) temp.length()];
    FileReader file = new FileReader(temp);
    try {
        int off = 0;
        while (off < buf.length) {
            int n = file.read(buf, off, buf.length - off);
            if (n < 0)
                break;
            off += n;
        }
    } finally {
        file.close();
    }
    String output = new String(buf);
    String sep = System.getProperty("line.separator");
    assert output.equals(asHex("rowA") + sep + asHex("rowB") + sep) : output;
}
use of org.apache.cassandra.db.filter.QueryPath in project titan by thinkaurelius.
The class CassandraEmbeddedStoreManager, method mutateMany.
/*
 * This implementation can't handle counter columns.
 *
 * The private method internal_batch_mutate in CassandraServer as of 1.2.0
 * provided most of the following method after transaction handling.
 */
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws StorageException {
    Preconditions.checkNotNull(mutations);
    final Timestamp timestamp = getTimestamp(txh);

    // Presize the per-key RowMutation map from the total number of keys seen
    // across all column families.
    int keyCount = 0;
    for (Map<StaticBuffer, KCVMutation> perCf : mutations.values()) {
        keyCount += perCf.size();
    }
    Map<StaticBuffer, RowMutation> rowMutations = new HashMap<StaticBuffer, RowMutation>(keyCount);

    for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> cfEntry : mutations.entrySet()) {
        String columnFamily = cfEntry.getKey();
        for (Map.Entry<StaticBuffer, KCVMutation> keyEntry : cfEntry.getValue().entrySet()) {
            StaticBuffer key = keyEntry.getKey();
            KCVMutation kcvMut = keyEntry.getValue();

            // One RowMutation per row key, shared across column families.
            RowMutation mutation = rowMutations.get(key);
            if (mutation == null) {
                mutation = new RowMutation(keySpaceName, key.asByteBuffer());
                rowMutations.put(key, mutation);
            }

            if (kcvMut.hasAdditions()) {
                for (Entry addition : kcvMut.getAdditions()) {
                    // TODO are these asByteBuffer() calls too expensive?
                    QueryPath addPath = new QueryPath(columnFamily, null, addition.getColumn().asByteBuffer());
                    mutation.add(addPath, addition.getValue().asByteBuffer(), timestamp.additionTime);
                }
            }

            if (kcvMut.hasDeletions()) {
                for (StaticBuffer deletedColumn : kcvMut.getDeletions()) {
                    QueryPath delPath = new QueryPath(columnFamily, null, deletedColumn.asByteBuffer());
                    mutation.delete(delPath, timestamp.deletionTime);
                }
            }
        }
    }

    mutate(new ArrayList<RowMutation>(rowMutations.values()), getTx(txh).getWriteConsistencyLevel().getDBConsistency());
}
use of org.apache.cassandra.db.filter.QueryPath in project titan by thinkaurelius.
The class CassandraEmbeddedKeyColumnValueStore, method getSlice.
/**
 * Executes a single-key column-slice read against the embedded Cassandra
 * instance and converts the resulting ColumnFamily into Titan entries.
 *
 * @return the matching entries, or an empty (mutable) list when the row is
 *         missing, empty, or marked for deletion
 * @throws PermanentStorageException if more than one row comes back for the
 *         single requested key
 */
@Override
public List<Entry> getSlice(KeySliceQuery query, StoreTransaction txh) throws StorageException {
    // Build the slice command: empty start/end column names mean "from the
    // first column" and "up to the limit", respectively; results are not
    // reversed.
    QueryPath slicePath = new QueryPath(columnFamily);
    ByteBuffer rowKey = query.getKey().asByteBuffer();
    ByteBuffer startColumn = query.getSliceStart().asByteBuffer();
    ByteBuffer endColumn = query.getSliceEnd().asByteBuffer();
    ReadCommand sliceCmd = new SliceFromReadCommand(keyspace, rowKey, slicePath, startColumn, endColumn, false, query.getLimit());

    List<Row> rows = read(sliceCmd, getTx(txh).getReadConsistencyLevel().getDBConsistency());

    if (rows == null || rows.isEmpty())
        return new ArrayList<Entry>(0);
    if (rows.size() > 1)
        throw new PermanentStorageException("Received " + rows.size() + " rows for single key");

    Row row = rows.get(0);
    if (row == null) {
        log.warn("Null Row object retrieved from Cassandra StorageProxy");
        return new ArrayList<Entry>(0);
    }

    ColumnFamily cf = row.cf;
    if (cf == null) {
        log.debug("null ColumnFamily (\"{}\")", columnFamily);
        return new ArrayList<Entry>(0);
    }
    if (cf.isMarkedForDelete())
        return new ArrayList<Entry>(0);

    return cfToEntries(cf, query.getSliceEnd());
}
Aggregations