Use of org.apache.cassandra.thrift.ConsistencyLevel in project janusgraph by JanusGraph.
The class CassandraThriftKeyColumnValueStore, method getNamesSlice:
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
ColumnParent parent = new ColumnParent(columnFamily);
/*
* Cassandra cannot handle columnStart = columnEnd.
* Cassandra's Thrift getSlice() throws InvalidRequestException
* if columnStart = columnEnd.
*/
if (query.getSliceStart().compareTo(query.getSliceEnd()) >= 0) {
// Check for invalid arguments where columnEnd < columnStart
if (query.getSliceEnd().compareTo(query.getSliceStart()) < 0) {
throw new PermanentBackendException("columnStart=" + query.getSliceStart() + " is greater than columnEnd=" + query.getSliceEnd() + ". " + "columnStart must be less than or equal to columnEnd");
}
if (0 != query.getSliceStart().length() && 0 != query.getSliceEnd().length()) {
logger.debug("Return empty list due to columnEnd==columnStart and neither empty");
return KCVSUtil.emptyResults(keys);
}
}
assert query.getSliceStart().compareTo(query.getSliceEnd()) < 0;
ConsistencyLevel consistency = getTx(txh).getReadConsistencyLevel().getThrift();
SlicePredicate predicate = new SlicePredicate();
SliceRange range = new SliceRange();
// Request one extra column: the Thrift slice end is inclusive, so the column
// named exactly sliceEnd may come back and is stripped in makeEntryList below;
// without the +1 the result could end up one short of the requested limit.
range.setCount(query.getLimit() + (query.hasLimit() ? 1 : 0));
range.setStart(query.getSliceStart().asByteBuffer());
range.setFinish(query.getSliceEnd().asByteBuffer());
predicate.setSlice_range(range);
CTConnection conn = null;
try {
conn = pool.borrowObject(keyspace);
Cassandra.Client client = conn.getClient();
final Map<ByteBuffer, List<ColumnOrSuperColumn>> rows = client.multiget_slice(convert(keys), parent, predicate, consistency);
/*
 * Each row's column list in the returned map may be smaller than what
 * Cassandra handed back: makeEntryList strips the trailing column whose
 * name equals sliceEnd (Thrift slice ends are inclusive) and truncates
 * the list to the query limit.
 */
return rows.entrySet().stream().collect(Collectors.toMap(e -> StaticArrayBuffer.of(e.getKey()), e -> makeEntryList(e.getValue(), entryGetter, query.getSliceEnd(), query.getLimit())));
} catch (Exception e) {
throw convertException(e);
} finally {
pool.returnObjectUnsafe(keyspace, conn);
}
}
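The per-row cleanup lives in makeEntryList, which is not shown here. A minimal sketch of what such a helper has to do, using a simplified stand-in pair type (the real code builds a JanusGraph EntryList via entryGetter):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Hypothetical (column, value) pair standing in for JanusGraph's Entry type.
record SliceEntry(ByteBuffer column, ByteBuffer value) {}

static List<SliceEntry> makeEntryListSketch(List<SliceEntry> raw, ByteBuffer sliceEnd, int limit) {
    List<SliceEntry> result = new ArrayList<>(Math.min(raw.size(), limit));
    for (SliceEntry e : raw) {
        // Thrift's 'finish' bound is inclusive, but the store's contract is exclusive:
        // skip the column named exactly sliceEnd (undoing the +1 over-fetch above).
        if (e.column().equals(sliceEnd))
            continue;
        if (result.size() == limit)
            break;
        result.add(e);
    }
    return result;
}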
Use of org.apache.cassandra.thrift.ConsistencyLevel in project atlasdb by palantir.
The class SchemaMutationLock, method queryExistingLockColumn:
private Optional<Column> queryExistingLockColumn(CassandraClient client) throws TException {
TableReference lockTableRef = lockTable.get();
Column existingColumn = null;
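// LOCAL_QUORUM makes this read overlap any quorum write of the lock column in the
// local datacenter, so a lock taken by another node is guaranteed to be visible here.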
ConsistencyLevel localQuorum = ConsistencyLevel.LOCAL_QUORUM;
try {
ColumnOrSuperColumn result = queryRunner.run(client, lockTableRef, () -> client.get(lockTableRef, getGlobalDdlLockRowName(), getGlobalDdlLockColumnName(), localQuorum));
existingColumn = result.getColumn();
} catch (UnavailableException e) {
throw new InsufficientConsistencyException("Checking the schema lock requires " + localQuorum + " Cassandra nodes to be up and available.", e);
} catch (NotFoundException e) {
log.debug("No existing schema lock found in table [{}]", SafeArg.of("tableName", lockTableRef));
}
return Optional.ofNullable(existingColumn);
}
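A caller branches on the returned Optional to decide whether the schema lock is held; a hypothetical sketch (the handling shown is illustrative, not AtlasDB's actual logic):

Optional<Column> existing = queryExistingLockColumn(client);
if (existing.isPresent()) {
    // A lock column exists; its value identifies the current (or stale) holder.
    byte[] lockValue = existing.get().getValue();
    // ... compare against this node's lock id before waiting, retrying, or clearing ...
} else {
    // No lock column exists, so this node may attempt to take the lock.
}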
Use of org.apache.cassandra.thrift.ConsistencyLevel in project titan by thinkaurelius.
The class CassandraThriftKeyColumnValueStore, method containsKey:
@Override
public boolean containsKey(StaticBuffer key, StoreTransaction txh) throws StorageException {
ColumnParent parent = new ColumnParent(columnFamily);
ConsistencyLevel consistency = getTx(txh).getReadConsistencyLevel().getThriftConsistency();
SlicePredicate predicate = new SlicePredicate();
SliceRange range = new SliceRange();
range.setCount(1);
byte[] empty = new byte[0];
range.setStart(empty);
range.setFinish(empty);
predicate.setSlice_range(range);
CTConnection conn = null;
try {
conn = pool.borrowObject(keyspace);
Cassandra.Client client = conn.getClient();
List<?> result = client.get_slice(key.asByteBuffer(), parent, predicate, consistency);
return 0 < result.size();
} catch (Exception e) {
throw convertException(e);
} finally {
pool.returnObjectUnsafe(keyspace, conn);
}
}
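What makes this a cheap existence check is the predicate: an empty start and finish means an unbounded column range, and count=1 caps the fetch at one column, so the row exists exactly when get_slice returns anything. A standalone sketch of that predicate:

import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;

// "Does this row have any live column?" — empty start/finish is an unbounded
// range, and count=1 returns at most the row's first column.
static SlicePredicate firstColumnPredicate() {
    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setCount(1);
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(range);
    return predicate;
}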
Use of org.apache.cassandra.thrift.ConsistencyLevel in project titan by thinkaurelius.
The class CassandraThriftKeyColumnValueStore, method getNamesSlice:
public Map<ByteBuffer, List<Entry>> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws StorageException {
Preconditions.checkArgument(query.getLimit() >= 0);
if (0 == query.getLimit())
return Collections.emptyMap();
ColumnParent parent = new ColumnParent(columnFamily);
/*
* Cassandra cannot handle columnStart = columnEnd.
* Cassandra's Thrift getSlice() throws InvalidRequestException
* if columnStart = columnEnd.
*/
if (ByteBufferUtil.compare(query.getSliceStart(), query.getSliceEnd()) >= 0) {
// Check for invalid arguments where columnEnd < columnStart
if (ByteBufferUtil.isSmallerThan(query.getSliceEnd(), query.getSliceStart())) {
throw new PermanentStorageException("columnStart=" + query.getSliceStart() + " is greater than columnEnd=" + query.getSliceEnd() + ". " + "columnStart must be less than or equal to columnEnd");
}
if (0 != query.getSliceStart().length() && 0 != query.getSliceEnd().length()) {
logger.debug("Return empty list due to columnEnd==columnStart and neither empty");
return Collections.emptyMap();
}
}
// At this point columnStart < columnEnd is guaranteed.
ConsistencyLevel consistency = getTx(txh).getReadConsistencyLevel().getThriftConsistency();
SlicePredicate predicate = new SlicePredicate();
SliceRange range = new SliceRange();
range.setCount(query.getLimit());
range.setStart(query.getSliceStart().asByteBuffer());
range.setFinish(query.getSliceEnd().asByteBuffer());
predicate.setSlice_range(range);
CTConnection conn = null;
try {
conn = pool.borrowObject(keyspace);
Cassandra.Client client = conn.getClient();
List<ByteBuffer> requestKeys = new ArrayList<ByteBuffer>(keys.size());
{
for (StaticBuffer key : keys) {
requestKeys.add(key.asByteBuffer());
}
}
Map<ByteBuffer, List<ColumnOrSuperColumn>> rows = client.multiget_slice(requestKeys, parent, predicate, consistency);
/*
 * Each row's column list may be one element smaller than what Cassandra
 * returned: excludeLastColumn strips the trailing column whose name equals
 * sliceEnd, since Thrift slice ends are inclusive.
 */
Map<ByteBuffer, List<Entry>> results = new HashMap<ByteBuffer, List<Entry>>();
ByteBuffer sliceEndBB = query.getSliceEnd().asByteBuffer();
for (ByteBuffer key : rows.keySet()) {
results.put(key, excludeLastColumn(rows.get(key), sliceEndBB));
}
return results;
} catch (Exception e) {
throw convertException(e);
} finally {
pool.returnObjectUnsafe(keyspace, conn);
}
}
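Unlike the JanusGraph version above, this variant does not over-fetch by one column, so stripping an inclusive-end match can leave one entry fewer than the limit; the later +1 in JanusGraph presumably addresses exactly that. excludeLastColumn itself is not shown; a plausible sketch, assuming only the trailing column of a sorted slice can equal sliceEnd (the real helper also converts each ColumnOrSuperColumn into Titan's Entry type, omitted here):

import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import java.nio.ByteBuffer;
import java.util.List;

static List<ColumnOrSuperColumn> excludeLastColumnSketch(List<ColumnOrSuperColumn> row, ByteBuffer sliceEnd) {
    if (!row.isEmpty()) {
        ColumnOrSuperColumn last = row.get(row.size() - 1);
        // Thrift's 'finish' bound is inclusive; drop the column named exactly sliceEnd.
        if (ByteBuffer.wrap(last.getColumn().getName()).equals(sliceEnd)) {
            return row.subList(0, row.size() - 1);
        }
    }
    return row;
}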
Use of org.apache.cassandra.thrift.ConsistencyLevel in project titan by thinkaurelius.
The class CassandraThriftStoreManager, method mutateMany:
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws StorageException {
Preconditions.checkNotNull(mutations);
final Timestamp timestamp = getTimestamp(txh);
ConsistencyLevel consistency = getTx(txh).getWriteConsistencyLevel().getThriftConsistency();
// Generate Thrift-compatible batch_mutate() datastructure
// key -> cf -> cassmutation
int size = 0;
for (Map<StaticBuffer, KCVMutation> mutation : mutations.values()) size += mutation.size();
Map<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>> batch = new HashMap<ByteBuffer, Map<String, List<org.apache.cassandra.thrift.Mutation>>>(size);
for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> keyMutation : mutations.entrySet()) {
String columnFamily = keyMutation.getKey();
for (Map.Entry<StaticBuffer, KCVMutation> mutEntry : keyMutation.getValue().entrySet()) {
StaticBuffer key = mutEntry.getKey();
ByteBuffer keyBB = key.asByteBuffer();
// Get or create the single Cassandra Mutation object responsible for this key
Map<String, List<org.apache.cassandra.thrift.Mutation>> cfmutation = batch.get(keyBB);
if (cfmutation == null) {
// TODO where did the magic number 3 come from?
cfmutation = new HashMap<String, List<org.apache.cassandra.thrift.Mutation>>(3);
batch.put(keyBB, cfmutation);
}
KCVMutation mutation = mutEntry.getValue();
List<org.apache.cassandra.thrift.Mutation> thriftMutation = new ArrayList<org.apache.cassandra.thrift.Mutation>(mutations.size());
if (mutation.hasDeletions()) {
for (StaticBuffer buf : mutation.getDeletions()) {
Deletion d = new Deletion();
SlicePredicate sp = new SlicePredicate();
sp.addToColumn_names(buf.asByteBuffer());
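// A named-column predicate deletes only the listed columns; a Deletion
// without any predicate would drop the entire row.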
d.setPredicate(sp);
d.setTimestamp(timestamp.deletionTime);
org.apache.cassandra.thrift.Mutation m = new org.apache.cassandra.thrift.Mutation();
m.setDeletion(d);
thriftMutation.add(m);
}
}
if (mutation.hasAdditions()) {
for (Entry ent : mutation.getAdditions()) {
ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
Column column = new Column(ent.getColumn().asByteBuffer());
column.setValue(ent.getValue().asByteBuffer());
column.setTimestamp(timestamp.additionTime);
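// Cassandra resolves concurrent writes per column by timestamp; additionTime is
// presumably offset after deletionTime so a column deleted and re-added in the
// same batch survives.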
cosc.setColumn(column);
org.apache.cassandra.thrift.Mutation m = new org.apache.cassandra.thrift.Mutation();
m.setColumn_or_supercolumn(cosc);
thriftMutation.add(m);
}
}
cfmutation.put(columnFamily, thriftMutation);
}
}
CTConnection conn = null;
try {
conn = pool.borrowObject(keySpaceName);
Cassandra.Client client = conn.getClient();
client.batch_mutate(batch, consistency);
} catch (Exception ex) {
throw CassandraThriftKeyColumnValueStore.convertException(ex);
} finally {
pool.returnObjectUnsafe(keySpaceName, conn);
}
}
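The payload shape is the nested map the comment above describes: row key → column family name → list of Mutations. A minimal, hand-built single-column batch, assuming an illustrative column family name "my_cf" and QUORUM writes:

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.Mutation;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

static void writeOneColumn(Cassandra.Client client) throws Exception {
    // Build the single column to insert.
    Column column = new Column(ByteBuffer.wrap("name".getBytes(StandardCharsets.UTF_8)));
    column.setValue(ByteBuffer.wrap("value".getBytes(StandardCharsets.UTF_8)));
    column.setTimestamp(System.currentTimeMillis() * 1000); // Cassandra convention: microseconds

    // Wrap it in the Mutation envelope batch_mutate expects.
    ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
    cosc.setColumn(column);
    Mutation m = new Mutation();
    m.setColumn_or_supercolumn(cosc);

    // key -> column family -> mutations, exactly the structure assembled above.
    Map<ByteBuffer, Map<String, List<Mutation>>> batch = new HashMap<>();
    ByteBuffer rowKey = ByteBuffer.wrap("row1".getBytes(StandardCharsets.UTF_8));
    batch.put(rowKey, Collections.singletonMap("my_cf", Collections.singletonList(m)));
    client.batch_mutate(batch, ConsistencyLevel.QUORUM);
}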