Use of com.netflix.astyanax.model.Rows in project janusgraph by JanusGraph.
From the class AstyanaxKeyColumnValueStore, method getKeys:
@Override
public KeyIterator getKeys(@Nullable SliceQuery sliceQuery, StoreTransaction txh) throws BackendException {
    if (storeManager.getPartitioner() != Partitioner.RANDOM)
        throw new PermanentBackendException("This operation is only allowed when random partitioner (md5 or murmur3) is used.");
    AllRowsQuery allRowsQuery = keyspace.prepareQuery(columnFamily).getAllRows();
    if (sliceQuery != null) {
        allRowsQuery.withColumnRange(sliceQuery.getSliceStart().asByteBuffer(),
            sliceQuery.getSliceEnd().asByteBuffer(), false, sliceQuery.getLimit());
    }
    Rows<ByteBuffer, ByteBuffer> result;
    try {
        /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
        OperationResult op = allRowsQuery
            // pre-fetch that many rows at a time
            .setRowLimit(storeManager.getPageSize())
            // one execution thread for fetching portion of rows
            .setConcurrencyLevel(1)
            .setExceptionCallback(new ExceptionCallback() {

                private int retries = 0;

                @Override
                public boolean onException(ConnectionException e) {
                    try {
                        // make 3 re-tries
                        return retries > 2;
                    } finally {
                        retries++;
                    }
                }
            }).execute();
        result = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) op).getResult();
    } catch (ConnectionException e) {
        throw new PermanentBackendException(e);
    }
    return new RowIterator(result.iterator(), sliceQuery);
}
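
A minimal usage sketch of this method (hypothetical caller: the helper name and the store and txh variables are assumptions; passing a null SliceQuery is allowed because the parameter is @Nullable in this implementation):

    static void scanAllKeys(KeyColumnValueStore store, StoreTransaction txh) throws BackendException, IOException {
        // A null slice applies no column filter; the cast picks the SliceQuery overload of getKeys.
        try (KeyIterator keys = store.getKeys((SliceQuery) null, txh)) {
            while (keys.hasNext()) {
                StaticBuffer key = keys.next();
                // Columns of the row that next() just returned
                try (RecordIterator<Entry> entries = keys.getEntries()) {
                    while (entries.hasNext()) {
                        Entry entry = entries.next();
                        // e.g. inspect entry.getColumn() and entry.getValue()
                    }
                }
            }
        }
    }

Both KeyIterator and RecordIterator are Closeable in the JanusGraph API, hence the try-with-resources blocks.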
Use of com.netflix.astyanax.model.Rows in project janusgraph by JanusGraph.
From the class AstyanaxKeyColumnValueStore, method getNamesSlice:
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
    /*
     * RowQuery<K,C> should be parametrized as
     * RowQuery<ByteBuffer,ByteBuffer>. However, this causes the following
     * compilation error when attempting to call withColumnRange on a
     * RowQuery<ByteBuffer,ByteBuffer> instance:
     *
     * java.lang.Error: Unresolved compilation problem: The method
     * withColumnRange(ByteBuffer, ByteBuffer, boolean, int) is ambiguous
     * for the type RowQuery<ByteBuffer,ByteBuffer>
     *
     * The compiler substitutes ByteBuffer=C for both startColumn and
     * endColumn, compares it to its identical twin with that type
     * hard-coded, and dies.
     */
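    /*
     * Illustration of the failing call (hypothetical snippet, intentionally
     * not compiled; the variable names are placeholders):
     *
     *   RowQuery<ByteBuffer, ByteBuffer> typed = ...;
     *   typed.withColumnRange(sliceStart, sliceEnd, false, pageLimit); // ambiguous
     *
     * Using the raw RowSliceQuery/RowQuery types below sidesteps the clash.
     */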
    // Add one to the limit for the last column, which CassandraHelper.makeEntryList may remove
    final int queryLimit = query.getLimit() + (query.hasLimit() ? 1 : 0);
    final int pageLimit = Math.min(this.readPageSize, queryLimit);
    ByteBuffer sliceStart = query.getSliceStart().asByteBuffer();
    final ByteBuffer sliceEnd = query.getSliceEnd().asByteBuffer();
    final RowSliceQuery rq = keyspace.prepareQuery(columnFamily)
        .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
        .withRetryPolicy(retryPolicy.duplicate())
        .getKeySlice(CassandraHelper.convert(keys));
    // Don't chain directly due to the ambiguity described in the comment at the top of this method
    rq.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
    final OperationResult<Rows<ByteBuffer, ByteBuffer>> r;
    try {
        r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    final Rows<ByteBuffer, ByteBuffer> rows = r.getResult();
    final Map<StaticBuffer, EntryList> result = new HashMap<>(rows.size());
    for (Row<ByteBuffer, ByteBuffer> row : rows) {
        assert !result.containsKey(row.getKey());
        final ByteBuffer key = row.getKey();
        ColumnList<ByteBuffer> pageColumns = row.getColumns();
        final List<Column<ByteBuffer>> queryColumns = new ArrayList<>();
        Iterables.addAll(queryColumns, pageColumns);
        // Keep paging while the last page was full and the overall limit has not been reached
        while (pageColumns.size() == pageLimit && queryColumns.size() < queryLimit) {
            final Column<ByteBuffer> lastColumn = queryColumns.get(queryColumns.size() - 1);
            sliceStart = lastColumn.getName();
            // No two values can share a column name, so start the next slice just
            // after the last column found by the previous query: scan from the last
            // (fastest-varying) byte and increment the first one that can grow.
            Integer position = null;
            for (int i = sliceStart.array().length - 1; i >= 0; i--) {
                if (sliceStart.array()[i] < Byte.MAX_VALUE) {
                    position = i;
                    sliceStart.array()[i]++;
                    break;
                }
            }
            if (null == position) {
                throw new PermanentBackendException("Column was not incrementable");
            }
            final RowQuery pageQuery = keyspace.prepareQuery(columnFamily)
                .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
                .withRetryPolicy(retryPolicy.duplicate())
                .getKey(row.getKey());
            // Don't chain directly due to the ambiguity described in the comment at the top of this method
            pageQuery.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
            final OperationResult<ColumnList<ByteBuffer>> pageResult;
            try {
                pageResult = (OperationResult<ColumnList<ByteBuffer>>) pageQuery.execute();
            } catch (ConnectionException e) {
                throw new TemporaryBackendException(e);
            }
            if (Thread.interrupted()) {
                throw new TraversalInterruptedException();
            }
            // Reset the incremented byte to avoid leaking the mutation up the stack:
            // sliceStart.array() aliases a column name that will later be read to
            // deserialize an edge (it was assigned by dereferencing a column from
            // the previous query).
            sliceStart.array()[position]--;
            pageColumns = pageResult.getResult();
            Iterables.addAll(queryColumns, pageColumns);
        }
        result.put(StaticArrayBuffer.of(key),
            CassandraHelper.makeEntryList(queryColumns, entryGetter, query.getSliceEnd(), query.getLimit()));
    }
    return result;
}
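
The increment-and-restore trick above is easy to miss. The following standalone sketch isolates it (a hypothetical helper, not part of the JanusGraph source; it assumes the ByteBuffer is array-backed, as the column names above are):

    import java.nio.ByteBuffer;

    final class ColumnNameIncrementSketch {
        /** Bumps the last byte that can grow, so a slice starting here lands strictly
         *  after the original name; returns the bumped index, or -1 if none can grow. */
        static int incrementInPlace(ByteBuffer name) {
            byte[] bytes = name.array();
            for (int i = bytes.length - 1; i >= 0; i--) {
                if (bytes[i] < Byte.MAX_VALUE) {
                    bytes[i]++;
                    return i;
                }
            }
            return -1; // caller must treat this as "column was not incrementable"
        }

        /** Undoes the bump, because the array is shared with data that is read later. */
        static void restoreInPlace(ByteBuffer name, int position) {
            name.array()[position]--;
        }
    }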
Use of com.netflix.astyanax.model.Rows in project janusgraph by JanusGraph.
From the class AstyanaxKeyColumnValueStore, method getKeys:
@Override
public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException {
    // This query can only be run when a byte-ordering partitioner is used,
    // because Cassandra operates on tokens internally, so even a contiguous
    // range of keys (e.g. a time slice) could map to a disjoint set of tokens
    // under a random partitioner, returning ambiguous results to the user.
    Partitioner partitioner = storeManager.getPartitioner();
    if (partitioner != Partitioner.BYTEORDER)
        throw new PermanentBackendException("getKeys(KeyRangeQuery) can only be used with a byte-ordering partitioner.");
    ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer();
    RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily)
        .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
        .withRetryPolicy(retryPolicy.duplicate())
        .getKeyRange(start, end, null, null, Integer.MAX_VALUE);
    // Astyanax is bad at builder pattern :(
    rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(),
        query.getSliceEnd().asByteBuffer(), false, query.getLimit());
    final Rows<ByteBuffer, ByteBuffer> r;
    try {
        r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    // Omit the query's key end from the result, if present
    Iterator<Row<ByteBuffer, ByteBuffer>> i =
        Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer()));
    return new RowIterator(i, query);
}
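
KeySkipPredicate is defined elsewhere in the class. A minimal sketch of its likely shape, inferred from the Iterators.filter call above (an assumption, not the verbatim JanusGraph code):

    import com.google.common.base.Predicate;
    import com.netflix.astyanax.model.Row;
    import java.nio.ByteBuffer;

    // Keep every row whose key differs from the excluded key-range end.
    final class KeySkipPredicate implements Predicate<Row<ByteBuffer, ByteBuffer>> {
        private final ByteBuffer skip;

        KeySkipPredicate(ByteBuffer skip) {
            this.skip = skip;
        }

        @Override
        public boolean apply(Row<ByteBuffer, ByteBuffer> row) {
            return row != null && !skip.equals(row.getKey());
        }
    }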