Use of com.netflix.astyanax.query.RowQuery in project janusgraph by JanusGraph.
The getNamesSlice method of the class AstyanaxKeyColumnValueStore.
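getNamesSlice reads a column slice for a batch of row keys. Because each Astyanax call returns at most readPageSize columns per row, the method pages through large rows until the requested limit is reached or the row is exhausted.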
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
    /*
     * RowQuery<K,C> should be parametrized as
     * RowQuery<ByteBuffer,ByteBuffer>. However, this causes the following
     * compilation error when attempting to call withColumnRange on a
     * RowQuery<ByteBuffer,ByteBuffer> instance:
     *
     * java.lang.Error: Unresolved compilation problem: The method
     * withColumnRange(ByteBuffer, ByteBuffer, boolean, int) is ambiguous
     * for the type RowQuery<ByteBuffer,ByteBuffer>
     *
     * The compiler substitutes ByteBuffer=C for both startColumn and
     * endColumn, compares it to its identical twin with that type
     * hard-coded, and dies.
     */
    // Add one for the last column, which may be removed in CassandraHelper.makeEntryList
    final int queryLimit = query.getLimit() + (query.hasLimit() ? 1 : 0);
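    // Cap each fetch at the configured read page size; the paging loop below
    // issues follow-up queries for any remainder.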
    final int pageLimit = Math.min(this.readPageSize, queryLimit);

    ByteBuffer sliceStart = query.getSliceStart().asByteBuffer();
    final ByteBuffer sliceEnd = query.getSliceEnd().asByteBuffer();

    final RowSliceQuery rq = keyspace.prepareQuery(columnFamily)
        .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
        .withRetryPolicy(retryPolicy.duplicate())
        .getKeySlice(CassandraHelper.convert(keys));

    // Don't directly chain due to ambiguity resolution; see comment at top of method
    rq.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
    final OperationResult<Rows<ByteBuffer, ByteBuffer>> r;
    try {
        r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }

    final Rows<ByteBuffer, ByteBuffer> rows = r.getResult();
    final Map<StaticBuffer, EntryList> result = new HashMap<>(rows.size());
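    // Assemble an EntryList for each returned row, paging within a row when
    // its first page comes back full.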
    for (Row<ByteBuffer, ByteBuffer> row : rows) {
        assert !result.containsKey(row.getKey());
        final ByteBuffer key = row.getKey();
        ColumnList<ByteBuffer> pageColumns = row.getColumns();
        final List<Column<ByteBuffer>> queryColumns = new ArrayList<>();
        Iterables.addAll(queryColumns, pageColumns);
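        // A full page may mean more columns remain in this row; keep paging
        // until a short page arrives or queryLimit is satisfied.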
        while (pageColumns.size() == pageLimit && queryColumns.size() < queryLimit) {
            final Column<ByteBuffer> lastColumn = queryColumns.get(queryColumns.size() - 1);
            sliceStart = lastColumn.getName();
            // No possibility of two values at the same column name, so start the
            // next slice immediately after the last column found by the previous
            // query, by incrementing its lowest-order incrementable byte.
            Integer position = null;
            for (int i = sliceStart.array().length - 1; i >= 0; i--) {
                if (sliceStart.array()[i] < Byte.MAX_VALUE) {
                    position = i;
                    sliceStart.array()[i]++;
                    break;
                }
            }
            if (null == position) {
                throw new PermanentBackendException("Column was not incrementable");
            }
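            // Issue a single-key RowQuery for the next page of this row.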
            final RowQuery pageQuery = keyspace.prepareQuery(columnFamily)
                .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
                .withRetryPolicy(retryPolicy.duplicate())
                .getKey(row.getKey());
            // Don't directly chain due to ambiguity resolution; see comment at top of method
            pageQuery.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
            final OperationResult<ColumnList<ByteBuffer>> pageResult;
            try {
                pageResult = (OperationResult<ColumnList<ByteBuffer>>) pageQuery.execute();
            } catch (ConnectionException e) {
                throw new TemporaryBackendException(e);
            }
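            // Check for traversal cancellation between pages.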
            if (Thread.interrupted()) {
                throw new TraversalInterruptedException();
            }
            // Undo the increment so the mutation does not leak to callers:
            // sliceStart.array() is the backing array of a column name that will
            // later be read to deserialize an edge, since sliceStart was obtained
            // by dereferencing a column from the previous query.
            sliceStart.array()[position]--;
            pageColumns = pageResult.getResult();
            Iterables.addAll(queryColumns, pageColumns);
        }
        result.put(StaticArrayBuffer.of(key), CassandraHelper.makeEntryList(queryColumns, entryGetter, query.getSliceEnd(), query.getLimit()));
    }
    return result;
}
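For reference, here is a minimal, self-contained sketch of the raw-RowQuery workaround in isolation. The class and method names are hypothetical, and the consistency level, retry policy, and backend-exception handling of the production code are omitted:

import java.nio.ByteBuffer;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.query.RowQuery;

public class RowQueryAmbiguitySketch {

    // Reads up to pageLimit columns of one row between sliceStart and sliceEnd.
    // The query is deliberately held in a raw RowQuery variable: calling
    // withColumnRange on a RowQuery<ByteBuffer,ByteBuffer> trips the overload
    // ambiguity described in the comment at the top of getNamesSlice.
    static ColumnList<ByteBuffer> readPage(Keyspace keyspace,
                                           ColumnFamily<ByteBuffer, ByteBuffer> columnFamily,
                                           ByteBuffer rowKey,
                                           ByteBuffer sliceStart,
                                           ByteBuffer sliceEnd,
                                           int pageLimit) throws ConnectionException {
        final RowQuery query = keyspace.prepareQuery(columnFamily).getKey(rowKey);
        // Configure the column range on its own line rather than in the chain.
        query.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
        final OperationResult<ColumnList<ByteBuffer>> result =
            (OperationResult<ColumnList<ByteBuffer>>) query.execute();
        return result.getResult();
    }
}

Holding the query as a raw type trades a compile-time type check (and an unchecked cast on execute()) for sidestepping the ambiguous overloads.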