Use of com.netflix.astyanax.query.AllRowsQuery in project titan by thinkaurelius.
From the class AstyanaxKeyColumnValueStore, method getKeys:
@Override
public KeyIterator getKeys(@Nullable SliceQuery sliceQuery, StoreTransaction txh) throws BackendException {
    if (storeManager.getPartitioner() != Partitioner.RANDOM)
        throw new PermanentBackendException("This operation is only allowed when random partitioner (md5 or murmur3) is used.");
    AllRowsQuery allRowsQuery = keyspace.prepareQuery(columnFamily).getAllRows();
    if (sliceQuery != null) {
        allRowsQuery.withColumnRange(sliceQuery.getSliceStart().asByteBuffer(),
                sliceQuery.getSliceEnd().asByteBuffer(),
                false,
                sliceQuery.getLimit());
    }
    Rows<ByteBuffer, ByteBuffer> result;
    try {
        /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
        OperationResult op = allRowsQuery
                // pre-fetch that many rows at a time
                .setRowLimit(storeManager.getPageSize())
                // one execution thread for fetching portion of rows
                .setConcurrencyLevel(1)
                .setExceptionCallback(new ExceptionCallback() {

                    private int retries = 0;

                    @Override
                    public boolean onException(ConnectionException e) {
                        try {
                            // make 3 re-tries
                            return retries > 2;
                        } finally {
                            retries++;
                        }
                    }
                }).execute();
        result = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) op).getResult();
    } catch (ConnectionException e) {
        throw new PermanentBackendException(e);
    }
    return new RowIterator(result.iterator(), sliceQuery);
}
Use of com.netflix.astyanax.query.AllRowsQuery in project janusgraph by JanusGraph.
From the class AstyanaxKeyColumnValueStore, method getKeys:
@Override
public KeyIterator getKeys(@Nullable SliceQuery sliceQuery, StoreTransaction txh) throws BackendException {
    if (storeManager.getPartitioner() != Partitioner.RANDOM)
        throw new PermanentBackendException("This operation is only allowed when random partitioner (md5 or murmur3) is used.");
    AllRowsQuery allRowsQuery = keyspace.prepareQuery(columnFamily).getAllRows();
    if (sliceQuery != null) {
        allRowsQuery.withColumnRange(sliceQuery.getSliceStart().asByteBuffer(),
                sliceQuery.getSliceEnd().asByteBuffer(),
                false,
                sliceQuery.getLimit());
    }
    Rows<ByteBuffer, ByteBuffer> result;
    try {
        /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
        OperationResult op = allRowsQuery
                // pre-fetch that many rows at a time
                .setRowLimit(storeManager.getPageSize())
                // one execution thread for fetching portion of rows
                .setConcurrencyLevel(1)
                .setExceptionCallback(new ExceptionCallback() {

                    private int retries = 0;

                    @Override
                    public boolean onException(ConnectionException e) {
                        try {
                            // make 3 re-tries
                            return retries > 2;
                        } finally {
                            retries++;
                        }
                    }
                }).execute();
        result = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) op).getResult();
    } catch (ConnectionException e) {
        throw new PermanentBackendException(e);
    }
    return new RowIterator(result.iterator(), sliceQuery);
}
Use of com.netflix.astyanax.query.AllRowsQuery in project titan by thinkaurelius.
From the class AstyanaxOrderedKeyColumnValueStore, method getKeys:
@Override
public KeyIterator getKeys(@Nullable SliceQuery sliceQuery, StoreTransaction txh) throws StorageException {
    if (storeManager.getPartitioner() != Partitioner.RANDOM)
        throw new PermanentStorageException("This operation is only allowed when random partitioner (md5 or murmur3) is used.");
    AllRowsQuery allRowsQuery = keyspace.prepareQuery(columnFamily).getAllRows();
    if (sliceQuery != null) {
        int limit = (sliceQuery.hasLimit()) ? sliceQuery.getLimit() : Integer.MAX_VALUE;
        allRowsQuery.withColumnRange(sliceQuery.getSliceStart().asByteBuffer(),
                sliceQuery.getSliceEnd().asByteBuffer(),
                false,
                limit);
    }
    Rows<ByteBuffer, ByteBuffer> result;
    try {
        /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
        OperationResult op = allRowsQuery
                // pre-fetch that many rows at a time
                .setRowLimit(storeManager.getPageSize())
                // one execution thread for fetching portion of rows
                .setConcurrencyLevel(1)
                .setExceptionCallback(new ExceptionCallback() {

                    private int retries = 0;

                    @Override
                    public boolean onException(ConnectionException e) {
                        try {
                            // make 3 re-tries
                            return retries > 2;
                        } finally {
                            retries++;
                        }
                    }
                }).execute();
        result = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) op).getResult();
    } catch (ConnectionException e) {
        throw new PermanentStorageException(e);
    }
    return new RowIterator(result.iterator(), sliceQuery);
}
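For context, the snippets above all drive an AllRowsQuery to page over every row of a column family; below is a minimal standalone sketch of the same pattern against plain Astyanax, outside either project. The column family name "edgestore", the page size of 1000, and the AllRowsQuerySketch/scan names are hypothetical placeholders, and the row-handling body is illustrative only; the stores above instead derive the page size from storeManager.getPageSize() and wrap the result in their own RowIterator.

import java.nio.ByteBuffer;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.serializers.ByteBufferSerializer;

public class AllRowsQuerySketch {

    // Hypothetical column family; the stores above obtain theirs from the store manager.
    private static final ColumnFamily<ByteBuffer, ByteBuffer> CF =
            new ColumnFamily<ByteBuffer, ByteBuffer>("edgestore",
                    ByteBufferSerializer.get(), ByteBufferSerializer.get());

    public static void scan(Keyspace keyspace) throws ConnectionException {
        // Page through all rows, fetching 1000 per request on a single thread,
        // mirroring setRowLimit(...) and setConcurrencyLevel(1) in the methods above.
        OperationResult<Rows<ByteBuffer, ByteBuffer>> op = keyspace.prepareQuery(CF)
                .getAllRows()
                .setRowLimit(1000)
                .setConcurrencyLevel(1)
                .execute();
        for (Row<ByteBuffer, ByteBuffer> row : op.getResult()) {
            // Each row exposes its key and whatever columns the query fetched for it.
            ByteBuffer key = row.getKey();
            int columnCount = row.getColumns().size();
        }
    }
}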