Use of com.palantir.common.base.FunctionCheckedException in project atlasdb by palantir.
In the class CassandraKeyValueServiceImpl, method getRowsForSingleHost:
private Map<Cell, Value> getRowsForSingleHost(final InetSocketAddress host,
        final TableReference tableRef,
        final List<byte[]> rows,
        final long startTs) {
    try {
        int rowCount = 0;
        final Map<Cell, Value> result = Maps.newHashMap();
        int fetchBatchCount = config.fetchBatchCount();
        // Split the requested rows into batches so that no single multiget exceeds fetchBatchCount rows.
        for (final List<byte[]> batch : Lists.partition(rows, fetchBatchCount)) {
            rowCount += batch.size();
            result.putAll(clientPool.runWithRetryOnHost(host,
                    new FunctionCheckedException<CassandraClient, Map<Cell, Value>, Exception>() {
                        @Override
                        public Map<Cell, Value> apply(CassandraClient client) throws Exception {
                            // We want to get all the columns in the row so set start and end to empty.
                            SlicePredicate pred = SlicePredicates.create(Range.ALL, Limit.NO_LIMIT);
                            List<ByteBuffer> rowNames = wrap(batch);
                            Map<ByteBuffer, List<ColumnOrSuperColumn>> results = wrappingQueryRunner.multiget(
                                    "getRows", client, tableRef, rowNames, pred, readConsistency);
                            Map<Cell, Value> ret = Maps.newHashMapWithExpectedSize(batch.size());
                            new ValueExtractor(ret).extractResults(results, startTs, ColumnSelection.all());
                            return ret;
                        }

                        // Human-readable description of this request for logging and debugging.
                        @Override
                        public String toString() {
                            return "multiget_slice(" + tableRef.getQualifiedName() + ", " + batch.size() + " rows" + ")";
                        }
                    }));
        }
        if (rowCount > fetchBatchCount) {
            log.warn("Rebatched in getRows a call to {} that attempted to multiget {} rows; "
                            + "this may indicate overly-large batching on a higher level.\n{}",
                    LoggingArgs.tableRef(tableRef),
                    SafeArg.of("rowCount", rowCount),
                    SafeArg.of("stacktrace", CassandraKeyValueServices.getFilteredStackTrace("com.palantir")));
        }
        return ImmutableMap.copyOf(result);
    } catch (Exception e) {
        throw QosAwareThrowables.unwrapAndThrowRateLimitExceededOrAtlasDbDependencyException(e);
    }
}
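Both snippets here implement FunctionCheckedException anonymously: it is essentially a java.util.function.Function whose apply may throw a checked exception, which is what lets the body call the Thrift client and still declare throws Exception, and overriding toString gives the retrying pool a readable name for the work. Below is a minimal, self-contained sketch of that shape as inferred from the usage above; the interface body and the runWithRetry helper are assumptions for illustration, not AtlasDB's actual implementation (the real retry logic lives in clientPool.runWithRetryOnHost).

import java.io.IOException;

// Sketch, inferred from the usage above, of what
// com.palantir.common.base.FunctionCheckedException looks like:
// a function whose apply() may throw a checked exception of type K.
interface FunctionCheckedException<F, T, K extends Exception> {
    T apply(F input) throws K;
}

public final class FunctionCheckedExceptionSketch {

    // Hypothetical stand-in for clientPool.runWithRetryOnHost: re-invoke the
    // function up to 'attempts' times, using its toString() in diagnostics.
    static <F, T, K extends Exception> T runWithRetry(
            F resource, FunctionCheckedException<F, T, K> fn, int attempts) throws K {
        for (int i = 1; ; i++) {
            try {
                return fn.apply(resource);
            } catch (Exception e) {
                System.err.println("Attempt " + i + " of " + fn + " failed: " + e);
                if (i >= attempts) {
                    @SuppressWarnings("unchecked")
                    K typed = (K) e; // apply() can only throw K or unchecked exceptions
                    throw typed;
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        String out = runWithRetry("atlas", new FunctionCheckedException<String, String, IOException>() {
            @Override
            public String apply(String input) throws IOException {
                return input.toUpperCase();
            }

            @Override
            public String toString() {
                return "toUpperCase"; // same diagnostic trick as the toString overrides above
            }
        }, 3);
        System.out.println(out); // prints ATLAS
    }
}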
Use of com.palantir.common.base.FunctionCheckedException in project atlasdb by palantir.
In the class CellLoader, method getLoadWithTsTasksForSingleHost:
// TODO(unknown): after cassandra api change: handle different column select per row
private List<Callable<Void>> getLoadWithTsTasksForSingleHost(final String kvsMethodName,
        final InetSocketAddress host,
        final TableReference tableRef,
        final Collection<Cell> cells,
        final long startTs,
        final boolean loadAllTs,
        final CassandraKeyValueServices.ThreadSafeResultVisitor visitor,
        final ConsistencyLevel consistency) {
    final ColumnParent colFam = new ColumnParent(CassandraKeyValueServiceImpl.internalTableName(tableRef));
    // Group the requested cells by column so each task fetches a single column slice.
    Multimap<byte[], Cell> cellsByCol =
            TreeMultimap.create(UnsignedBytes.lexicographicalComparator(), Ordering.natural());
    for (Cell cell : cells) {
        cellsByCol.put(cell.getColumnName(), cell);
    }
    List<Callable<Void>> tasks = Lists.newArrayList();
    int fetchBatchCount = config.fetchBatchCount();
    for (Map.Entry<byte[], Collection<Cell>> entry : Multimaps.asMap(cellsByCol).entrySet()) {
        final byte[] col = entry.getKey();
        Collection<Cell> columnCells = entry.getValue();
        if (columnCells.size() > fetchBatchCount) {
            log.warn("Re-batching in getLoadWithTsTasksForSingleHost a call to {} for table {} that attempted to "
                            + "multiget {} rows; this may indicate overly-large batching on a higher level.\n{}",
                    SafeArg.of("host", CassandraLogHelper.host(host)),
                    LoggingArgs.tableRef(tableRef),
                    SafeArg.of("rows", columnCells.size()),
                    SafeArg.of("stacktrace", CassandraKeyValueServices.getFilteredStackTrace("com.palantir")));
        }
        for (final List<Cell> partition : Lists.partition(ImmutableList.copyOf(columnCells), fetchBatchCount)) {
            Callable<Void> multiGetCallable = () -> clientPool.runWithRetryOnHost(host,
                    new FunctionCheckedException<CassandraClient, Void, Exception>() {
                        @Override
                        public Void apply(CassandraClient client) throws Exception {
                            SlicePredicates.Range range = SlicePredicates.Range.singleColumn(col, startTs);
                            SlicePredicates.Limit limit =
                                    loadAllTs ? SlicePredicates.Limit.NO_LIMIT : SlicePredicates.Limit.ONE;
                            SlicePredicate predicate = SlicePredicates.create(range, limit);
                            List<ByteBuffer> rowNames = Lists.newArrayListWithCapacity(partition.size());
                            for (Cell c : partition) {
                                rowNames.add(ByteBuffer.wrap(c.getRowName()));
                            }
                            if (log.isTraceEnabled()) {
                                log.trace("Requesting {} cells from {} {}starting at timestamp {} on {}",
                                        SafeArg.of("cells", partition.size()),
                                        LoggingArgs.tableRef(tableRef),
                                        SafeArg.of("timestampClause", loadAllTs ? "for all timestamps " : ""),
                                        SafeArg.of("startTs", startTs),
                                        SafeArg.of("host", CassandraLogHelper.host(host)));
                            }
                            Map<ByteBuffer, List<ColumnOrSuperColumn>> results = queryRunner.multiget(
                                    kvsMethodName, client, tableRef, rowNames, predicate, consistency);
                            visitor.visit(results);
                            return null;
                        }

                        // Human-readable description of this request for logging and debugging.
                        @Override
                        public String toString() {
                            return "multiget_slice(" + host + ", " + colFam + ", " + partition.size() + " cells" + ")";
                        }
                    });
            tasks.add(AnnotatedCallable.wrapWithThreadName(AnnotationType.PREPEND,
                    "Atlas loadWithTs " + partition.size() + " cells from " + tableRef + " on " + host,
                    multiGetCallable));
        }
    }
    return tasks;
}
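A pattern both snippets rely on is Guava's Lists.partition, which splits a list into consecutive sublists of at most fetchBatchCount elements so that no single multiget grows unbounded, with one task dispatched per sublist. Here is a stripped-down, self-contained sketch of that batching-and-dispatch shape; FETCH_BATCH_COUNT and processBatch are hypothetical stand-ins for config.fetchBatchCount() and the per-batch multiget, not AtlasDB code.

import com.google.common.collect.Lists;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;

public final class BatchingSketch {

    static final int FETCH_BATCH_COUNT = 2; // hypothetical stand-in for config.fetchBatchCount()

    public static void main(String[] args) throws Exception {
        List<String> rows = List.of("r1", "r2", "r3", "r4", "r5");
        List<Callable<Integer>> tasks = new ArrayList<>();
        // Lists.partition yields consecutive sublists of at most FETCH_BATCH_COUNT
        // elements, mirroring how both methods above bound each multiget.
        for (List<String> batch : Lists.partition(rows, FETCH_BATCH_COUNT)) {
            tasks.add(() -> processBatch(batch)); // one bounded task per batch
        }
        int total = 0;
        for (Callable<Integer> task : tasks) {
            total += task.call();
        }
        System.out.println("processed " + total + " rows in " + tasks.size() + " batches");
    }

    // Hypothetical placeholder for the per-batch fetch.
    private static int processBatch(List<String> batch) {
        return batch.size();
    }
}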