
Example 1 with FunctionCheckedException

Use of com.palantir.common.base.FunctionCheckedException in project atlasdb by Palantir.

From class CassandraKeyValueServiceImpl, method getRowsForSingleHost:

private Map<Cell, Value> getRowsForSingleHost(final InetSocketAddress host, final TableReference tableRef, final List<byte[]> rows, final long startTs) {
    try {
        int rowCount = 0;
        final Map<Cell, Value> result = Maps.newHashMap();
        int fetchBatchCount = config.fetchBatchCount();
        for (final List<byte[]> batch : Lists.partition(rows, fetchBatchCount)) {
            rowCount += batch.size();
            result.putAll(clientPool.runWithRetryOnHost(host, new FunctionCheckedException<CassandraClient, Map<Cell, Value>, Exception>() {

                @Override
                public Map<Cell, Value> apply(CassandraClient client) throws Exception {
                    // We want to get all the columns in the row so set start and end to empty.
                    SlicePredicate pred = SlicePredicates.create(Range.ALL, Limit.NO_LIMIT);
                    List<ByteBuffer> rowNames = wrap(batch);
                    Map<ByteBuffer, List<ColumnOrSuperColumn>> results = wrappingQueryRunner.multiget("getRows", client, tableRef, rowNames, pred, readConsistency);
                    Map<Cell, Value> ret = Maps.newHashMapWithExpectedSize(batch.size());
                    new ValueExtractor(ret).extractResults(results, startTs, ColumnSelection.all());
                    return ret;
                }

                @Override
                public String toString() {
                    return "multiget_slice(" + tableRef.getQualifiedName() + ", " + batch.size() + " rows" + ")";
                }
            }));
        }
        if (rowCount > fetchBatchCount) {
            log.warn("Rebatched in getRows a call to {} that attempted to multiget {} rows; " + "this may indicate overly-large batching on a higher level.\n{}", LoggingArgs.tableRef(tableRef), SafeArg.of("rowCount", rowCount), SafeArg.of("stacktrace", CassandraKeyValueServices.getFilteredStackTrace("com.palantir")));
        }
        return ImmutableMap.copyOf(result);
    } catch (Exception e) {
        throw QosAwareThrowables.unwrapAndThrowRateLimitExceededOrAtlasDbDependencyException(e);
    }
}
Also used:
SlicePredicate (org.apache.cassandra.thrift.SlicePredicate)
ByteBuffer (java.nio.ByteBuffer)
InsufficientConsistencyException (com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException)
CheckAndSetException (com.palantir.atlasdb.keyvalue.api.CheckAndSetException)
KeyAlreadyExistsException (com.palantir.atlasdb.keyvalue.api.KeyAlreadyExistsException)
FunctionCheckedException (com.palantir.common.base.FunctionCheckedException)
TException (org.apache.thrift.TException)
UnavailableException (org.apache.cassandra.thrift.UnavailableException)
PalantirRuntimeException (com.palantir.common.exception.PalantirRuntimeException)
Value (com.palantir.atlasdb.keyvalue.api.Value)
List (java.util.List)
ImmutableList (com.google.common.collect.ImmutableList)
Cell (com.palantir.atlasdb.keyvalue.api.Cell)
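
The example above wraps the Thrift multiget in a FunctionCheckedException and hands it to the client pool, which owns host selection and retries. The sketch below illustrates only that shape with standard Java: CheckedFunction and runWithRetry are hypothetical stand-ins for com.palantir.common.base.FunctionCheckedException and clientPool.runWithRetryOnHost, not the real AtlasDB APIs.

// Minimal sketch of the pattern above, assuming nothing beyond standard Java.
// CheckedFunction mirrors the three-type-parameter shape of
// com.palantir.common.base.FunctionCheckedException (input, result, checked exception);
// runWithRetry is a hypothetical stand-in for clientPool.runWithRetryOnHost.
public final class CheckedFunctionSketch {

    @FunctionalInterface
    interface CheckedFunction<F, T, E extends Exception> {
        T apply(F input) throws E;
    }

    // Hypothetical retry loop; the real client pool also owns host selection,
    // connection handling and blacklisting.
    static <F, T> T runWithRetry(F client, int attempts, CheckedFunction<F, T, Exception> fn)
            throws Exception {
        Exception last = null;
        for (int attempt = 0; attempt < attempts; attempt++) {
            try {
                return fn.apply(client);
            } catch (Exception e) {
                last = e;
            }
        }
        throw last;
    }

    public static void main(String[] args) throws Exception {
        // The "client" here is a plain String; in the example above it is a CassandraClient.
        String result = runWithRetry("fake-client", 3, client -> client + " -> multiget ok");
        System.out.println(result);
    }
}

Note that the anonymous class in the real example also overrides toString(), so retry and warning logs can name the operation ("multiget_slice(...)") and its batch size; a plain lambda would lose that.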

Example 2 with FunctionCheckedException

Use of com.palantir.common.base.FunctionCheckedException in project atlasdb by Palantir.

From class CellLoader, method getLoadWithTsTasksForSingleHost:

// TODO(unknown): after cassandra api change: handle different column select per row
private List<Callable<Void>> getLoadWithTsTasksForSingleHost(final String kvsMethodName, final InetSocketAddress host, final TableReference tableRef, final Collection<Cell> cells, final long startTs, final boolean loadAllTs, final CassandraKeyValueServices.ThreadSafeResultVisitor visitor, final ConsistencyLevel consistency) {
    final ColumnParent colFam = new ColumnParent(CassandraKeyValueServiceImpl.internalTableName(tableRef));
    Multimap<byte[], Cell> cellsByCol = TreeMultimap.create(UnsignedBytes.lexicographicalComparator(), Ordering.natural());
    for (Cell cell : cells) {
        cellsByCol.put(cell.getColumnName(), cell);
    }
    List<Callable<Void>> tasks = Lists.newArrayList();
    int fetchBatchCount = config.fetchBatchCount();
    for (Map.Entry<byte[], Collection<Cell>> entry : Multimaps.asMap(cellsByCol).entrySet()) {
        final byte[] col = entry.getKey();
        Collection<Cell> columnCells = entry.getValue();
        if (columnCells.size() > fetchBatchCount) {
            log.warn("Re-batching in getLoadWithTsTasksForSingleHost a call to {} for table {} that attempted to " + "multiget {} rows; this may indicate overly-large batching on a higher level.\n{}", SafeArg.of("host", CassandraLogHelper.host(host)), LoggingArgs.tableRef(tableRef), SafeArg.of("rows", columnCells.size()), SafeArg.of("stacktrace", CassandraKeyValueServices.getFilteredStackTrace("com.palantir")));
        }
        for (final List<Cell> partition : Lists.partition(ImmutableList.copyOf(columnCells), fetchBatchCount)) {
            Callable<Void> multiGetCallable = () -> clientPool.runWithRetryOnHost(host, new FunctionCheckedException<CassandraClient, Void, Exception>() {

                @Override
                public Void apply(CassandraClient client) throws Exception {
                    SlicePredicates.Range range = SlicePredicates.Range.singleColumn(col, startTs);
                    SlicePredicates.Limit limit = loadAllTs ? SlicePredicates.Limit.NO_LIMIT : SlicePredicates.Limit.ONE;
                    SlicePredicate predicate = SlicePredicates.create(range, limit);
                    List<ByteBuffer> rowNames = Lists.newArrayListWithCapacity(partition.size());
                    for (Cell c : partition) {
                        rowNames.add(ByteBuffer.wrap(c.getRowName()));
                    }
                    if (log.isTraceEnabled()) {
                        log.trace("Requesting {} cells from {} {}starting at timestamp {} on {}", SafeArg.of("cells", partition.size()), LoggingArgs.tableRef(tableRef), SafeArg.of("timestampClause", loadAllTs ? "for all timestamps " : ""), SafeArg.of("startTs", startTs), SafeArg.of("host", CassandraLogHelper.host(host)));
                    }
                    Map<ByteBuffer, List<ColumnOrSuperColumn>> results = queryRunner.multiget(kvsMethodName, client, tableRef, rowNames, predicate, consistency);
                    visitor.visit(results);
                    return null;
                }

                @Override
                public String toString() {
                    return "multiget_slice(" + host + ", " + colFam + ", " + partition.size() + " cells" + ")";
                }
            });
            tasks.add(AnnotatedCallable.wrapWithThreadName(AnnotationType.PREPEND, "Atlas loadWithTs " + partition.size() + " cells from " + tableRef + " on " + host, multiGetCallable));
        }
    }
    return tasks;
}
Also used:
ColumnOrSuperColumn (org.apache.cassandra.thrift.ColumnOrSuperColumn)
ColumnParent (org.apache.cassandra.thrift.ColumnParent)
SlicePredicate (org.apache.cassandra.thrift.SlicePredicate)
Callable (java.util.concurrent.Callable)
AnnotatedCallable (com.palantir.atlasdb.util.AnnotatedCallable)
FunctionCheckedException (com.palantir.common.base.FunctionCheckedException)
Collection (java.util.Collection)
ImmutableList (com.google.common.collect.ImmutableList)
List (java.util.List)
Cell (com.palantir.atlasdb.keyvalue.api.Cell)
Map (java.util.Map)
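
getLoadWithTsTasksForSingleHost only builds the per-partition tasks; the caller executes them. The sketch below is a hypothetical driver for such a List<Callable<Void>> using a plain ExecutorService; the real CellLoader submits its tasks through AtlasDB's own task-running machinery, so treat this purely as an illustration of the shape.

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Hypothetical driver for a List<Callable<Void>> like the one built above: run every
// per-partition multiget task and surface any failure to the caller.
public final class TaskListSketch {

    static void runAll(List<Callable<Void>> tasks, int threads) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        try {
            // invokeAll blocks until every task has completed or thrown.
            for (Future<Void> future : executor.invokeAll(tasks)) {
                future.get(); // rethrows (wrapped) any exception from the corresponding task
            }
        } finally {
            executor.shutdownNow();
        }
    }

    public static void main(String[] args) throws Exception {
        List<Callable<Void>> tasks = List.of(
                () -> { System.out.println("partition 1 loaded"); return null; },
                () -> { System.out.println("partition 2 loaded"); return null; });
        runAll(tasks, 2);
    }
}

invokeAll returns futures in the same order as the task list, so the loop surfaces failures task by task rather than silently dropping them.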

Aggregations

ImmutableList (com.google.common.collect.ImmutableList): 2
Cell (com.palantir.atlasdb.keyvalue.api.Cell): 2
FunctionCheckedException (com.palantir.common.base.FunctionCheckedException): 2
List (java.util.List): 2
SlicePredicate (org.apache.cassandra.thrift.SlicePredicate): 2
CheckAndSetException (com.palantir.atlasdb.keyvalue.api.CheckAndSetException): 1
InsufficientConsistencyException (com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException): 1
KeyAlreadyExistsException (com.palantir.atlasdb.keyvalue.api.KeyAlreadyExistsException): 1
Value (com.palantir.atlasdb.keyvalue.api.Value): 1
AnnotatedCallable (com.palantir.atlasdb.util.AnnotatedCallable): 1
PalantirRuntimeException (com.palantir.common.exception.PalantirRuntimeException): 1
ByteBuffer (java.nio.ByteBuffer): 1
Collection (java.util.Collection): 1
Map (java.util.Map): 1
Callable (java.util.concurrent.Callable): 1
ColumnOrSuperColumn (org.apache.cassandra.thrift.ColumnOrSuperColumn): 1
ColumnParent (org.apache.cassandra.thrift.ColumnParent): 1
UnavailableException (org.apache.cassandra.thrift.UnavailableException): 1
TException (org.apache.thrift.TException): 1