Use of com.datastax.driver.core.ResultSetFuture in project atlasdb by palantir.
Class CqlKeyValueService, method loadWithTs.
private void loadWithTs(final TableReference tableRef,
        final Set<Cell> cells,
        final long startTs,
        boolean loadAllTs,
        final Visitor<Multimap<Cell, Value>> visitor,
        final ConsistencyLevel consistency) throws Exception {
    List<ResultSetFuture> resultSetFutures = Lists.newArrayListWithCapacity(cells.size());
    SortedSetMultimap<byte[], Cell> cellsByCol =
            TreeMultimap.create(UnsignedBytes.lexicographicalComparator(), Ordering.natural());
    for (Cell cell : cells) {
        cellsByCol.put(cell.getColumnName(), cell);
    }
    for (Entry<byte[], SortedSet<Cell>> entry : Multimaps.asMap(cellsByCol).entrySet()) {
        if (entry.getValue().size() > config.fetchBatchCount()) {
log.warn("A call to {} is performing a multiget {} cells; this may indicate overly-large batching " + "on a higher level.\n{}", tableRef, entry.getValue().size(), CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
        }
        for (List<Cell> batch : Iterables.partition(entry.getValue(), config.fetchBatchCount())) {
            String rowBinds = Joiner.on(",").join(Iterables.limit(Iterables.cycle('?'), batch.size()));
            final String loadWithTsQuery = "SELECT * FROM " + getFullTableName(tableRef)
                    + " WHERE " + fieldNameProvider.row() + " IN (" + rowBinds + ")"
                    + " AND " + fieldNameProvider.column() + " = ?"
                    + " AND " + fieldNameProvider.timestamp() + " > ?"
                    + (!loadAllTs ? " LIMIT 1" : "");
            final PreparedStatement preparedStatement =
                    getPreparedStatement(tableRef, loadWithTsQuery, session).setConsistencyLevel(consistency);
            Object[] args = new Object[batch.size() + 2];
            for (int i = 0; i < batch.size(); i++) {
                args[i] = ByteBuffer.wrap(batch.get(i).getRowName());
            }
            args[batch.size()] = ByteBuffer.wrap(entry.getKey());
            args[batch.size() + 1] = ~startTs;
            ResultSetFuture resultSetFuture = session.executeAsync(preparedStatement.bind(args));
            resultSetFutures.add(resultSetFuture);
        }
    }
    String loggedLoadWithTsQuery = "SELECT * FROM " + getFullTableName(tableRef)
            + " WHERE " + fieldNameProvider.row() + " IN (?, ...)"
            + " AND " + fieldNameProvider.column() + " = ?"
            + " AND " + fieldNameProvider.timestamp() + " > ?"
            + (!loadAllTs ? " LIMIT 1" : "");
    for (ResultSetFuture rsf : resultSetFutures) {
        visitResults(rsf.getUninterruptibly(), visitor, loggedLoadWithTsQuery, loadAllTs);
    }
}
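The method shows the driver's standard read pipeline: fire every query with executeAsync, collect the ResultSetFutures, and only block on each with getUninterruptibly after all statements are in flight, so the round trips overlap. A minimal, self-contained sketch of that fan-out/collect shape, assuming a hypothetical table my_ks.my_table with a single text key column (not AtlasDB's schema):

import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import java.util.ArrayList;
import java.util.List;

// Sketch only: fan out async reads, then collect results in order.
static List<Row> readRows(Session session, List<String> keys) {
    PreparedStatement ps = session.prepare("SELECT * FROM my_ks.my_table WHERE key = ?");
    List<ResultSetFuture> futures = new ArrayList<>(keys.size());
    for (String key : keys) {
        // executeAsync returns immediately; the queries run in parallel.
        futures.add(session.executeAsync(ps.bind(key)));
    }
    List<Row> rows = new ArrayList<>();
    for (ResultSetFuture future : futures) {
        // getUninterruptibly blocks for the result without throwing InterruptedException.
        rows.addAll(future.getUninterruptibly().all());
    }
    return rows;
}

The per-column grouping and the ~startTs bound in loadWithTs are AtlasDB specifics layered on top of this pattern.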
Use of com.datastax.driver.core.ResultSetFuture in project atlasdb by palantir.
Class CqlKeyValueService, method delete.
@Override
public void delete(final TableReference tableRef, final Multimap<Cell, Long> keys) {
    int cellCount = 0;
    String deleteQuery = "DELETE FROM " + getFullTableName(tableRef)
            + " WHERE " + fieldNameProvider.row() + " = ?"
            + " AND " + fieldNameProvider.column() + " = ?"
            + " AND " + fieldNameProvider.timestamp() + " = ?";
    int fetchBatchCount = config.fetchBatchCount();
    for (final List<Cell> batch : Iterables.partition(keys.keySet(), fetchBatchCount)) {
        cellCount += batch.size();
        PreparedStatement deleteStatement =
                getPreparedStatement(tableRef, deleteQuery, longRunningQuerySession)
                        .setConsistencyLevel(deleteConsistency);
        List<ResultSetFuture> resultSetFutures = Lists.newArrayList();
        for (Cell key : batch) {
            for (long ts : Ordering.natural().immutableSortedCopy(keys.get(key))) {
                BoundStatement boundStatement = deleteStatement.bind(
                        ByteBuffer.wrap(key.getRowName()),
                        ByteBuffer.wrap(key.getColumnName()),
                        ~ts);
                resultSetFutures.add(longRunningQuerySession.executeAsync(boundStatement));
            }
        }
        for (ResultSetFuture resultSetFuture : resultSetFutures) {
            ResultSet resultSet;
            try {
                resultSet = resultSetFuture.getUninterruptibly();
                resultSet.all();
            } catch (Throwable t) {
                throw Throwables.throwUncheckedException(t);
            }
            cqlKeyValueServices.logTracedQuery(deleteQuery, resultSet, session, cqlStatementCache.normalQuery);
        }
    }
    if (cellCount > fetchBatchCount) {
        log.warn("Rebatched in delete a call to {} that attempted to delete {} cells;"
                + " this may indicate overly-large batching on a higher level.\n{}",
                tableRef,
                cellCount,
                CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
    }
}
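Deletes reuse the same pipeline, with one bound statement per (row, column, timestamp) triple. The ~ts is worth noting: AtlasDB stores timestamps bitwise-inverted so that newer versions sort first under CQL's ascending clustering order, which is also why loadWithTs binds ~startTs with "timestamp > ?". A condensed sketch of the async point-delete loop, again against a hypothetical my_ks.my_table:

import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Sketch only: async point-deletes keyed by (row, column, inverted timestamp).
static void deleteVersions(Session session, ByteBuffer row, ByteBuffer col, Iterable<Long> timestamps) {
    PreparedStatement ps = session.prepare(
            "DELETE FROM my_ks.my_table WHERE key = ? AND column1 = ? AND column2 = ?");
    List<ResultSetFuture> futures = new ArrayList<>();
    for (long ts : timestamps) {
        // ~ts: the stored timestamp is bitwise-inverted, matching the write path.
        futures.add(session.executeAsync(ps.bind(row, col, ~ts)));
    }
    for (ResultSetFuture future : futures) {
        future.getUninterruptibly(); // block; any write failure surfaces here
    }
}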
Use of com.datastax.driver.core.ResultSetFuture in project atlasdb by palantir.
Class CqlKeyValueService, method getRowsAllColsInternal.
private Map<Cell, Value> getRowsAllColsInternal(final TableReference tableRef,
        final Iterable<byte[]> rows,
        final long startTs) throws Exception {
    int rowCount = 0;
    Map<Cell, Value> result = Maps.newHashMap();
    int fetchBatchCount = config.fetchBatchCount();
    List<ResultSetFuture> resultSetFutures = Lists.newArrayListWithExpectedSize(rowCount);
    for (final List<byte[]> batch : Iterables.partition(rows, fetchBatchCount)) {
        rowCount += batch.size();
        String getRowsQuery = String.format("SELECT * FROM %s WHERE %s IN (%s)",
                getFullTableName(tableRef),
                fieldNameProvider.row(),
                Joiner.on(",").join(Iterables.limit(Iterables.cycle("?"), batch.size())));
        PreparedStatement preparedStatement = getPreparedStatement(tableRef, getRowsQuery, session);
        Object[] args = batch.stream().map(ByteBuffer::wrap).toArray();
        resultSetFutures.add(session.executeAsync(preparedStatement.bind(args)));
        for (ResultSetFuture resultSetFuture : resultSetFutures) {
            ResultSet resultSet;
            try {
                resultSet = resultSetFuture.getUninterruptibly();
            } catch (Throwable t) {
                throw Throwables.throwUncheckedException(t);
            }
            for (Row row : resultSet.all()) {
                Cell cell = Cell.create(getRowName(row), getColName(row));
                if ((getTs(row) < startTs)
                        && (!result.containsKey(cell) || (result.get(cell).getTimestamp() < getTs(row)))) {
                    result.put(cell, Value.create(getValue(row), getTs(row)));
                }
            }
            cqlKeyValueServices.logTracedQuery(getRowsQuery, resultSet, session, cqlStatementCache.normalQuery);
        }
    }
    if (rowCount > fetchBatchCount) {
        log.warn("Rebatched in getRows a call to {} that attempted to multiget {} rows;"
                + " this may indicate overly-large batching on a higher level.\n{}",
                tableRef.getQualifiedName(),
                rowCount,
                CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
    }
    return result;
}
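Both loadWithTs and getRowsAllColsInternal generate their IN (...) clause by repeating a bind marker once per key: Iterables.limit(Iterables.cycle("?"), n) yields exactly n question marks and Joiner stitches them together. The trick in isolation:

import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;

// bindMarkers(3) returns "?,?,?", ready for "... WHERE key IN (?,?,?)".
static String bindMarkers(int n) {
    return Joiner.on(",").join(Iterables.limit(Iterables.cycle("?"), n));
}

Since getPreparedStatement presumably caches by query string, batches of the same size reuse one prepared statement instead of re-preparing.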
Use of com.datastax.driver.core.ResultSetFuture in project atlasdb by palantir.
Class CqlKeyValueService, method putInternal.
protected void putInternal(TableReference tableRef,
        Iterable<Map.Entry<Cell, Value>> values,
        TransactionType transactionType,
        int ttl,
        boolean recursive) throws Exception {
    List<ResultSetFuture> resultSetFutures = Lists.newArrayList();
    int mutationBatchCount = config.mutationBatchCount();
    long mutationBatchSizeBytes = limitBatchSizesToServerDefaults
            ? CqlKeyValueServices.UNCONFIGURED_DEFAULT_BATCH_SIZE_BYTES
            : config.mutationBatchSizeBytes();
    for (List<Entry<Cell, Value>> partition : IterablePartitioner.partitionByCountAndBytes(
            values, mutationBatchCount, mutationBatchSizeBytes, tableRef,
            CqlKeyValueServices.PUT_ENTRY_SIZING_FUNCTION)) {
        resultSetFutures.add(getPutPartitionResultSetFuture(tableRef, partition, transactionType));
    }
    final String putQuery = getPutQueryForPossibleTransaction(tableRef, transactionType);
    for (ResultSetFuture resultSetFuture : resultSetFutures) {
        ResultSet resultSet;
        try {
            resultSet = resultSetFuture.getUninterruptibly();
            resultSet.all();
            cqlKeyValueServices.logTracedQuery(putQuery, resultSet, session, cqlStatementCache.normalQuery);
            if (!resultSet.wasApplied()) {
                throw new KeyAlreadyExistsException("This transaction row already exists: " + putQuery);
            }
        } catch (InvalidQueryException e) {
            if (e.getMessage().contains("Batch too large") && !recursive) {
                log.error("Attempted a put to {} that the Cassandra server deemed to be too large"
                        + " to accept. Batch sizes on the Atlas-side have been artificially lowered"
                        + " to the Cassandra default maximum batch sizes.",
                        tableRef);
                limitBatchSizesToServerDefaults = true;
                try {
                    putInternal(tableRef, values, transactionType, ttl, true);
                } catch (Throwable t) {
                    throw Throwables.throwUncheckedException(t);
                }
            } else {
                throw Throwables.throwUncheckedException(e);
            }
        } catch (Throwable t) {
            throw Throwables.throwUncheckedException(t);
        }
    }
}
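The wasApplied() check only carries information for conditional writes: a CAS statement such as INSERT ... IF NOT EXISTS returns a result set whose [applied] column reports whether the condition held, which is how putInternal detects an already-existing transaction row. A minimal sketch, assuming a hypothetical my_ks.my_table(key, value):

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;

// Sketch only: conditional insert; wasApplied() is false if the row already existed.
static boolean putIfAbsent(Session session, String key, String value) {
    ResultSet rs = session.execute(
            "INSERT INTO my_ks.my_table (key, value) VALUES (?, ?) IF NOT EXISTS", key, value);
    return rs.wasApplied();
}

For unconditional writes wasApplied() is always true, so the KeyAlreadyExistsException path above can only fire for the conditional transaction-table put.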
Use of com.datastax.driver.core.ResultSetFuture in project atlasdb by palantir.
Class CqlKeyValueServices, method logTracedQuery.
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
public void logTracedQuery(String tracedQuery,
        ResultSet resultSet,
        Session session,
        LoadingCache<String, PreparedStatement> statementCache) {
    if (log.isInfoEnabled()) {
        List<ExecutionInfo> allExecutionInfo = Lists.newArrayList(resultSet.getAllExecutionInfo());
        for (final ExecutionInfo info : allExecutionInfo) {
            if (info.getQueryTrace() == null) {
                continue;
            }
            final UUID traceId = info.getQueryTrace().getTraceId();
            log.info("Traced query {} with trace uuid {}", tracedQuery, traceId);
            traceRetrievalExec.submit((Callable<Void>) () -> {
                StringBuilder sb = new StringBuilder();
                sb.append("Retrieving traced query ").append(tracedQuery)
                        .append(" trace uuid: ").append(traceId);
                int tries = 0;
                boolean success = false;
                while (tries < MAX_TRIES) {
                    ResultSetFuture sessionFuture = session.executeAsync(statementCache
                            .getUnchecked("SELECT * FROM system_traces.sessions WHERE session_id = ?")
                            .bind(traceId));
                    Row sessionRow = sessionFuture.getUninterruptibly().one();
                    if (sessionRow != null && !sessionRow.isNull("duration")) {
                        ResultSetFuture eventFuture = session.executeAsync(statementCache
                                .getUnchecked("SELECT * FROM system_traces.events WHERE session_id = ?")
                                .bind(traceId));
                        List<Row> eventRows = eventFuture.getUninterruptibly().all();
                        sb.append(" requestType: ").append(sessionRow.getString("request"));
                        sb.append(" coordinator: ").append(sessionRow.getInet("coordinator"));
sb.append(" started_at: ").append(sessionRow.getTime("started_at"));
sb.append(" duration: ").append(sessionRow.getInt("duration"));
if (!sessionRow.isNull("parameters")) {
sb.append("\nparameters: " + Collections.unmodifiableMap(sessionRow.getMap("parameters", String.class, String.class)));
}
for (Row eventRow : eventRows) {
sb.append(eventRow.getString("activity")).append(" on ").append(eventRow.getInet("source")).append("[").append(eventRow.getString("thread")).append("] at ").append(eventRow.getUUID("event_id").timestamp()).append(" (").append(eventRow.getInt("source_elapsed")).append(" elapsed)\n");
}
success = true;
break;
}
tries++;
Thread.sleep(TRACE_RETRIEVAL_MS_BETWEEN_TRIES);
}
if (!success) {
sb.append(" (retrieval timed out)");
}
log.info("Query trace: {}", sb);
return null;
});
}
}
}
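The manual polling of system_traces.sessions and system_traces.events above can also be done through the driver, which exposes the same data via QueryTrace once tracing is enabled on a statement. A minimal sketch (the table name is hypothetical):

import com.datastax.driver.core.QueryTrace;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

// Sketch only: enable tracing and read the trace via the driver API
// instead of querying system_traces by hand.
static void traceQuery(Session session) {
    Statement stmt = new SimpleStatement("SELECT * FROM my_ks.my_table").enableTracing();
    ResultSet rs = session.execute(stmt);
    QueryTrace trace = rs.getExecutionInfo().getQueryTrace();
    if (trace != null) {
        // Accessing the duration or events triggers the driver's own fetch from system_traces.
        System.out.println("duration (micros): " + trace.getDurationMicros());
        for (QueryTrace.Event event : trace.getEvents()) {
            System.out.println(event.getDescription() + " on " + event.getSource());
        }
    }
}

AtlasDB polls by hand instead, likely because the coordinator writes the trace tables asynchronously; hence the MAX_TRIES/Thread.sleep loop and the "(retrieval timed out)" fallback.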