Use of com.datastax.driver.core.ResultSet in project zipkin by openzipkin.
The class CassandraSpanStore, method getSpansByTraceIds:
/**
 * Gets the available trace information from the storage system. Spans in a trace should be
 * sorted by the first annotation timestamp in that span; the first event should be first in
 * the spans list.
 *
 * <p>The returned list contains only spans that were found, so it may not match the provided
 * list of ids.
 */
ListenableFuture<List<Span>> getSpansByTraceIds(Set<Long> traceIds, int limit) {
  checkNotNull(traceIds, "traceIds");
  if (traceIds.isEmpty()) {
    return immediateFuture(Collections.<Span>emptyList());
  }
  try {
    BoundStatement bound = CassandraUtil.bindWithName(selectTraces, "select-traces")
        .setSet("trace_id", traceIds)
        .setInt("limit_", limit);
    bound.setFetchSize(Integer.MAX_VALUE);
    return transform(session.executeAsync(bound), new Function<ResultSet, List<Span>>() {
      @Override
      public List<Span> apply(ResultSet input) {
        List<Span> result = new ArrayList<>(input.getAvailableWithoutFetching());
        for (Row row : input) {
          result.add(Codec.THRIFT.readSpan(row.getBytes("span")));
        }
        return result;
      }
    });
  } catch (RuntimeException ex) {
    return immediateFailedFuture(ex);
  }
}
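For context, a caller in the same package could block on the returned future like this. This is only a sketch: the trace ids and limit are made up, store is an assumed CassandraSpanStore reference, and Guava's ImmutableSet and Futures are assumed to be available (the store already uses Guava's ListenableFuture).

// Sketch only: hypothetical trace ids and limit; store is an assumed CassandraSpanStore.
Set<Long> traceIds = ImmutableSet.of(0x463ac35c9f6413adL, 0x72485a3953bb6124L);
ListenableFuture<List<Span>> future = store.getSpansByTraceIds(traceIds, 10000);
List<Span> spans = Futures.getUnchecked(future); // blocks; rethrows failures unchecked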
Use of com.datastax.driver.core.ResultSet in project zipkin by openzipkin.
The class CassandraSpanStore, method getTraceIdsByAnnotation:
ListenableFuture<Map<Long, Long>> getTraceIdsByAnnotation(String annotationKey, long endTs, long lookback, int limit) {
  long startTs = endTs - lookback;
  try {
    BoundStatement bound = CassandraUtil.bindWithName(selectTraceIdsByAnnotation, "select-trace-ids-by-annotation")
        .setBytes("annotation", CassandraUtil.toByteBuffer(annotationKey))
        .setSet("bucket", buckets)
        .setBytesUnsafe("start_ts", timestampCodec.serialize(startTs))
        .setBytesUnsafe("end_ts", timestampCodec.serialize(endTs))
        .setInt("limit_", limit);
    bound.setFetchSize(Integer.MAX_VALUE);
    return transform(session.executeAsync(bound), new Function<ResultSet, Map<Long, Long>>() {
      @Override
      public Map<Long, Long> apply(ResultSet input) {
        Map<Long, Long> traceIdsToTimestamps = new LinkedHashMap<>();
        for (Row row : input) {
          traceIdsToTimestamps.put(row.getLong("trace_id"), timestampCodec.deserialize(row, "ts"));
        }
        return traceIdsToTimestamps;
      }
    });
  } catch (CharacterCodingException | RuntimeException ex) {
    return immediateFailedFuture(ex);
  }
}
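The two methods compose naturally: the key set of the map returned by getTraceIdsByAnnotation is exactly the argument getSpansByTraceIds expects. A blocking sketch follows; the annotation key, lookback, and limits are hypothetical, and the timestamps are assumed (as elsewhere in this store) to be epoch microseconds.

// Sketch only: find trace ids indexed under a hypothetical annotation key over the last
// hour, then load the matching spans. Timestamp units assumed to be epoch microseconds.
long endTs = System.currentTimeMillis() * 1000;
long lookback = TimeUnit.HOURS.toMicros(1);
Map<Long, Long> idsToTimestamps =
    Futures.getUnchecked(store.getTraceIdsByAnnotation("error", endTs, lookback, 100));
List<Span> spans =
    Futures.getUnchecked(store.getSpansByTraceIds(idsToTimestamps.keySet(), 10000));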
Use of com.datastax.driver.core.ResultSet in project spring-boot by spring-projects.
The class CassandraHealthIndicator, method doHealthCheck:
@Override
protected void doHealthCheck(Health.Builder builder) throws Exception {
  try {
    Select select = QueryBuilder.select("release_version").from("system", "local");
    ResultSet results = this.cassandraOperations.getCqlOperations().queryForResultSet(select);
    if (results.isExhausted()) {
      builder.up();
      return;
    }
    String version = results.one().getString(0);
    builder.up().withDetail("version", version);
  } catch (Exception ex) {
    builder.down(ex);
  }
}
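For comparison, the same probe can be issued directly with the DataStax driver, without Spring's CqlOperations. A minimal sketch; the contact point is an assumption, and in real code the Cluster would be built once and reused rather than opened per check.

// Sketch only: connects to a local node, reads the server version, and closes everything.
try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
     Session session = cluster.connect()) {
  ResultSet rs = session.execute("SELECT release_version FROM system.local");
  Row row = rs.one(); // system.local holds a single row
  System.out.println(row == null ? "unknown" : row.getString("release_version"));
}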
Use of com.datastax.driver.core.ResultSet in project cassandra by apache.
The class CqlInputFormat, method describeSplits:
private Map<TokenRange, Long> describeSplits(String keyspace, String table, TokenRange tokenRange, int splitSize, int splitSizeMb, Session session) {
  String query = String.format("SELECT mean_partition_size, partitions_count " +
                               "FROM %s.%s " +
                               "WHERE keyspace_name = ? AND table_name = ? AND range_start = ? AND range_end = ?",
                               SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.SIZE_ESTIMATES);
  ResultSet resultSet = session.execute(query, keyspace, table, tokenRange.getStart().toString(), tokenRange.getEnd().toString());
  Row row = resultSet.one();
  long meanPartitionSize = 0;
  long partitionCount = 0;
  int splitCount = 0;
  if (row != null) {
    meanPartitionSize = row.getLong("mean_partition_size");
    partitionCount = row.getLong("partitions_count");
    splitCount = splitSizeMb > 0
        ? (int) (meanPartitionSize * partitionCount / splitSizeMb / 1024 / 1024)
        : (int) (partitionCount / splitSize);
  }
  // Assume smallest granularity of partition count available from CASSANDRA-7688
  if (splitCount == 0) {
    Map<TokenRange, Long> wrappedTokenRange = new HashMap<>();
    wrappedTokenRange.put(tokenRange, (long) 128);
    return wrappedTokenRange;
  }
  List<TokenRange> splitRanges = tokenRange.splitEvenly(splitCount);
  Map<TokenRange, Long> rangesWithLength = new HashMap<>();
  for (TokenRange range : splitRanges) {
    rangesWithLength.put(range, partitionCount / splitCount);
  }
  return rangesWithLength;
}
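To make the split arithmetic concrete, here is a small worked sketch of the splitSizeMb branch with illustrative numbers (not taken from any real size_estimates row):

// Sketch only: ~64 KiB mean partition size and 1,000,000 partitions is roughly 65.5 GB of
// data; targeting 256 MB per split yields 244 splits, each estimated at ~4098 partitions.
long meanPartitionSize = 64L * 1024; // bytes, as reported in system.size_estimates
long partitionCount = 1_000_000L;
int splitSizeMb = 256;
int splitCount = (int) (meanPartitionSize * partitionCount / splitSizeMb / 1024 / 1024); // 244
long partitionsPerSplit = partitionCount / splitCount; // 4098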
Use of com.datastax.driver.core.ResultSet in project cassandra by apache.
The class ViewTest, method testPartitionKeyOnlyTable:
@Test
public void testPartitionKeyOnlyTable() throws Throwable {
  createTable("CREATE TABLE %s (" +
              "a int," +
              "b int," +
              "PRIMARY KEY ((a, b)))");
  executeNet(protocolVersion, "USE " + keyspace());
  // Cannot use SELECT *, as those are always handled by the includeAll shortcut in View.updateAffectsView
  createView("mv1", "CREATE MATERIALIZED VIEW %s AS SELECT a, b FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (b, a)");
  updateView("INSERT INTO %s (a, b) VALUES (?, ?)", 1, 1);
  ResultSet mvRows = executeNet(protocolVersion, "SELECT a, b FROM mv1");
  assertRowsNet(protocolVersion, mvRows, row(1, 1));
}
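As a follow-up, the same CQLTester helpers used above could also confirm that the base table (referred to through %s) holds the row the view returned. This extra assertion is a sketch, not part of the original test:

// Sketch only: read the base table through the same network path and compare.
ResultSet baseRows = executeNet(protocolVersion, "SELECT a, b FROM %s");
assertRowsNet(protocolVersion, baseRows, row(1, 1));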