Use of com.datastax.driver.core.PreparedStatement in project data-transfer-project by google.

The class CosmosStore, method remove:

private void remove(UUID id, String query) {
    PreparedStatement statement = session.prepare(query);
    BoundStatement boundStatement = new BoundStatement(statement);
    boundStatement.setUUID(0, id);
    session.execute(boundStatement);
}

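Note that remove re-prepares the query on every call; the driver deduplicates prepares internally, but the round trip can be skipped entirely by caching the PreparedStatement. A minimal sketch of that variant, assuming the same session field (the statementCache field and removeCached name are hypothetical, not part of CosmosStore):

private final Map<String, PreparedStatement> statementCache = new ConcurrentHashMap<>();

private void removeCached(UUID id, String query) {
    // computeIfAbsent prepares the query once and reuses it on later calls
    PreparedStatement statement = statementCache.computeIfAbsent(query, session::prepare);
    // statement.bind(id) is the newer equivalent of new BoundStatement(statement) + setUUID(0, id)
    session.execute(statement.bind(id));
}
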
Use of com.datastax.driver.core.PreparedStatement in project atlasdb by palantir.

The class CqlKeyValueService, method getLatestTimestampsInternal:

private Map<Cell, Long> getLatestTimestampsInternal(final TableReference tableRef,
        Map<Cell, Long> timestampByCell) throws Exception {
    int fetchBatchCount = config.fetchBatchCount();
    Iterable<List<Cell>> partitions = Iterables.partition(timestampByCell.keySet(), fetchBatchCount);
    int numPartitions = (timestampByCell.size() / fetchBatchCount)
            + (timestampByCell.size() % fetchBatchCount > 0 ? 1 : 0);
    List<Future<Map<Cell, Long>>> futures = Lists.newArrayListWithCapacity(numPartitions);
    String loadOnlyTsQuery = "SELECT " + fieldNameProvider.row()
            + ", " + fieldNameProvider.column()
            + ", " + fieldNameProvider.timestamp()
            + " FROM " + getFullTableName(tableRef)
            + " WHERE " + fieldNameProvider.row() + " = ?"
            + " AND " + fieldNameProvider.column() + " = ?"
            + " LIMIT 1";
    if (timestampByCell.size() > fetchBatchCount) {
        log.warn("Re-batching in getLatestTimestamps a call to {} that attempted to multiget {} cells; "
                + "this may indicate overly-large batching on a higher level.\n{}",
                tableRef, timestampByCell.size(),
                CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
    }
    for (final List<Cell> partition : partitions) {
        futures.add(executor.submit(AnnotatedCallable.wrapWithThreadName(
                AnnotationType.PREPEND,
                "Atlas CQL getLatestTimestamps partition of " + partition.size(),
                () -> {
                    PreparedStatement preparedStatement = getPreparedStatement(tableRef, loadOnlyTsQuery, session);
                    preparedStatement.setConsistencyLevel(readConsistency);
                    List<ResultSetFuture> resultSetFutures = Lists.newArrayListWithExpectedSize(partition.size());
                    for (Cell c : partition) {
                        BoundStatement boundStatement = preparedStatement.bind();
                        boundStatement.setBytes(fieldNameProvider.row(), ByteBuffer.wrap(c.getRowName()));
                        boundStatement.setBytes(fieldNameProvider.column(), ByteBuffer.wrap(c.getColumnName()));
                        resultSetFutures.add(session.executeAsync(boundStatement));
                    }
                    Map<Cell, Long> res = Maps.newHashMapWithExpectedSize(partition.size());
                    for (ResultSetFuture resultSetFuture : resultSetFutures) {
                        ResultSet resultSet = resultSetFuture.getUninterruptibly();
                        for (Row row : resultSet.all()) {
                            res.put(Cell.create(getRowName(row), getColName(row)), getTs(row));
                        }
                        cqlKeyValueServices.logTracedQuery(loadOnlyTsQuery, resultSet, session, cqlStatementCache.normalQuery);
                    }
                    return res;
                })));
    }
    Map<Cell, Long> res = Maps.newHashMapWithExpectedSize(timestampByCell.size());
    for (Future<Map<Cell, Long>> f : futures) {
        try {
            res.putAll(f.get());
        } catch (InterruptedException e) {
            throw Throwables.throwUncheckedException(e);
        } catch (ExecutionException e) {
            Throwables.throwIfInstance(e, Error.class);
            throw Throwables.throwUncheckedException(e.getCause());
        }
    }
    return res;
}

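The numPartitions arithmetic above is a hand-rolled ceiling division. Since the method already leans on Guava (Iterables, Lists, Maps), the same value can be computed more directly with Guava's IntMath; a one-line sketch, assuming com.google.common.math.IntMath and java.math.RoundingMode are on the classpath:

// Equivalent to (size / batch) + (size % batch > 0 ? 1 : 0) for positive inputs
int numPartitions = IntMath.divide(timestampByCell.size(), fetchBatchCount, RoundingMode.CEILING);
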
Use of com.datastax.driver.core.PreparedStatement in project atlasdb by palantir.

The class CqlKeyValueService, method getPutPartitionResultSetFuture:

protected ResultSetFuture getPutPartitionResultSetFuture(TableReference tableRef,
        List<Entry<Cell, Value>> partition, TransactionType transactionType, int ttl) {
    PreparedStatement preparedStatement = getPreparedStatement(
            tableRef, getPutQueryForPossibleTransaction(tableRef, transactionType, ttl), session);
    preparedStatement.setConsistencyLevel(writeConsistency);
    // Be mindful when using the atomicity semantics of UNLOGGED batch statements.
    // This usage should be okay, as the KVS.multiPut explicitly does not guarantee
    // atomicity across cells (nor batch isolation, which we also cannot provide).
    BatchStatement batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED);
    if (shouldTraceQuery(tableRef)) {
        batchStatement.enableTracing();
    }
    for (Entry<Cell, Value> e : partition) {
        BoundStatement boundStatement = preparedStatement.bind();
        boundStatement.setBytes(fieldNameProvider.row(), ByteBuffer.wrap(e.getKey().getRowName()));
        boundStatement.setBytes(fieldNameProvider.column(), ByteBuffer.wrap(e.getKey().getColumnName()));
        boundStatement.setLong(fieldNameProvider.timestamp(), ~e.getValue().getTimestamp());
        boundStatement.setBytes(fieldNameProvider.value(), ByteBuffer.wrap(e.getValue().getContents()));
        if (partition.size() > 1) {
            batchStatement.add(boundStatement);
        } else {
            return session.executeAsync(boundStatement);
        }
    }
    return session.executeAsync(batchStatement);
}

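The setLong call stores the bitwise complement of the timestamp. Complementing a long reverses its sort order, so rows clustered ascending by the stored value come back newest-first, which the LIMIT 1 query in getLatestTimestampsInternal above appears to rely on. An illustrative standalone snippet (not project code):

long older = 100L;
long newer = 200L;
// ~200L == -201L and ~100L == -101L, so the complements sort in reverse
assert ~newer < ~older;
// complementing twice restores the original value
assert ~(~newer) == newer;
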
Use of com.datastax.driver.core.PreparedStatement in project atlasdb by palantir.

The class CqlKeyValueServices, method getPeers:

public static Set<Peer> getPeers(Session session) {
    PreparedStatement selectPeerInfo = session.prepare(
            "select peer, data_center, rack, release_version, rpc_address, schema_version, tokens"
                    + " from system.peers;");
    Set<Peer> peers = Sets.newHashSet();
    for (Row row : session.execute(selectPeerInfo.bind()).all()) {
        Peer peer = new Peer();
        peer.peer = row.getInet("peer");
        peer.dataCenter = row.getString("data_center");
        peer.rack = row.getString("rack");
        peer.releaseVersion = row.getString("release_version");
        peer.rpcAddress = row.getInet("rpc_address");
        peer.schemaVersion = row.getUUID("schema_version");
        peer.tokens = row.getSet("tokens", String.class);
        peers.add(peer);
    }
    return peers;
}

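system.peers only lists the other nodes; the coordinator the session is connected to describes itself in system.local. A companion sketch in the same style (hypothetical, not part of CqlKeyValueServices):

public static Row getLocalNode(Session session) {
    PreparedStatement selectLocalInfo = session.prepare(
            "select data_center, rack, release_version, schema_version, tokens from system.local;");
    // system.local always contains exactly one row describing the connected node
    return session.execute(selectLocalInfo.bind()).one();
}
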
Use of com.datastax.driver.core.PreparedStatement in project newts by OpenNMS.

The class CassandraIndexerStressITCase, method canIndexManyResources:

@Test
public void canIndexManyResources() {
    final int numResources = 20000;
    final int numSamplesPerResource = 3;

    // Set up the indexer
    ResultSetFuture future = mock(ResultSetFuture.class);
    CassandraSession session = mock(CassandraSession.class);
    when(session.executeAsync(any(Statement.class))).thenReturn(future);
    PreparedStatement preparedStatement = mock(PreparedStatement.class);
    BoundStatement boundStatement = mock(BoundStatement.class);
    when(session.prepare(any(RegularStatement.class))).thenReturn(preparedStatement);
    when(preparedStatement.bind()).thenReturn(boundStatement);
    when(boundStatement.setString(any(String.class), any(String.class))).thenReturn(boundStatement);
    ContextConfigurations contexts = new ContextConfigurations();
    MetricRegistry metrics = new MetricRegistry();
    CassandraIndexingOptions options = new CassandraIndexingOptions.Builder()
            .withHierarchicalIndexing(true).build();
    ResourceIdSplitter resourceIdSplitter = new EscapableResourceIdSplitter();
    GuavaResourceMetadataCache cache = new GuavaResourceMetadataCache(numResources * 2, metrics);
    CassandraIndexer indexer = new CassandraIndexer(session, 0, cache, metrics, options, resourceIdSplitter, contexts);

    // Generate the resources and sample sets
    Resource[] resources = new Resource[numResources];
    List<List<Sample>> sampleSets = Lists.newArrayListWithCapacity(numResources);
    System.out.println("Building sample sets...");
    for (int i = 0; i < numResources; i++) {
        resources[i] = new Resource(String.format("snmp:%d:eth0-x:ifHcInOctets", i));
        List<Sample> samples = Lists.newArrayListWithCapacity(numSamplesPerResource);
        for (int j = 0; j < numSamplesPerResource; j++) {
            samples.add(new Sample(Timestamp.now(), resources[i], "y" + j, MetricType.COUNTER, new Counter(i * j)));
        }
        sampleSets.add(samples);
    }
    System.out.println("Done building sample sets.");

    // Index the resources and associated samples several times over
    for (int k = 0; k < 3; k++) {
        System.out.println("Indexing sample sets...");
        long start = System.currentTimeMillis();
        for (List<Sample> sampleSet : sampleSets) {
            indexer.update(sampleSet);
        }
        long elapsed = System.currentTimeMillis() - start;
        System.out.println("Done indexing samples in " + elapsed + " ms");
    }
}

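The three when(...) stubs exist only to make the mocked statement chain fluent: prepare returns the prepared statement, bind returns the bound statement, and setString returns the bound statement itself so chained setters never hit a null. A sketch of an alternative, assuming the project's Mockito version is 2.x or later (Mockito.RETURNS_SELF was introduced in Mockito 2):

// RETURNS_SELF makes every method whose return type is assignable from the
// mock's class return the mock, covering setString and any other fluent setters
BoundStatement boundStatement = mock(BoundStatement.class, Mockito.RETURNS_SELF);
when(preparedStatement.bind()).thenReturn(boundStatement);
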