use of com.datastax.driver.core.Session in project camel by apache.
the class CassandraAggregationSerializedHeadersTest method doPreSetup.
@Override
protected void doPreSetup() throws Exception {
    assumeTrue("Skipping test running in CI server - Fails sometimes on CI server with address already in use", System.getenv("BUILD_ID") == null);
    CassandraUnitUtils.startEmbeddedCassandra();
    cluster = CassandraUnitUtils.cassandraCluster();
    Session rootSession = cluster.connect();
    CassandraUnitUtils.loadCQLDataSet(rootSession, "NamedAggregationDataSet.cql");
    rootSession.close();
    aggregationRepository = new NamedCassandraAggregationRepository(cluster, CassandraUnitUtils.KEYSPACE, "ID");
    aggregationRepository.setTable("NAMED_CAMEL_AGGREGATION");
    aggregationRepository.setAllowSerializedHeaders(true);
    aggregationRepository.start();
    super.doPreSetup();
}
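The repository prepared above is normally plugged into a Camel aggregate route. The following is a minimal sketch of that wiring; the endpoints, the "aggregationId" correlation header, the toy aggregation strategy and completionSize(3) are illustrative values, not taken from the test.

import org.apache.camel.builder.RouteBuilder;

// Minimal sketch: wiring a Cassandra-backed repository (the aggregationRepository
// field configured above) into an aggregate route. Endpoint names, the correlation
// header and the completion size are illustrative.
RouteBuilder routes = new RouteBuilder() {
    @Override
    public void configure() throws Exception {
        from("direct:input")
            // correlate messages by header; the toy strategy simply keeps the newest exchange
            .aggregate(header("aggregationId"), (oldExchange, newExchange) -> newExchange)
            .aggregationRepository(aggregationRepository)
            .completionSize(3)
            .to("mock:result");
    }
};

With this wiring, every partially aggregated exchange is persisted in the NAMED_CAMEL_AGGREGATION table, so an aggregation can survive a restart of the Camel context.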
use of com.datastax.driver.core.Session in project camel by apache.
the class CassandraAggregationTest method doPreSetup.
@Override
protected void doPreSetup() throws Exception {
    if (canTest()) {
        CassandraUnitUtils.startEmbeddedCassandra();
        cluster = CassandraUnitUtils.cassandraCluster();
        Session rootSession = cluster.connect();
        CassandraUnitUtils.loadCQLDataSet(rootSession, "NamedAggregationDataSet.cql");
        rootSession.close();
        aggregationRepository = new NamedCassandraAggregationRepository(cluster, CassandraUnitUtils.KEYSPACE, "ID");
        aggregationRepository.setTable("NAMED_CAMEL_AGGREGATION");
        aggregationRepository.start();
    }
    super.doPreSetup();
}
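Once started, the repository can also be exercised directly through Camel's AggregationRepository contract rather than through a route. A rough sketch, assuming a plain DefaultCamelContext; the correlation key "order-1" and the body are illustrative values:

import org.apache.camel.CamelContext;
import org.apache.camel.Exchange;
import org.apache.camel.impl.DefaultCamelContext;
import org.apache.camel.impl.DefaultExchange;

// Rough sketch of driving the started repository through the AggregationRepository
// contract. The context, the key "order-1" and the body are illustrative.
CamelContext context = new DefaultCamelContext();
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setBody("partial result");

aggregationRepository.add(context, "order-1", exchange);   // persist the in-flight aggregate
Exchange stored = aggregationRepository.get(context, "order-1");
aggregationRepository.remove(context, "order-1", stored);  // remove once the aggregate has completed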
use of com.datastax.driver.core.Session in project zipkin by openzipkin.
the class DefaultSessionFactory method create.
/**
 * Creates a session and ensures schema if configured. Closes the cluster and session if any
 * exception occurred.
 */
@Override
public Session create(Cassandra3Storage cassandra) {
    Closer closer = Closer.create();
    try {
        Cluster cluster = closer.register(buildCluster(cassandra));
        cluster.register(new QueryLogger.Builder().build());
        Session session;
        if (cassandra.ensureSchema) {
            session = closer.register(cluster.connect());
            Schema.ensureExists(cassandra.keyspace, session);
            session.execute("USE " + cassandra.keyspace);
        } else {
            session = cluster.connect(cassandra.keyspace);
        }
        initializeUDTs(session);
        return session;
    } catch (RuntimeException e) {
        try {
            closer.close();
        } catch (IOException ignored) {
        }
        throw e;
    }
}
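The Closer idiom above is what guarantees that the Cluster, and any partially initialized Session, are released if anything after buildCluster throws. A stripped-down sketch of the same pattern, independent of zipkin's types; the contact point and keyspace are placeholder values:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.google.common.io.Closer;
import java.io.IOException;

// Stripped-down sketch of the Closer idiom: each resource is registered as soon as
// it is created, so a single closer.close() releases whatever was opened before the
// failure, in reverse registration order. Contact point and keyspace are placeholders.
Closer closer = Closer.create();
try {
    Cluster cluster = closer.register(Cluster.builder().addContactPoint("127.0.0.1").build());
    Session session = closer.register(cluster.connect());
    session.execute("USE example_keyspace"); // may throw if the keyspace does not exist
    // on success the caller keeps using the open session; only the failure path closes it
} catch (RuntimeException e) {
    try {
        closer.close(); // closes the session, then the cluster
    } catch (IOException ignored) {
    }
    throw e;
}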
use of com.datastax.driver.core.Session in project zipkin by openzipkin.
the class DeduplicatingExecutorTest method multithreaded.
/**
 * This shows that any number of threads perform a computation only once.
 */
@Test
public void multithreaded() throws Exception {
    Session session = mock(Session.class);
    DeduplicatingExecutor executor = new DeduplicatingExecutor(session, TimeUnit.SECONDS.toMillis(1L));
    BoundStatement statement = mock(BoundStatement.class);
    when(session.executeAsync(statement)).thenAnswer(invocationOnMock -> mock(ResultSetFuture.class));
    int loopCount = 1000;
    CountDownLatch latch = new CountDownLatch(loopCount);
    ExecutorService exec = Executors.newFixedThreadPool(10);
    Collection<ListenableFuture<?>> futures = new ConcurrentLinkedDeque<>();
    for (int i = 0; i < loopCount; i++) {
        exec.execute(() -> {
            futures.add(executor.maybeExecuteAsync(statement, "foo"));
            futures.add(executor.maybeExecuteAsync(statement, "bar"));
            latch.countDown();
        });
    }
    latch.await();
    ImmutableSet<ListenableFuture<?>> distinctFutures = ImmutableSet.copyOf(futures);
    assertThat(distinctFutures).hasSize(2);
    // expire the result
    Thread.sleep(1000L);
    // Sanity check: we don't memoize after we should have expired.
    assertThat(executor.maybeExecuteAsync(statement, "foo")).isNotIn(distinctFutures);
    assertThat(executor.maybeExecuteAsync(statement, "bar")).isNotIn(distinctFutures);
}
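What the test pins down is the contract of maybeExecuteAsync: for a given key, concurrent callers share a single future until the TTL expires, after which the next call issues a fresh execution. A hypothetical sketch of that memoize-with-expiry pattern using a Guava cache; this is not zipkin's implementation, only an illustration of the behaviour being asserted:

import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the behaviour the test asserts: one driver call per key
// within the TTL window, no matter how many threads ask. Not zipkin's actual code.
class MemoizingExecutor {
    private final LoadingCache<String, ListenableFuture<?>> cache;

    MemoizingExecutor(final Session session, final Statement statement, long ttlMillis) {
        this.cache = CacheBuilder.newBuilder()
            .expireAfterWrite(ttlMillis, TimeUnit.MILLISECONDS)
            .build(new CacheLoader<String, ListenableFuture<?>>() {
                @Override
                public ListenableFuture<?> load(String key) {
                    return session.executeAsync(statement); // executed once per key per TTL window
                }
            });
    }

    ListenableFuture<?> maybeExecuteAsync(String key) {
        return cache.getUnchecked(key); // concurrent callers with the same key receive the same future
    }
}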
use of com.datastax.driver.core.Session in project presto by prestodb.
the class NativeCassandraSession method queryPartitionKeys.
protected Iterable<Row> queryPartitionKeys(CassandraTable table, List<Object> filterPrefix) {
    CassandraTableHandle tableHandle = table.getTableHandle();
    List<CassandraColumnHandle> partitionKeyColumns = table.getPartitionKeyColumns();
    if (filterPrefix.size() != partitionKeyColumns.size()) {
        return null;
    }
    Select partitionKeys = CassandraCqlUtils.selectDistinctFrom(tableHandle, partitionKeyColumns);
    addWhereClause(partitionKeys.where(), partitionKeyColumns, filterPrefix);
    return executeWithSession(session -> session.execute(partitionKeys)).all();
}
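addWhereClause is presto's own helper; the driver-level pattern it wraps is adding one equality clause per partition key column to the SELECT DISTINCT statement via QueryBuilder. A hedged sketch of that pattern, with plain column names standing in for presto's column handles:

import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import java.util.List;

// Hedged sketch of the driver-level pattern behind addWhereClause: one equality
// clause per partition key column. Plain strings stand in for presto's column handles.
static void addEqualityClauses(Select.Where where, List<String> columnNames, List<Object> filterPrefix) {
    for (int i = 0; i < columnNames.size(); i++) {
        where.and(QueryBuilder.eq(columnNames.get(i), filterPrefix.get(i)));
    }
}

With every partition key column constrained this way, the DISTINCT select over the partition key columns returns exactly the partitions matching the filter prefix.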