Use of com.datastax.driver.core.Cluster in project spring-boot by spring-projects: the createClusterWithDefault method of the class CassandraAutoConfigurationTests.
@Test
public void createClusterWithDefault() {
    load();
    assertThat(this.context.getBeanNamesForType(Cluster.class).length).isEqualTo(1);
    Cluster cluster = this.context.getBean(Cluster.class);
    assertThat(cluster.getClusterName()).startsWith("cluster");
}
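The test relies on a load() helper that is not shown in this excerpt. A minimal sketch of what such a helper typically does in these auto-configuration tests, assuming a context field of type AnnotationConfigApplicationContext (the field name and the registered classes are assumptions, not the actual spring-boot source):

// Sketch of the surrounding test-class plumbing, inside CassandraAutoConfigurationTests.
// Assumes imports of AnnotationConfigApplicationContext and
// PropertyPlaceholderAutoConfiguration.
private AnnotationConfigApplicationContext context;

private void load() {
    // Register the auto-configuration under test and refresh the context.
    AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
    ctx.register(PropertyPlaceholderAutoConfiguration.class, CassandraAutoConfiguration.class);
    ctx.refresh();
    this.context = ctx;
}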
Use of com.datastax.driver.core.Cluster in project spring-boot by spring-projects: the cluster method of the class CassandraAutoConfiguration.
@Bean
@ConditionalOnMissingBean
public Cluster cluster() {
    CassandraProperties properties = this.properties;
    Cluster.Builder builder = Cluster.builder()
            .withClusterName(properties.getClusterName())
            .withPort(properties.getPort());
    if (properties.getUsername() != null) {
        builder.withCredentials(properties.getUsername(), properties.getPassword());
    }
    if (properties.getCompression() != null) {
        builder.withCompression(properties.getCompression());
    }
    if (properties.getLoadBalancingPolicy() != null) {
        LoadBalancingPolicy policy = instantiate(properties.getLoadBalancingPolicy());
        builder.withLoadBalancingPolicy(policy);
    }
    builder.withQueryOptions(getQueryOptions());
    if (properties.getReconnectionPolicy() != null) {
        ReconnectionPolicy policy = instantiate(properties.getReconnectionPolicy());
        builder.withReconnectionPolicy(policy);
    }
    if (properties.getRetryPolicy() != null) {
        RetryPolicy policy = instantiate(properties.getRetryPolicy());
        builder.withRetryPolicy(policy);
    }
    builder.withSocketOptions(getSocketOptions());
    if (properties.isSsl()) {
        builder.withSSL();
    }
    String points = properties.getContactPoints();
    builder.addContactPoints(StringUtils.commaDelimitedListToStringArray(points));
    customize(builder);
    return builder.build();
}
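The same builder chain can be driven directly, outside Spring Boot's auto-configuration. A minimal sketch using the driver's public Cluster.Builder API; the cluster name, contact point, port, and credentials are placeholders:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class DirectBuilderExample {
    public static void main(String[] args) {
        // Equivalent of the conditional wiring above, with values hard-coded.
        try (Cluster cluster = Cluster.builder()
                .withClusterName("my-cluster")
                .withPort(9042)
                .addContactPoint("127.0.0.1")
                .withCredentials("user", "secret")
                .build();
             Session session = cluster.connect()) {
            System.out.println(cluster.getClusterName());
        }
    }
}

Both Cluster and Session implement Closeable in the 3.x driver, so try-with-resources releases the connection pools without an explicit close().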
Use of com.datastax.driver.core.Cluster in project cassandra by apache: the getCluster method of the class CqlConfigHelper.
public static Cluster getCluster(String[] hosts, Configuration conf, int port) {
    Optional<AuthProvider> authProvider = getAuthProvider(conf);
    Optional<SSLOptions> sslOptions = getSSLOptions(conf);
    Optional<Integer> protocolVersion = getProtocolVersion(conf);
    LoadBalancingPolicy loadBalancingPolicy = getReadLoadBalancingPolicy(hosts);
    SocketOptions socketOptions = getReadSocketOptions(conf);
    QueryOptions queryOptions = getReadQueryOptions(conf);
    PoolingOptions poolingOptions = getReadPoolingOptions(conf);
    Cluster.Builder builder = Cluster.builder()
            .addContactPoints(hosts)
            .withPort(port)
            .withCompression(ProtocolOptions.Compression.NONE);
    if (authProvider.isPresent())
        builder.withAuthProvider(authProvider.get());
    if (sslOptions.isPresent())
        builder.withSSL(sslOptions.get());
    if (protocolVersion.isPresent()) {
        builder.withProtocolVersion(ProtocolVersion.fromInt(protocolVersion.get()));
    }
    builder.withLoadBalancingPolicy(loadBalancingPolicy)
           .withSocketOptions(socketOptions)
           .withQueryOptions(queryOptions)
           .withPoolingOptions(poolingOptions);
    return builder.build();
}
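A sketch of how a caller might obtain a Cluster through this helper, assuming the job's Hadoop Configuration already carries any auth, SSL, and protocol-version settings; the hosts and port are placeholders:

import org.apache.hadoop.conf.Configuration;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class GetClusterUsage {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        String[] hosts = { "10.0.0.1", "10.0.0.2" };
        try (Cluster cluster = CqlConfigHelper.getCluster(hosts, conf, 9042);
             Session session = cluster.connect()) {
            // Auth, SSL, and protocol version are applied by the helper
            // only when present in conf; otherwise driver defaults are used.
            System.out.println(cluster.getClusterName());
        }
    }
}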
Use of com.datastax.driver.core.Cluster in project cassandra by apache: the getSplits method of the class CqlInputFormat.
public List<org.apache.hadoop.mapreduce.InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = HadoopCompat.getConfiguration(context);
    validateConfiguration(conf);
    keyspace = ConfigHelper.getInputKeyspace(conf);
    cfName = ConfigHelper.getInputColumnFamily(conf);
    partitioner = ConfigHelper.getInputPartitioner(conf);
    logger.trace("partitioner is {}", partitioner);
    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
    List<org.apache.hadoop.mapreduce.InputSplit> splits = new ArrayList<>();
    try (Cluster cluster = CqlConfigHelper.getInputCluster(ConfigHelper.getInputInitialAddress(conf).split(","), conf);
         Session session = cluster.connect()) {
        List<Future<List<org.apache.hadoop.mapreduce.InputSplit>>> splitfutures = new ArrayList<>();
        Pair<String, String> jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null) {
            jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.left),
                                   partitioner.getTokenFactory().fromString(jobKeyRange.right));
        }
        Metadata metadata = cluster.getMetadata();
        // canonical ranges and nodes holding replicas
        Map<TokenRange, Set<Host>> masterRangeNodes = getRangeMap(keyspace, metadata);
        for (TokenRange range : masterRangeNodes.keySet()) {
            if (jobRange == null) {
                // for each tokenRange, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, masterRangeNodes.get(range), conf, session)));
            } else {
                TokenRange jobTokenRange = rangeToTokenRange(metadata, jobRange);
                if (range.intersects(jobTokenRange)) {
                    for (TokenRange intersection : range.intersectWith(jobTokenRange)) {
                        // for each tokenRange, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(intersection, masterRangeNodes.get(range), conf, session)));
                    }
                }
            }
        }
        // wait until we have all the results back
        for (Future<List<org.apache.hadoop.mapreduce.InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }
    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}
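The fan-out/gather pattern used above is reusable on its own: submit one Callable per token range to a bounded executor, then block on each Future in turn. A minimal, self-contained sketch of that pattern (the string-valued task is a stand-in for SplitCallable, which is not shown in this excerpt):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FanOutGatherSketch {
    public static void main(String[] args) throws Exception {
        // Same executor shape as getSplits: no core threads, up to 128 workers.
        ExecutorService executor = new ThreadPoolExecutor(
                0, 128, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
        List<Future<List<String>>> futures = new ArrayList<>();
        try {
            for (int range = 0; range < 4; range++) {
                final int id = range;
                // One task per range; each returns the splits it computed.
                futures.add(executor.submit(new Callable<List<String>>() {
                    @Override
                    public List<String> call() {
                        return Collections.singletonList("split-for-range-" + id);
                    }
                }));
            }
            List<String> splits = new ArrayList<>();
            for (Future<List<String>> future : futures) {
                // Blocks until that range's result is available.
                splits.addAll(future.get());
            }
            System.out.println(splits);
        } finally {
            executor.shutdownNow();
        }
    }
}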
Use of com.datastax.driver.core.Cluster in project flink by apache: the testAckLoopExitOnException method of the class CassandraTupleWriteAheadSinkTest.
@Test(timeout = 20000)
public void testAckLoopExitOnException() throws Exception {
    final AtomicReference<Runnable> runnableFuture = new AtomicReference<>();
    final ClusterBuilder clusterBuilder = new ClusterBuilder() {

        private static final long serialVersionUID = 4624400760492936756L;

        @Override
        protected Cluster buildCluster(Cluster.Builder builder) {
            try {
                BoundStatement boundStatement = mock(BoundStatement.class);
                when(boundStatement.setDefaultTimestamp(any(long.class))).thenReturn(boundStatement);
                PreparedStatement preparedStatement = mock(PreparedStatement.class);
                when(preparedStatement.bind(Matchers.anyVararg())).thenReturn(boundStatement);
                ResultSetFuture future = mock(ResultSetFuture.class);
                when(future.get()).thenThrow(new RuntimeException("Expected exception."));
                doAnswer(new Answer<Void>() {

                    @Override
                    public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
                        synchronized (runnableFuture) {
                            runnableFuture.set((Runnable) invocationOnMock.getArguments()[0]);
                            runnableFuture.notifyAll();
                        }
                        return null;
                    }
                }).when(future).addListener(any(Runnable.class), any(Executor.class));
                Session session = mock(Session.class);
                when(session.prepare(anyString())).thenReturn(preparedStatement);
                when(session.executeAsync(any(BoundStatement.class))).thenReturn(future);
                Cluster cluster = mock(Cluster.class);
                when(cluster.connect()).thenReturn(session);
                return cluster;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    // Our asynchronous executor thread
    new Thread(new Runnable() {

        @Override
        public void run() {
            synchronized (runnableFuture) {
                while (runnableFuture.get() == null) {
                    try {
                        runnableFuture.wait();
                    } catch (InterruptedException e) {
                        // ignore interrupts
                    }
                }
            }
            runnableFuture.get().run();
        }
    }).start();
    CheckpointCommitter cc = mock(CheckpointCommitter.class);
    final CassandraTupleWriteAheadSink<Tuple0> sink = new CassandraTupleWriteAheadSink<>(
            "abc", TupleTypeInfo.of(Tuple0.class).createSerializer(new ExecutionConfig()), clusterBuilder, cc);
    OneInputStreamOperatorTestHarness<Tuple0, Tuple0> harness = new OneInputStreamOperatorTestHarness<>(sink);
    harness.getEnvironment().getTaskConfiguration().setBoolean("checkpointing", true);
    harness.setup();
    sink.open();
    // we should leave the loop and return false since we've seen an exception
    assertFalse(sink.sendValues(Collections.singleton(new Tuple0()), 0L));
    sink.close();
}
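The test stubs the entire driver stack inside buildCluster so that the sink's ack loop sees a failing ResultSetFuture. For contrast, a sketch of a non-mocking ClusterBuilder as it might appear in a real Flink job; the contact point and port are placeholders, not values from the test above:

import com.datastax.driver.core.Cluster;

import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;

public class ProductionClusterBuilder extends ClusterBuilder {

    private static final long serialVersionUID = 1L;

    @Override
    protected Cluster buildCluster(Cluster.Builder builder) {
        // The connector hands in a pre-created builder; subclasses only add options.
        return builder.addContactPoint("127.0.0.1").withPort(9042).build();
    }
}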