use of com.datastax.driver.core.Cluster in project flink by apache.
the class CassandraTupleWriteAheadSinkTest method testAckLoopExitOnException.
@Test(timeout = 20000)
public void testAckLoopExitOnException() throws Exception {
    final AtomicReference<Runnable> runnableFuture = new AtomicReference<>();
    final ClusterBuilder clusterBuilder = new ClusterBuilder() {
        private static final long serialVersionUID = 4624400760492936756L;

        @Override
        protected Cluster buildCluster(Cluster.Builder builder) {
            try {
                BoundStatement boundStatement = mock(BoundStatement.class);
                when(boundStatement.setDefaultTimestamp(any(long.class))).thenReturn(boundStatement);
                PreparedStatement preparedStatement = mock(PreparedStatement.class);
                when(preparedStatement.bind(Matchers.anyVararg())).thenReturn(boundStatement);
                // The mocked future fails on get() and hands its completion listener to the executor thread below.
                ResultSetFuture future = mock(ResultSetFuture.class);
                when(future.get()).thenThrow(new RuntimeException("Expected exception."));
                doAnswer(new Answer<Void>() {
                    @Override
                    public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
                        synchronized (runnableFuture) {
                            runnableFuture.set((Runnable) invocationOnMock.getArguments()[0]);
                            runnableFuture.notifyAll();
                        }
                        return null;
                    }
                }).when(future).addListener(any(Runnable.class), any(Executor.class));
                Session session = mock(Session.class);
                when(session.prepare(anyString())).thenReturn(preparedStatement);
                when(session.executeAsync(any(BoundStatement.class))).thenReturn(future);
                Cluster cluster = mock(Cluster.class);
                when(cluster.connect()).thenReturn(session);
                return cluster;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    // Our asynchronous executor thread: waits for the listener registered above and runs it.
    new Thread(new Runnable() {
        @Override
        public void run() {
            synchronized (runnableFuture) {
                while (runnableFuture.get() == null) {
                    try {
                        runnableFuture.wait();
                    } catch (InterruptedException e) {
                        // ignore interrupts
                    }
                }
            }
            runnableFuture.get().run();
        }
    }).start();
    CheckpointCommitter cc = mock(CheckpointCommitter.class);
    final CassandraTupleWriteAheadSink<Tuple0> sink = new CassandraTupleWriteAheadSink<>("abc", TupleTypeInfo.of(Tuple0.class).createSerializer(new ExecutionConfig()), clusterBuilder, cc);
    OneInputStreamOperatorTestHarness<Tuple0, Tuple0> harness = new OneInputStreamOperatorTestHarness<>(sink);
    harness.getEnvironment().getTaskConfiguration().setBoolean("checkpointing", true);
    harness.setup();
    sink.open();
    // we should leave the loop and return false since we've seen an exception
    assertFalse(sink.sendValues(Collections.singleton(new Tuple0()), 1L, 0L));
    sink.close();
}
use of com.datastax.driver.core.Cluster in project pinpoint by naver.
the class CassandraDriverConnectInterceptor method getHostList.
private List<String> getHostList(Object target) {
    if (!(target instanceof Cluster)) {
        return Collections.emptyList();
    }
    final Cluster cluster = (Cluster) target;
    final Set<Host> hosts = cluster.getMetadata().getAllHosts();
    final int port = cluster.getConfiguration().getProtocolOptions().getPort();
    final List<String> hostList = new ArrayList<>();
    for (Host host : hosts) {
        final String hostAddress = HostAndPort.toHostAndPortString(host.getAddress().getHostAddress(), port);
        hostList.add(hostAddress);
    }
    return hostList;
}
use of com.datastax.driver.core.Cluster in project beam by apache.
the class CassandraIOIT method createTable.
private static void createTable(CassandraIOITOptions options, String keyspace, String tableName) {
    try (Cluster cluster = getCluster(options);
         Session session = cluster.connect()) {
        LOG.info("Create {} keyspace if not exists", keyspace);
        session.execute("CREATE KEYSPACE IF NOT EXISTS " + keyspace + " WITH REPLICATION = " + "{'class':'SimpleStrategy', 'replication_factor':3};");
        session.execute("USE " + keyspace);
        LOG.info("Create {} table if not exists", tableName);
        session.execute("CREATE TABLE IF NOT EXISTS " + tableName + "(id bigint, name text, PRIMARY " + "KEY(id))");
    }
}
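A matching cleanup helper is not shown here; a hypothetical sketch (dropTable is not taken from the Beam sources) that reuses the same getCluster and try-with-resources pattern to drop the table between integration-test runs:
private static void dropTable(CassandraIOITOptions options, String keyspace, String tableName) {
    // Hypothetical companion to createTable above: connect directly to the keyspace and drop the table.
    try (Cluster cluster = getCluster(options);
         Session session = cluster.connect(keyspace)) {
        session.execute("DROP TABLE IF EXISTS " + tableName);
    }
}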
use of com.datastax.driver.core.Cluster in project beam by apache.
the class CassandraIO method getCluster.
/**
 * Get a Cassandra cluster using hosts and port.
 */
static Cluster getCluster(ValueProvider<List<String>> hosts, ValueProvider<Integer> port, ValueProvider<String> username, ValueProvider<String> password, ValueProvider<String> localDc, ValueProvider<String> consistencyLevel, ValueProvider<Integer> connectTimeout, ValueProvider<Integer> readTimeout) {
    Cluster.Builder builder = Cluster.builder().addContactPoints(hosts.get().toArray(new String[0])).withPort(port.get());
    if (username != null) {
        builder.withAuthProvider(new PlainTextAuthProvider(username.get(), password.get()));
    }
    DCAwareRoundRobinPolicy.Builder dcAwarePolicyBuilder = new DCAwareRoundRobinPolicy.Builder();
    if (localDc != null) {
        dcAwarePolicyBuilder.withLocalDc(localDc.get());
    }
    builder.withLoadBalancingPolicy(new TokenAwarePolicy(dcAwarePolicyBuilder.build()));
    if (consistencyLevel != null) {
        builder.withQueryOptions(new QueryOptions().setConsistencyLevel(ConsistencyLevel.valueOf(consistencyLevel.get())));
    }
    SocketOptions socketOptions = new SocketOptions();
    builder.withSocketOptions(socketOptions);
    if (connectTimeout != null) {
        socketOptions.setConnectTimeoutMillis(connectTimeout.get());
    }
    if (readTimeout != null) {
        socketOptions.setReadTimeoutMillis(readTimeout.get());
    }
    return builder.build();
}
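Each optional argument is null-checked before its get() is called, so callers can pass null to skip authentication, data-center affinity, consistency, or timeout settings. An illustrative call, assuming Beam's ValueProvider.StaticValueProvider for the fixed values; the host, port, data-center name and timeouts are placeholders, not values from the Beam sources:
Cluster cluster = getCluster(
        StaticValueProvider.of(Collections.singletonList("127.0.0.1")), // hosts
        StaticValueProvider.of(9042),                                   // native protocol port
        null,                                                           // username: skip authentication
        null,                                                           // password
        StaticValueProvider.of("datacenter1"),                          // localDc
        StaticValueProvider.of("LOCAL_ONE"),                            // consistencyLevel
        StaticValueProvider.of(5000),                                   // connect timeout (ms)
        StaticValueProvider.of(12000));                                 // read timeout (ms)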
use of com.datastax.driver.core.Cluster in project indy by Commonjava.
the class CassandraClient method init.
@PostConstruct
private void init() {
    if (!config.isEnabled()) {
        logger.info("Cassandra client not enabled");
        return;
    }
    host = config.getCassandraHost();
    port = config.getCassandraPort();
    SocketOptions socketOptions = new SocketOptions();
    socketOptions.setConnectTimeoutMillis(config.getConnectTimeoutMillis());
    socketOptions.setReadTimeoutMillis(config.getReadTimeoutMillis());
    Cluster.Builder builder = Cluster.builder()
            .withoutJMXReporting()
            .withRetryPolicy(new ConfigurableRetryPolicy(config.getReadRetries(), config.getWriteRetries()))
            .addContactPoint(host)
            .withPort(port)
            .withSocketOptions(socketOptions);
    username = config.getCassandraUser();
    String password = config.getCassandraPass();
    if (isNotBlank(username) && isNotBlank(password)) {
        logger.info("Build with credentials, user: {}, pass: ****", username);
        builder.withCredentials(username, password);
    }
    cluster = builder.build();
}
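Since init() builds the Cluster when the bean is constructed, a corresponding shutdown hook is expected to release it; a hypothetical sketch of such a method, not part of the snippet above:
@PreDestroy
public void close() {
    // Release driver connections and internal threads when the bean is destroyed.
    if (cluster != null) {
        cluster.close();
        cluster = null;
    }
}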