Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project storm by apache.
Class BaseCassandraBolt, method prepare().
/**
 * {@inheritDoc}
 */
@Override
public void prepare(Map stormConfig, TopologyContext topologyContext, OutputCollector outputCollector) {
    this.outputCollector = outputCollector;
    this.stormConfig = stormConfig;
    Map<String, Object> cassandraClientConfig = cassandraConfig != null ? cassandraConfig : stormConfig;
    this.cassandraConf = new CassandraConf(cassandraClientConfig);
    this.client = clientProvider.getClient(cassandraClientConfig);
    try {
        session = client.connect();
    } catch (NoHostAvailableException e) {
        outputCollector.reportError(e);
    }
}
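The bolt only reports the failure through the collector and leaves session unset, so a topology that should survive a transient outage needs its own retry around the connect. A minimal sketch of such a retry loop, assuming driver 3.x; the class name, backoff schedule, and maxAttempts parameter are illustrative, not part of the Storm code above.

import java.util.concurrent.TimeUnit;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.NoHostAvailableException;

public class RetryingConnect {

    // Retries Cluster.connect() with exponential backoff (1s, 2s, 4s, ...).
    // Assumes maxAttempts > 0; rethrows the last NoHostAvailableException
    // once the attempts are exhausted.
    public static Session connectWithRetry(Cluster cluster, int maxAttempts) throws InterruptedException {
        NoHostAvailableException last = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return cluster.connect();
            } catch (NoHostAvailableException e) {
                last = e;
                TimeUnit.SECONDS.sleep(1L << attempt);
            }
        }
        throw last;
    }
}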
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project nifi by apache.
Class PutCassandraQL, method onTrigger().
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    ComponentLog logger = getLogger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final long startNanos = System.nanoTime();
    final long statementTimeout = context.getProperty(STATEMENT_TIMEOUT).evaluateAttributeExpressions(flowFile).asTimePeriod(TimeUnit.MILLISECONDS);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());
    // The documentation for the driver recommends the session remain open the entire time the processor is running
    // and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
    final Session connectionSession = cassandraSession.get();
    String cql = getCQL(session, flowFile, charset);
    try {
        PreparedStatement statement = connectionSession.prepare(cql);
        BoundStatement boundStatement = statement.bind();
        Map<String, String> attributes = flowFile.getAttributes();
        for (final Map.Entry<String, String> entry : attributes.entrySet()) {
            final String key = entry.getKey();
            final Matcher matcher = CQL_TYPE_ATTRIBUTE_PATTERN.matcher(key);
            if (matcher.matches()) {
                final int parameterIndex = Integer.parseInt(matcher.group(1));
                String paramType = entry.getValue();
                if (StringUtils.isEmpty(paramType)) {
                    throw new ProcessException("Value of the " + key + " attribute is null or empty; it must contain a valid value");
                }
                paramType = paramType.trim();
                final String valueAttrName = "cql.args." + parameterIndex + ".value";
                final String parameterValue = attributes.get(valueAttrName);
                try {
                    setStatementObject(boundStatement, parameterIndex - 1, valueAttrName, parameterValue, paramType);
                } catch (final InvalidTypeException | IllegalArgumentException e) {
                    throw new ProcessException("The value of the " + valueAttrName + " attribute is '" + parameterValue + "', which cannot be converted into the necessary data type: " + paramType, e);
                }
            }
        }
        try {
            ResultSetFuture future = connectionSession.executeAsync(boundStatement);
            if (statementTimeout > 0) {
                future.getUninterruptibly(statementTimeout, TimeUnit.MILLISECONDS);
            } else {
                future.getUninterruptibly();
            }
            // Emit a Provenance SEND event
            final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            // This isn't a real URI, but since Cassandra is distributed we just use the cluster name
            String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName();
            session.getProvenanceReporter().send(flowFile, transitUri, transmissionMillis, true);
            session.transfer(flowFile, REL_SUCCESS);
        } catch (final TimeoutException e) {
            throw new ProcessException(e);
        }
    } catch (final NoHostAvailableException nhae) {
        getLogger().error("No host in the Cassandra cluster can be contacted successfully to execute this statement", nhae);
        // Log up to 10 error messages. Otherwise, if a 1000-node cluster was specified but there was no connectivity,
        // a thousand error messages would be logged. We still want the information from Cassandra itself, so
        // cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
        // logger message above).
        getLogger().error(nhae.getCustomMessage(10, true, false));
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryExecutionException qee) {
        logger.error("Cannot execute the statement with the requested consistency level successfully", qee);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryValidationException qve) {
        logger.error("The CQL statement {} is invalid due to syntax error, authorization issue, or another validation problem; routing {} to failure", new Object[] { cql, flowFile }, qve);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    } catch (final ProcessException e) {
        logger.error("Unable to execute CQL statement {} for {} due to {}; routing to failure", new Object[] { cql, flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
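The parameter-binding loop above reads its values from FlowFile attributes named cql.args.N.value, paired with a type attribute matched by CQL_TYPE_ATTRIBUTE_PATTERN (by NiFi's documented convention, cql.args.N.type); N is 1-based and maps to bind index N - 1. A small sketch of the attributes an upstream step might set for a statement with two bind markers; the statement and values are hypothetical.

import java.util.HashMap;
import java.util.Map;

public class CqlArgsAttributes {
    public static void main(String[] args) {
        // For a statement like "UPDATE users SET age = ? WHERE user_id = ?":
        Map<String, String> attrs = new HashMap<>();
        attrs.put("cql.args.1.type", "int");      // bound at index 0
        attrs.put("cql.args.1.value", "42");
        attrs.put("cql.args.2.type", "text");     // bound at index 1
        attrs.put("cql.args.2.value", "coast2coast");
        System.out.println(attrs);
    }
}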
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project nifi by apache.
Class QueryCassandra, method onScheduled().
@OnScheduled
public void onScheduled(final ProcessContext context) {
    ComponentLog log = getLogger();
    try {
        connectToCassandra(context);
        final int fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
        if (fetchSize > 0) {
            synchronized (cluster.get()) {
                cluster.get().getConfiguration().getQueryOptions().setFetchSize(fetchSize);
            }
        }
    } catch (final NoHostAvailableException nhae) {
        log.error("No host in the Cassandra cluster can be contacted successfully to execute this query", nhae);
        // Log up to 10 error messages. Otherwise, if a 1000-node cluster was specified but there was no connectivity,
        // a thousand error messages would be logged. However, we would like information from Cassandra itself, so
        // cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
        // logger message above).
        log.error(nhae.getCustomMessage(10, true, false));
        throw new ProcessException(nhae);
    } catch (final AuthenticationException ae) {
        log.error("Invalid username/password combination", ae);
        throw new ProcessException(ae);
    }
}
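Here the fetch size is applied after connecting, under a lock, by mutating the live cluster's configuration. The same default can also be supplied up front when the Cluster is built; a sketch, with a hypothetical contact point and fetch size:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.QueryOptions;

public class FetchSizeAtBuildTime {
    public static void main(String[] args) {
        // setFetchSize sets the default page size for every statement
        // executed through this Cluster unless overridden per statement.
        Cluster cluster = Cluster.builder()
                .addContactPoint("127.0.0.1")
                .withQueryOptions(new QueryOptions().setFetchSize(5000))
                .build();
        System.out.println(cluster.getConfiguration().getQueryOptions().getFetchSize());
        cluster.close();
    }
}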
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project nifi by apache.
Class PutCassandraQLTest, method testProcessorNoHostAvailableException().
@Test
public void testProcessorNoHostAvailableException() {
    setUpStandardTestConfig();
    processor.setExceptionToThrow(new NoHostAvailableException(new HashMap<InetSocketAddress, Throwable>()));
    testRunner.enqueue("UPDATE users SET cities = [ 'New York', 'Los Angeles' ] WHERE user_id = 'coast2coast';");
    testRunner.run(1, true, true);
    testRunner.assertAllFlowFilesTransferred(PutCassandraQL.REL_RETRY, 1);
}
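The test can pass an empty map because NoHostAvailableException's public constructor takes the per-host failure map directly. A sketch of the same constructor with one entry, plus the accessors the NiFi handlers above rely on; the address and cause are made up.

import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;

import com.datastax.driver.core.exceptions.NoHostAvailableException;

public class NoHostAvailableDemo {
    public static void main(String[] args) {
        Map<InetSocketAddress, Throwable> errors = new HashMap<>();
        errors.put(new InetSocketAddress("10.0.0.1", 9042), new ConnectException("Connection refused"));
        NoHostAvailableException nhae = new NoHostAvailableException(errors);
        System.out.println(nhae.getErrors().size());          // 1
        // Same call PutCassandraQL and QueryCassandra use: cap at 10 messages,
        // format them, omit stack traces.
        System.out.println(nhae.getCustomMessage(10, true, false));
    }
}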
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project atlasdb by palantir.
Class CqlKeyValueService, method initializeConnectionPool().
@SuppressWarnings("CyclomaticComplexity")
protected void initializeConnectionPool() {
    Collection<InetSocketAddress> configuredHosts = config.servers();
    Cluster.Builder clusterBuilder = Cluster.builder();
    clusterBuilder.addContactPointsWithPorts(configuredHosts);
    // for JMX metrics
    clusterBuilder.withClusterName("atlas_cassandra_cluster_" + config.getKeyspaceOrThrow());
    clusterBuilder.withCompression(Compression.LZ4);
    if (config.sslConfiguration().isPresent()) {
        SSLContext sslContext = SslSocketFactories.createSslContext(config.sslConfiguration().get());
        SSLOptions sslOptions = new SSLOptions(sslContext, SSLOptions.DEFAULT_SSL_CIPHER_SUITES);
        clusterBuilder.withSSL(sslOptions);
    } else if (config.ssl().isPresent() && config.ssl().get()) {
        clusterBuilder.withSSL();
    }
    PoolingOptions poolingOptions = new PoolingOptions();
    poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, config.poolSize());
    poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, config.poolSize());
    poolingOptions.setPoolTimeoutMillis(config.cqlPoolTimeoutMillis());
    clusterBuilder.withPoolingOptions(poolingOptions);
    // defaults for queries; can override on a per-query basis
    QueryOptions queryOptions = new QueryOptions();
    queryOptions.setFetchSize(config.fetchBatchCount());
    clusterBuilder.withQueryOptions(queryOptions);
    // Refuse to talk to nodes twice as (latency-wise) slow as the best one, over a timescale of 100ms,
    // and every 10s try to re-evaluate ignored nodes' performance by giving them queries again.
    // Note we are being purposely datacenter-irreverent here, instead relying on latency alone
    // to approximate what DCAwareRR would do;
    // this is because DCs for Atlas are always quite latency-close and should be used this way,
    // not as if we have some cross-country backup DC.
    LoadBalancingPolicy policy = LatencyAwarePolicy.builder(new RoundRobinPolicy()).build();
    // If the user wants, do not automatically add new nodes to the pool (useful during DC migrations / rebuilds)
    if (!config.autoRefreshNodes()) {
        policy = new WhiteListPolicy(policy, configuredHosts);
    }
    // also try to select coordinators who own the data we're talking about to avoid an extra hop,
    // but also shuffle which replica we talk to for load balancing that comes at the expense
    // of less effective caching
    policy = new TokenAwarePolicy(policy, true);
    clusterBuilder.withLoadBalancingPolicy(policy);
    Metadata metadata;
    try {
        cluster = clusterBuilder.build();
        // special; this is the first place we connect to hosts, so this is where people will see failures
        metadata = cluster.getMetadata();
    } catch (NoHostAvailableException e) {
        if (e.getMessage().contains("Unknown compression algorithm")) {
            clusterBuilder.withCompression(Compression.NONE);
            cluster = clusterBuilder.build();
            metadata = cluster.getMetadata();
        } else {
            throw e;
        }
    } catch (IllegalStateException e) {
        // god dammit datastax what did I do to _you_
        if (e.getMessage().contains("requested compression is not available")) {
            clusterBuilder.withCompression(Compression.NONE);
            cluster = clusterBuilder.build();
            metadata = cluster.getMetadata();
        } else {
            throw e;
        }
    }
    session = cluster.connect();
    clusterBuilder.withSocketOptions(new SocketOptions().setReadTimeoutMillis(CassandraConstants.LONG_RUNNING_QUERY_SOCKET_TIMEOUT_MILLIS));
    longRunningQueryCluster = clusterBuilder.build();
    longRunningQuerySession = longRunningQueryCluster.connect();
    cqlStatementCache = new CqlStatementCache(session, longRunningQuerySession);
    cqlKeyValueServices = new CqlKeyValueServices();
    if (log.isInfoEnabled()) {
        StringBuilder hostInfo = new StringBuilder();
        for (Host host : metadata.getAllHosts()) {
            hostInfo.append(String.format("Datacenter: %s; Host: %s; Rack: %s%n", host.getDatacenter(), host.getAddress(), host.getRack()));
        }
        log.info("Initialized cassandra cluster using new API with hosts {}, seen keyspaces {}, cluster name {}", hostInfo.toString(), metadata.getKeyspaces(), metadata.getClusterName());
    }
}
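The distinctive move here is the compression fallback: LZ4 is requested up front, and if the first connection (forced by getMetadata()) fails because the algorithm is unavailable, the cluster is rebuilt without compression. A condensed sketch of just that pattern, assuming driver 3.x; the contact point is hypothetical, and unlike the AtlasDB code it only handles the NoHostAvailableException path, not the client-side IllegalStateException.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ProtocolOptions.Compression;
import com.datastax.driver.core.exceptions.NoHostAvailableException;

public class CompressionFallback {

    public static Cluster buildWithFallback(String contactPoint) {
        Cluster.Builder builder = Cluster.builder()
                .addContactPoint(contactPoint)
                .withCompression(Compression.LZ4);
        Cluster cluster = builder.build();
        try {
            // getMetadata() forces the initial connection, as in the code above.
            cluster.getMetadata();
            return cluster;
        } catch (NoHostAvailableException e) {
            if (e.getMessage().contains("Unknown compression algorithm")) {
                cluster.close();
                cluster = builder.withCompression(Compression.NONE).build();
                cluster.getMetadata();
                return cluster;
            }
            throw e;
        }
    }
}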