Use of com.datastax.driver.core.ResultSetFuture in project flink by apache.
The class CassandraOutputFormat, method writeRecord:
@Override
public void writeRecord(OUT record) throws IOException {
    // Fail fast if an earlier asynchronous write reported an error via the callback.
    if (exception != null) {
        throw new IOException("write record failed", exception);
    }
    // Copy the record's fields into the bind parameters of the prepared statement.
    Object[] fields = new Object[record.getArity()];
    for (int i = 0; i < record.getArity(); i++) {
        fields[i] = record.getField(i);
    }
    // Execute asynchronously; the callback captures any failure for later writes.
    ResultSetFuture result = session.executeAsync(prepared.bind(fields));
    Futures.addCallback(result, callback);
}
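writeRecord relies on two fields declared elsewhere in CassandraOutputFormat: the stored failure and the Guava callback that records it. A minimal sketch of that wiring, assuming com.google.common.util.concurrent.FutureCallback (the field names mirror the snippet; the real class may differ in detail):

// Failure from a previous asynchronous write, rethrown by the next writeRecord call.
private volatile Throwable exception = null;

private final FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
    @Override
    public void onSuccess(ResultSet ignored) {
        // Write succeeded; nothing to record.
    }

    @Override
    public void onFailure(Throwable t) {
        // Remember the failure so it surfaces on the next write.
        exception = t;
    }
};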
Use of com.datastax.driver.core.ResultSetFuture in project nifi by apache.
The class PutCassandraQL, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    ComponentLog logger = getLogger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final long startNanos = System.nanoTime();
    final long statementTimeout = context.getProperty(STATEMENT_TIMEOUT).evaluateAttributeExpressions(flowFile).asTimePeriod(TimeUnit.MILLISECONDS);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());
    // The driver documentation recommends keeping the session open for the entire time the processor
    // is running, and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
    final Session connectionSession = cassandraSession.get();
    String cql = getCQL(session, flowFile, charset);
    try {
        PreparedStatement statement = connectionSession.prepare(cql);
        BoundStatement boundStatement = statement.bind();
        Map<String, String> attributes = flowFile.getAttributes();
        for (final Map.Entry<String, String> entry : attributes.entrySet()) {
            final String key = entry.getKey();
            final Matcher matcher = CQL_TYPE_ATTRIBUTE_PATTERN.matcher(key);
            if (matcher.matches()) {
                final int parameterIndex = Integer.parseInt(matcher.group(1));
                String paramType = entry.getValue();
                if (StringUtils.isEmpty(paramType)) {
                    throw new ProcessException("Value of the " + key + " attribute is null or empty, it must contain a valid value");
                }
                paramType = paramType.trim();
                final String valueAttrName = "cql.args." + parameterIndex + ".value";
                final String parameterValue = attributes.get(valueAttrName);
                try {
                    setStatementObject(boundStatement, parameterIndex - 1, valueAttrName, parameterValue, paramType);
                } catch (final InvalidTypeException | IllegalArgumentException e) {
                    throw new ProcessException("The value of the " + valueAttrName + " is '" + parameterValue + "', which cannot be converted into the necessary data type: " + paramType, e);
                }
            }
        }
        try {
            ResultSetFuture future = connectionSession.executeAsync(boundStatement);
            if (statementTimeout > 0) {
                future.getUninterruptibly(statementTimeout, TimeUnit.MILLISECONDS);
            } else {
                future.getUninterruptibly();
            }
            // Emit a Provenance SEND event
            final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            // This isn't a real URI, but since Cassandra is distributed we just use the cluster name
            String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName();
            session.getProvenanceReporter().send(flowFile, transitUri, transmissionMillis, true);
            session.transfer(flowFile, REL_SUCCESS);
        } catch (final TimeoutException e) {
            throw new ProcessException(e);
        }
    } catch (final NoHostAvailableException nhae) {
        logger.error("No host in the Cassandra cluster can be contacted successfully to execute this statement", nhae);
        // Log up to 10 error messages. Otherwise, if a 1000-node cluster was specified but there was no
        // connectivity, a thousand error messages would be logged. We still want information from Cassandra
        // itself, so cap the error limit at 10, format the messages, and don't include the stack trace
        // (it is displayed by the logger message above).
        logger.error(nhae.getCustomMessage(10, true, false));
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryExecutionException qee) {
        logger.error("Cannot execute the statement with the requested consistency level successfully", qee);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryValidationException qve) {
        logger.error("The CQL statement {} is invalid due to syntax error, authorization issue, or another validation problem; routing {} to failure", new Object[] { cql, flowFile }, qve);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    } catch (final ProcessException e) {
        logger.error("Unable to execute CQL statement {} for {} due to {}; routing to failure", new Object[] { cql, flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
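The parameter-binding loop is driven by paired FlowFile attributes: an attribute matching CQL_TYPE_ATTRIBUTE_PATTERN names the CQL type of bind marker N, and cql.args.N.value carries the value. A hedged illustration of the convention (the cql.args.N.type / cql.args.N.value names follow NiFi's documented scheme; the surrounding map is hypothetical setup code, not part of the processor):

// Hypothetical attributes a caller would set on the FlowFile before routing it here,
// for the statement "INSERT INTO users (id, name) VALUES (?, ?)":
Map<String, String> attrs = new HashMap<>();
attrs.put("cql.args.1.type", "int");    // CQL type of the first bind marker
attrs.put("cql.args.1.value", "42");
attrs.put("cql.args.2.type", "text");   // CQL type of the second bind marker
attrs.put("cql.args.2.value", "alice");
// The loop parses the 1-based index N from the type attribute and calls
// setStatementObject(boundStatement, N - 1, ...) to hit the driver's 0-based position.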
Use of com.datastax.driver.core.ResultSetFuture in project SimpleFlatMapper by arnaudroger.
The class DatastaxCrud, method deleteAsync:
public UninterruptibleFuture<Void> deleteAsync(K key) {
    BoundStatement boundStatement = deleteQuery(key);
    ResultSetFuture resultSetFuture = session.executeAsync(boundStatement);
    return new NoResultFuture(resultSetFuture);
}
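NoResultFuture adapts the driver's ResultSetFuture into a Void-typed future, since a CQL delete returns no rows the caller needs. A minimal sketch of that adaptation under the assumption that it behaves like a Guava transform (SimpleFlatMapper's actual NoResultFuture implementation may differ):

// Discard the (empty) ResultSet and expose only completion; a sketch, not
// SimpleFlatMapper's real code.
ListenableFuture<Void> completionOnly =
        Futures.transform(resultSetFuture, (ResultSet rs) -> (Void) null);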
Use of com.datastax.driver.core.ResultSetFuture in project SimpleFlatMapper by arnaudroger.
The class DatastaxCrud, method deleteAsync (timestamp overload):
public UninterruptibleFuture<Void> deleteAsync(K key, long timestamp) {
    BoundStatement boundStatement = deleteQuery(key, timestamp);
    ResultSetFuture resultSetFuture = session.executeAsync(boundStatement);
    return new NoResultFuture(resultSetFuture);
}
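A hedged usage example for the timestamp overload (the crud instance and its key type are hypothetical):

// Assume a DatastaxCrud<User, Long> named crud; the explicit write timestamp
// (microseconds since the epoch, by Cassandra convention) is used by the server
// to order this delete against concurrent writes to the same row.
UninterruptibleFuture<Void> pending = crud.deleteAsync(42L, System.currentTimeMillis() * 1000);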
Use of com.datastax.driver.core.ResultSetFuture in project thingsboard by thingsboard.
The class CassandraDeviceDao, method findTenantDeviceTypesAsync:
@Override
public ListenableFuture<List<EntitySubtype>> findTenantDeviceTypesAsync(UUID tenantId) {
    Select select = select().from(ENTITY_SUBTYPE_COLUMN_FAMILY_NAME);
    Select.Where query = select.where();
    query.and(eq(ENTITY_SUBTYPE_TENANT_ID_PROPERTY, tenantId));
    query.and(eq(ENTITY_SUBTYPE_ENTITY_TYPE_PROPERTY, EntityType.DEVICE));
    query.setConsistencyLevel(cluster.getDefaultReadConsistencyLevel());
    ResultSetFuture resultSetFuture = executeAsyncRead(query);
    return Futures.transform(resultSetFuture, new Function<ResultSet, List<EntitySubtype>>() {
        @Nullable
        @Override
        public List<EntitySubtype> apply(@Nullable ResultSet resultSet) {
            Result<EntitySubtypeEntity> result = cluster.getMapper(EntitySubtypeEntity.class).map(resultSet);
            if (result != null) {
                List<EntitySubtype> entitySubtypes = new ArrayList<>();
                result.all().forEach((entitySubtypeEntity) -> entitySubtypes.add(entitySubtypeEntity.toEntitySubtype()));
                return entitySubtypes;
            } else {
                return Collections.emptyList();
            }
        }
    });
}
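Since ResultSetFuture extends Guava's ListenableFuture<ResultSet>, the anonymous Function above can also be written as a lambda. A sketch of the equivalent form (assumes java.util.stream.Collectors is imported; the original's null check on result is omitted for brevity, and recent Guava releases replace the two-argument Futures.transform with an overload that takes an Executor, e.g. MoreExecutors.directExecutor()):

ListenableFuture<List<EntitySubtype>> subtypes = Futures.transform(resultSetFuture,
        (ResultSet rs) -> cluster.getMapper(EntitySubtypeEntity.class).map(rs)
                .all()
                .stream()
                .map(EntitySubtypeEntity::toEntitySubtype)
                .collect(Collectors.toList()));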