Example usage of com.datastax.driver.core.Statement in the OpenNMS newts project: the CassandraIndexer class, update method.
@Override
public void update(Collection<Sample> samples) {
    Timer.Context ctx = m_updateTimer.time();
    Set<StatementGenerator> generators = Sets.newHashSet();
    Map<Context, Map<Resource, ResourceMetadata>> cacheQueue = Maps.newHashMap();
    // Collect the index statements (and pending cache updates) for every sample.
    for (Sample sample : samples) {
        maybeIndexResource(cacheQueue, generators, sample.getContext(), sample.getResource());
        maybeIndexResourceAttributes(cacheQueue, generators, sample.getContext(), sample.getResource());
        maybeAddMetricName(cacheQueue, generators, sample.getContext(), sample.getResource(), sample.getName());
    }
    try {
        if (!generators.isEmpty()) {
            // Drop statements another thread is already executing, and claim the
            // remainder so concurrent callers skip them in turn.
            synchronized (statementsInFlight) {
                generators.removeAll(statementsInFlight);
                statementsInFlight.addAll(generators);
            }
            m_inserts.mark(generators.size());
            // Asynchronously execute the statements, then block until all complete.
            List<ResultSetFuture> futures = Lists.newArrayList();
            for (Statement statementToExecute : toStatements(generators)) {
                futures.add(m_session.executeAsync(statementToExecute));
            }
            for (ResultSetFuture future : futures) {
                future.getUninterruptibly();
            }
        }
        // Order matters here; We want the cache updated only after a successful Cassandra write.
        // Iterate entrySet() rather than keySet() + get() to avoid a second map lookup per context.
        for (Map.Entry<Context, Map<Resource, ResourceMetadata>> contextEntry : cacheQueue.entrySet()) {
            for (Map.Entry<Resource, ResourceMetadata> entry : contextEntry.getValue().entrySet()) {
                m_cache.merge(contextEntry.getKey(), entry.getKey(), entry.getValue());
            }
        }
    } finally {
        // Release our claim on the statements whether or not the writes succeeded.
        synchronized (statementsInFlight) {
            statementsInFlight.removeAll(generators);
        }
        ctx.stop();
    }
}
Example usage of com.datastax.driver.core.Statement in the OpenNMS newts project: the StatementUtils class, getStatements method.
/**
 * Converts the given statement generators into executable statements, batching
 * statements that share a non-null key into unlogged batches of at most
 * {@code maxBatchSize} statements each.
 *
 * @param contextConfigurations supplies the per-context write consistency level
 * @param maxBatchSize maximum number of statements per unlogged batch
 * @param generators the statement generators to convert
 * @return the statements (and batches) to execute
 */
public static List<Statement> getStatements(ContextConfigurations contextConfigurations, int maxBatchSize, Set<StatementGenerator> generators) {
    List<Statement> statementsToExecute = Lists.newArrayList();
    Map<String, List<Statement>> statementsByKey = Maps.newHashMap();
    for (StatementGenerator generator : generators) {
        Statement statement = generator.toStatement().setConsistencyLevel(contextConfigurations.getWriteConsistency(generator.getContext()));
        String key = generator.getKey();
        if (key == null) {
            // Don't try batching these
            statementsToExecute.add(statement);
            continue;
        }
        // Group these by key; computeIfAbsent replaces the get()/null-check/put dance.
        statementsByKey.computeIfAbsent(key, k -> Lists.newArrayList()).add(statement);
    }
    // Consolidate the grouped statements into unlogged batches of at most maxBatchSize.
    for (List<Statement> statementsForKey : statementsByKey.values()) {
        for (List<Statement> partition : Lists.partition(statementsForKey, maxBatchSize)) {
            statementsToExecute.add(unloggedBatch(partition.toArray(new RegularStatement[partition.size()])));
        }
    }
    return statementsToExecute;
}
Example usage of com.datastax.driver.core.Statement in the prestodb presto project: the CassandraTokenSplitManager class, getSizeEstimates method.
/**
 * Reads Cassandra's size estimates for the given table from the
 * system.size_estimates table.
 *
 * @param keyspaceName the keyspace to look up
 * @param tableName the table to look up
 * @return one estimate per token range recorded for the table
 */
private List<SizeEstimate> getSizeEstimates(String keyspaceName, String tableName) {
    checkSizeEstimatesTableExist();
    Statement statement = select("range_start", "range_end", "mean_partition_size", "partitions_count").from(SYSTEM, SIZE_ESTIMATES).where(eq("keyspace_name", keyspaceName)).and(eq("table_name", tableName));
    ResultSet result = session.executeWithSession(session -> session.execute(statement));
    ImmutableList.Builder<SizeEstimate> estimates = ImmutableList.builder();
    // Iterate the ResultSet directly (it is Iterable<Row> in driver 3.x) instead of
    // result.all(), which would first materialize every row into an intermediate list.
    for (Row row : result) {
        estimates.add(new SizeEstimate(row.getString("range_start"), row.getString("range_end"), row.getLong("mean_partition_size"), row.getLong("partitions_count")));
    }
    return estimates.build();
}
Example usage of com.datastax.driver.core.Statement in the Apache Ignite project: the CassandraSessionImpl class, execute method.
/** {@inheritDoc} */
@Override
public <V> V execute(ExecutionAssistant<V> assistant) {
    // Executes the assistant's CQL statement with up to CQL_EXECUTION_ATTEMPTS_COUNT
    // attempts, recovering between attempts from known-transient failures
    // (absent table, host availability, stale prepared statement).
    int attempt = 0;
    Throwable error = null;
    String errorMsg = "Failed to execute Cassandra CQL statement: " + assistant.getStatement();
    RandomSleeper sleeper = newSleeper();
    // Balanced by decrementSessionRefs() in the finally block below.
    incrementSessionRefs();
    try {
        while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
            error = null;
            if (attempt != 0) {
                log.warning("Trying " + (attempt + 1) + " attempt to execute Cassandra CQL statement: " + assistant.getStatement());
            }
            try {
                PreparedStatement preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(), assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
                // A null prepared statement indicates there is nothing to execute
                // (presumably: table absent and not required — confirm in prepareStatement).
                if (preparedSt == null)
                    return null;
                Statement statement = tuneStatementExecutionOptions(assistant.bindStatement(preparedSt));
                ResultSet res = session().execute(statement);
                // Only the first row (if any) is consumed; the assistant maps it to the result.
                Row row = res == null || !res.iterator().hasNext() ? null : res.iterator().next();
                return row == null ? null : assistant.process(row);
            } catch (Throwable e) {
                error = e;
                if (CassandraHelper.isTableAbsenceError(e)) {
                    // Missing table is only fatal when the assistant requires it.
                    if (!assistant.tableExistenceRequired()) {
                        log.warning(errorMsg, e);
                        return null;
                    }
                    handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
                } else if (CassandraHelper.isHostsAvailabilityError(e))
                    handleHostsAvailabilityError(e, attempt, errorMsg);
                else if (CassandraHelper.isPreparedStatementClusterError(e))
                    handlePreparedStatementClusterError(e);
                else
                    // For an error which we don't know how to handle, we will not try next attempts and terminate.
                    throw new IgniteException(errorMsg, e);
            }
            // Back off before retrying, except after a table-absence error
            // (presumably handleTableAbsenceError already waits — TODO confirm).
            if (!CassandraHelper.isTableAbsenceError(error))
                sleeper.sleep();
            attempt++;
        }
    } catch (Throwable e) {
        // NOTE(review): this also catches failures thrown by the recovery handlers and
        // sleeper above; the error is logged and rethrown as IgniteException below.
        error = e;
    } finally {
        decrementSessionRefs();
    }
    // All attempts exhausted (or recovery itself failed): surface the last error.
    log.error(errorMsg, error);
    throw new IgniteException(errorMsg, error);
}
Aggregations