Use of com.datastax.driver.core.Statement in project storm by apache.
The class AsyncExecutor, method execAsync.
/**
 * Asynchronously executes all statements associated with the specified input.
 * The input is passed to the handler's success callback once all queries succeed,
 * or to its failure callback if any one of them fails.
 */
public List<SettableFuture<T>> execAsync(List<Statement> statements, final T input) {
    List<SettableFuture<T>> settableFutures = new ArrayList<>(statements.size());
    for (Statement s : statements) {
        settableFutures.add(execAsync(s, input, AsyncResultHandler.NO_OP_HANDLER));
    }
    ListenableFuture<List<T>> allAsList = Futures.allAsList(settableFutures);
    // handler and executorService are fields of the enclosing AsyncExecutor.
    Futures.addCallback(allAsList, new FutureCallback<List<T>>() {
        @Override
        public void onSuccess(List<T> inputs) {
            handler.success(input);
        }

        @Override
        public void onFailure(Throwable t) {
            handler.failure(t, input);
        }
    }, executorService);
    return settableFutures;
}
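A minimal usage sketch (not from the Storm source): the AsyncExecutor<Tuple> and Tuple parameters below are hypothetical stand-ins for objects the surrounding bolt would supply.

// Hypothetical caller: "executor" and "input" come from the surrounding bolt.
void writeAsync(AsyncExecutor<Tuple> executor, Tuple input) {
    List<Statement> statements = Arrays.asList(
            new SimpleStatement("INSERT INTO users(id) VALUES (1);"),
            new SimpleStatement("INSERT INTO users(id) VALUES (2);"));
    // Both inserts run concurrently; the executor's handler fires once for the
    // whole group: success(input) if all succeed, failure(t, input) otherwise.
    List<SettableFuture<Tuple>> futures = executor.execAsync(statements, input);
}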
Use of com.datastax.driver.core.Statement in project zeppelin by apache.
The class InterpreterLogicTest, method should_generate_batch_statement.
@Test
public void should_generate_batch_statement() throws Exception {
    //Given
    Statement st1 = new SimpleStatement("SELECT * FROM users LIMIT 10;");
    Statement st2 = new SimpleStatement("INSERT INTO users(id) VALUES(10);");
    Statement st3 = new SimpleStatement("UPDATE users SET name = 'John DOE' WHERE id=10;");
    CassandraQueryOptions options = new CassandraQueryOptions(
            Option.apply(QUORUM),
            Option.<ConsistencyLevel>empty(),
            Option.empty(),
            Option.<RetryPolicy>empty(),
            Option.empty(),
            Option.empty());

    //When
    BatchStatement actual = helper.generateBatchStatement(UNLOGGED, options,
            toScalaList(asList(st1, st2, st3)));

    //Then
    assertThat(actual).isNotNull();
    final List<Statement> statements = new ArrayList<>(actual.getStatements());
    assertThat(statements).hasSize(3);
    assertThat(statements.get(0)).isSameAs(st1);
    assertThat(statements.get(1)).isSameAs(st2);
    assertThat(statements.get(2)).isSameAs(st3);
    assertThat(actual.getConsistencyLevel()).isSameAs(QUORUM);
}
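For comparison, this is roughly the batch that generateBatchStatement is expected to return, assembled by hand against the driver API; an illustrative sketch, not code from the Zeppelin interpreter.

// Sketch: an UNLOGGED batch built directly with the driver API.
BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
batch.add(new SimpleStatement("SELECT * FROM users LIMIT 10;"));
batch.add(new SimpleStatement("INSERT INTO users(id) VALUES(10);"));
batch.add(new SimpleStatement("UPDATE users SET name = 'John DOE' WHERE id=10;"));
// QUORUM mirrors the consistency level passed through CassandraQueryOptions above.
batch.setConsistencyLevel(ConsistencyLevel.QUORUM);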
Use of com.datastax.driver.core.Statement in project newts by OpenNMS.
The class CassandraIndexerTest, method insertStatementsAreDeduplicatedWhenIndexingManySamples.
@Test
public void insertStatementsAreDeduplicatedWhenIndexingManySamples() {
    CassandraSession session = mock(CassandraSession.class);
    ArgumentCaptor<Statement> statementCaptor = ArgumentCaptor.forClass(Statement.class);
    when(session.executeAsync(statementCaptor.capture())).thenReturn(mock(ResultSetFuture.class));

    PreparedStatement statement = mock(PreparedStatement.class);
    BoundStatement boundStatement = mock(BoundStatement.class);
    when(session.prepare(any(RegularStatement.class))).thenReturn(statement);
    when(statement.bind()).thenReturn(boundStatement);
    when(boundStatement.setString(any(String.class), any(String.class))).thenReturn(boundStatement);

    CassandraIndexingOptions options = new CassandraIndexingOptions.Builder()
            .withHierarchicalIndexing(true)
            .withMaxBatchSize(1)
            .build();
    MetricRegistry registry = new MetricRegistry();
    GuavaResourceMetadataCache cache = new GuavaResourceMetadataCache(2048, registry);
    CassandraIndexer indexer = new CassandraIndexer(session, 0, cache, registry, options,
            new EscapableResourceIdSplitter(), new ContextConfigurations());

    Resource r = new Resource("snmp:1589:vmware5Cpu:2:vmware5Cpu");
    List<Sample> samples = Lists.newArrayList();
    samples.add(new Sample(Timestamp.now(), r, "CpuCostopSum", MetricType.GAUGE, new Gauge(0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuIdleSum", MetricType.GAUGE, new Gauge(19299.0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuMaxLdSum", MetricType.GAUGE, new Gauge(0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuOverlapSum", MetricType.GAUGE, new Gauge(5.0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuRdySum", MetricType.GAUGE, new Gauge(41.0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuRunSum", MetricType.GAUGE, new Gauge(619.0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuSpwaitSum", MetricType.GAUGE, new Gauge(0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuSystemSum", MetricType.GAUGE, new Gauge(0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuUsagemhzAvg", MetricType.GAUGE, new Gauge(32.0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuUsedSum", MetricType.GAUGE, new Gauge(299.0)));
    samples.add(new Sample(Timestamp.now(), r, "CpuWaitSum", MetricType.GAUGE, new Gauge(19343)));

    // Index the collection of samples
    indexer.update(samples);

    // Verify the number of executeAsync calls
    verify(session, times(20)).executeAsync(any(Statement.class));
}
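Because every statement passes through the captor, the same expectation can be cross-checked against the captured list; a small hedged addition to the test above:

// Sketch: statementCaptor recorded each statement handed to executeAsync, so
// the times(20) verification can also be expressed over the captured values.
List<Statement> executed = statementCaptor.getAllValues();
assertEquals(20, executed.size());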
Use of com.datastax.driver.core.Statement in project cassandra by apache.
The class IndexQueryPagingTest, method executePagingQuery.
private void executePagingQuery(String cql, int rowCount) {
    // Execute an index query which should return all rows, setting the fetch
    // size smaller than the row count. Assert that all rows are returned,
    // so we know that paging of the results was involved.
    Session session = sessionNet();
    Statement stmt = new SimpleStatement(String.format(cql, KEYSPACE + '.' + currentTable()));
    stmt.setFetchSize(rowCount - 1);
    assertEquals(rowCount, session.execute(stmt).all().size());
}
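The driver pages transparently once a fetch size is set: iterating the ResultSet pulls further pages from the server on demand. A minimal sketch, with placeholder keyspace and table names:

// Sketch: fetchSize < total rows forces multiple pages; plain iteration suffices.
Statement stmt = new SimpleStatement("SELECT * FROM ks.tbl WHERE v = 0");
stmt.setFetchSize(10);
int rows = 0;
for (Row row : session.execute(stmt)) {
    rows++; // crossing a page boundary triggers the next background fetch
}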
Use of com.datastax.driver.core.Statement in project YCSB by brianfrankcooper.
The class CassandraCQLClient, method scan.
/**
 * Perform a range scan for a set of records in the database. Each field/value
 * pair from the result will be stored in a HashMap.
 *
 * Cassandra CQL uses the "token" method for range scans, which doesn't always
 * yield intuitive results.
 *
 * @param table
 *          The name of the table
 * @param startkey
 *          The record key of the first record to read.
 * @param recordcount
 *          The number of records to read
 * @param fields
 *          The list of fields to read, or null for all of them
 * @param result
 *          A Vector of HashMaps, where each HashMap is a set of field/value
 *          pairs for one record
 * @return Zero on success, a non-zero error code on error
 */
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
                   Vector<HashMap<String, ByteIterator>> result) {
    try {
        Statement stmt;
        Select.Builder selectBuilder;

        if (fields == null) {
            selectBuilder = QueryBuilder.select().all();
        } else {
            selectBuilder = QueryBuilder.select();
            for (String col : fields) {
                ((Select.Selection) selectBuilder).column(col);
            }
        }

        stmt = selectBuilder.from(table);

        // The statement builder is not set up right for tokens, so we build
        // the scan statement manually, dropping the trailing ';' from the
        // generated SELECT before appending the WHERE clause.
        String initialStmt = stmt.toString();
        StringBuilder scanStmt = new StringBuilder();
        scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1));
        scanStmt.append(" WHERE ");
        scanStmt.append(QueryBuilder.token(YCSB_KEY));
        scanStmt.append(" >= ");
        scanStmt.append("token('");
        scanStmt.append(startkey);
        scanStmt.append("')");
        scanStmt.append(" LIMIT ");
        scanStmt.append(recordcount);

        stmt = new SimpleStatement(scanStmt.toString());
        stmt.setConsistencyLevel(readConsistencyLevel);

        if (debug) {
            System.out.println(stmt.toString());
        }
        if (trace) {
            stmt.enableTracing();
        }

        ResultSet rs = session.execute(stmt);
        HashMap<String, ByteIterator> tuple;
        while (!rs.isExhausted()) {
            Row row = rs.one();
            tuple = new HashMap<String, ByteIterator>();
            ColumnDefinitions cd = row.getColumnDefinitions();
            for (ColumnDefinitions.Definition def : cd) {
                ByteBuffer val = row.getBytesUnsafe(def.getName());
                if (val != null) {
                    tuple.put(def.getName(), new ByteArrayByteIterator(val.array()));
                } else {
                    tuple.put(def.getName(), null);
                }
            }
            result.add(tuple);
        }
        return Status.OK;
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Error scanning with startkey: " + startkey);
        return Status.ERROR;
    }
}
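For reference, the string assembly above yields a statement of roughly this shape; "usertable" and "y_id" are the YCSB defaults and may differ per workload configuration.

// Sketch of the statement the code above builds, with placeholder values.
Statement scan = new SimpleStatement(
        "SELECT * FROM usertable WHERE token(y_id) >= token('user100') LIMIT 10");
scan.setConsistencyLevel(ConsistencyLevel.ONE); // stands in for readConsistencyLevel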