Use of org.locationtech.geowave.datastore.cassandra.CassandraRow in project geowave by locationtech.
The class CassandraReader, method initScanner.
protected void initScanner() {
  final Collection<SinglePartitionQueryRanges> ranges =
      readerParams.getQueryRanges().getPartitionQueryRanges();
  if ((ranges != null) && !ranges.isEmpty()) {
    iterator =
        operations.getBatchedRangeRead(
            readerParams.getIndex().getName(),
            readerParams.getAdapterIds(),
            ranges,
            DataStoreUtils.isMergingIteratorRequired(readerParams, visibilityEnabled),
            rowTransformer,
            new ClientVisibilityFilter(
                Sets.newHashSet(readerParams.getAdditionalAuthorizations()))).results();
  } else {
    // TODO figure out the query select by adapter IDs here
    final Select select = operations.getSelect(readerParams.getIndex().getName());
    CloseableIterator<CassandraRow> results = operations.executeQuery(select.build());
    if ((readerParams.getAdapterIds() != null) && (readerParams.getAdapterIds().length > 0)) {
      // TODO because we aren't filtering server-side by adapter ID,
      // we will need to filter here on the client
      results =
          new CloseableIteratorWrapper<>(
              results,
              Iterators.filter(
                  results,
                  input -> ArrayUtils.contains(
                      readerParams.getAdapterIds(),
                      input.getAdapterId())));
    }
    iterator = wrapResults(results, readerParams);
  }
}
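The fallback path above cannot push the adapter-ID predicate down to Cassandra, so it filters on the client with Guava's Iterators.filter. Below is a minimal, self-contained sketch of that filtering pattern; the AdapterRow interface and the sample data are hypothetical stand-ins for CassandraRow, and only Iterators.filter and ArrayUtils.contains are real library calls.

import java.util.Arrays;
import java.util.Iterator;
import org.apache.commons.lang3.ArrayUtils;
import com.google.common.collect.Iterators;

public class AdapterIdFilterSketch {
  // hypothetical stand-in for the getAdapterId() accessor on a GeoWave row
  interface AdapterRow {
    short getAdapterId();
  }

  public static void main(final String[] args) {
    // the adapter IDs requested by the query
    final short[] adapterIds = new short[] {1, 3};
    // three sample rows with adapter IDs 1, 2, and 3
    final Iterator<AdapterRow> rows =
        Arrays.<AdapterRow>asList(() -> (short) 1, () -> (short) 2, () -> (short) 3).iterator();
    // keep only rows whose adapter ID is in the requested set, the same check the
    // reader applies when it cannot filter server-side
    final Iterator<AdapterRow> filtered =
        Iterators.filter(rows, input -> ArrayUtils.contains(adapterIds, input.getAdapterId()));
    filtered.forEachRemaining(row -> System.out.println(row.getAdapterId()));
  }
}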
Use of org.locationtech.geowave.datastore.cassandra.CassandraRow in project geowave by locationtech.
The class BatchedRangeRead, method executeQueryAsync.
public CloseableIterator<T> executeQueryAsync(final Statement... statements) {
  // first create a list of asynchronous query executions
  final List<CompletionStage<AsyncResultSet>> futures =
      Lists.newArrayListWithExpectedSize(statements.length);
  final BlockingQueue<Object> results = new LinkedBlockingQueue<>(MAX_BOUNDED_READS_ENQUEUED);
  new Thread(new Runnable() {
    @Override
    public void run() {
      // set it to 1 to make sure all queries are submitted in the loop
      final AtomicInteger queryCount = new AtomicInteger(1);
      for (final Statement s : statements) {
        try {
          readSemaphore.acquire();
          final CompletionStage<AsyncResultSet> f = operations.getSession().executeAsync(s);
          synchronized (futures) {
            futures.add(f);
          }
          queryCount.incrementAndGet();
          f.whenCompleteAsync((result, t) -> {
            if (result != null) {
              try {
                final Iterator<GeoWaveRow> iterator =
                    (Iterator) Streams.stream(ResultSets.newInstance(result)).map(
                        row -> new CassandraRow(row)).filter(filter).iterator();
                rowTransformer.apply(
                    rowMerging ? new GeoWaveRowMergingIterator(iterator) : iterator).forEachRemaining(
                        row -> {
                          try {
                            results.put(row);
                          } catch (final InterruptedException e) {
                            LOGGER.warn("interrupted while waiting to enqueue a cassandra result", e);
                          }
                        });
              } finally {
                checkFinalize(queryCount, results, readSemaphore);
              }
            } else if (t != null) {
              checkFinalize(queryCount, results, readSemaphore);
              // can do logging or start counting errors
              if (!(t instanceof CancellationException)) {
                LOGGER.error("Failure from async query", t);
                throw new RuntimeException(t);
              }
            }
          });
        } catch (final InterruptedException e) {
          LOGGER.warn("Exception while executing query", e);
          readSemaphore.release();
        }
      }
      // then decrement to account for the initial count of 1
      if (queryCount.decrementAndGet() <= 0) {
        // all statements have already completed, so terminate the consumer's queue
        try {
          results.put(RowConsumer.POISON);
        } catch (final InterruptedException e) {
          LOGGER.error("Interrupted while finishing blocking queue, this may result in deadlock!");
        }
      }
    }
  }, "Cassandra Query Executor").start();
  return new CloseableIteratorWrapper<T>(new Closeable() {
    @Override
    public void close() throws IOException {
      synchronized (futures) {
        for (final CompletionStage<AsyncResultSet> f : futures) {
          f.toCompletableFuture().cancel(true);
        }
      }
    }
  }, new RowConsumer(results));
}
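The query thread and the returned RowConsumer communicate through the bounded BlockingQueue, with RowConsumer.POISON marking the end of the result stream. RowConsumer itself is not shown above; the following is a minimal sketch of that blocking-queue "poison pill" consumer pattern, assuming the consumer simply drains the same queue. Class and field names here are hypothetical; only the java.util.concurrent types are real APIs.

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PoisonPillConsumer<T> implements Iterator<T> {
  // sentinel object signaling that the producer will enqueue no more rows
  public static final Object POISON = new Object();
  private final BlockingQueue<Object> queue;
  private Object next = null;
  private boolean done = false;

  public PoisonPillConsumer(final BlockingQueue<Object> queue) {
    this.queue = queue;
  }

  @Override
  public boolean hasNext() {
    if (done) {
      return false;
    }
    if (next == null) {
      try {
        // block until either a row or the poison pill arrives
        next = queue.take();
      } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        return false;
      }
    }
    if (next == POISON) {
      done = true;
      return false;
    }
    return true;
  }

  @SuppressWarnings("unchecked")
  @Override
  public T next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    final Object result = next;
    next = null;
    return (T) result;
  }

  public static void main(final String[] args) throws InterruptedException {
    final BlockingQueue<Object> queue = new LinkedBlockingQueue<>(4);
    queue.put("row-1");
    queue.put("row-2");
    // the producer signals completion, as the query executor thread does above
    queue.put(POISON);
    new PoisonPillConsumer<String>(queue).forEachRemaining(System.out::println);
  }
}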