Use of java.util.concurrent.CompletionException in project crate by crate: the class DistributingConsumerTest, method testDistributingConsumerForwardsFailure.
@Test
public void testDistributingConsumerForwardsFailure() throws Exception {
    // Wire up a fake distributed-result pipeline: consumer -> transport -> downstream context.
    Streamer<?>[] intStreamers = { DataTypes.INTEGER.streamer() };
    TestingBatchConsumer resultConsumer = new TestingBatchConsumer();
    PageDownstreamContext downstreamContext = createPageDownstreamContext(intStreamers, resultConsumer);
    TransportDistributedResultAction resultAction = createFakeTransport(intStreamers, downstreamContext);
    DistributingConsumer consumerUnderTest = createDistributingConsumer(intStreamers, resultAction);

    // Feed a wrapped failure into the consumer; the cause must be unwrapped and
    // forwarded to the collecting side rather than the CompletionException itself.
    consumerUnderTest.accept(null, new CompletionException(new IllegalArgumentException("foobar")));

    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage("foobar");
    resultConsumer.getResult();
}
Use of java.util.concurrent.CompletionException in project hbase by apache: the class TestAsyncTableAdminApi, method testCreateTableWithOnlyEmptyStartRow.
@Test(timeout = 300000)
public void testCreateTableWithOnlyEmptyStartRow() throws IOException {
    byte[] tableNameBytes = Bytes.toBytes(name.getMethodName());

    // A single split key consisting only of the empty byte array is invalid input.
    byte[][] splits = { HConstants.EMPTY_BYTE_ARRAY };

    HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableNameBytes));
    tableDescriptor.addFamily(new HColumnDescriptor("col"));

    try {
        admin.createTable(tableDescriptor, splits).join();
        fail("Test case should fail as empty split key is passed.");
    } catch (CompletionException e) {
        // The async admin wraps the validation failure; the root cause must be an IAE.
        Throwable cause = e.getCause();
        assertTrue(cause instanceof IllegalArgumentException);
    }
}
Use of java.util.concurrent.CompletionException in project caffeine by ben-manes: the class CacheLoader, method asyncLoad.
/**
* Asynchronously computes or retrieves the value corresponding to {@code key}.
*
* @param key the non-null key whose value should be loaded
* @param executor the executor that asynchronously loads the entry
* @return the future value associated with {@code key}
*/
@Override
@Nonnull
default default CompletableFuture<V> asyncLoad(@Nonnull K key, @Nonnull Executor executor) {
requireNonNull(key);
requireNonNull(executor);
return CompletableFuture.supplyAsync(() -> {
try {
return load(key);
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new CompletionException(e);
}
}, executor);
}
Use of java.util.concurrent.CompletionException in project caffeine by ben-manes: the class LocalLoadingCache, method bulkLoad.
/**
* Performs a non-blocking bulk load of the missing keys. Any missing entry that materializes
* during the load are replaced when the loaded entries are inserted into the cache.
*/
default default void bulkLoad(Set<K> keysToLoad, Map<K, V> result) {
boolean success = false;
long startTime = cache().statsTicker().read();
try {
@SuppressWarnings("unchecked") Map<K, V> loaded = (Map<K, V>) cacheLoader().loadAll(keysToLoad);
loaded.forEach((key, value) -> {
cache().put(key, value, /* notifyWriter */
false);
if (keysToLoad.contains(key)) {
result.put(key, value);
}
});
success = !loaded.isEmpty();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new CompletionException(e);
} finally {
long loadTime = cache().statsTicker().read() - startTime;
if (success) {
cache().statsCounter().recordLoadSuccess(loadTime);
} else {
cache().statsCounter().recordLoadFailure(loadTime);
}
}
}
Use of java.util.concurrent.CompletionException in project torodb by torodb: the class DefaultOplogApplier, method apply.
@Override
public ApplyingJob apply(OplogFetcher fetcher, ApplierContext applierContext) {
// Builds and runs an Akka stream that batches oplog entries from the fetcher,
// applies each batch, and records the last applied operation. Returns a job
// handle that exposes the kill switch and the stream's completion future.
Materializer materializer = ActorMaterializer.create(actorSystem);
// Pipeline: source -> batcher -> kill switch (kept as materialized value) ->
// per-batch application -> metrics -> sink that stores the last applied op.
RunnableGraph<Pair<UniqueKillSwitch, CompletionStage<Done>>> graph = createOplogSource(fetcher).async().via(createBatcherFlow(applierContext)).viaMat(KillSwitches.single(), Keep.right()).async().map(analyzedElem -> {
for (AnalyzedOplogBatch analyzedOplogBatch : analyzedElem.analyzedBatch) {
batchExecutor.apply(analyzedOplogBatch, applierContext);
}
return analyzedElem;
}).map(this::metricExecution).toMat(Sink.foreach(this::storeLastAppliedOp), (killSwitch, completionStage) -> new Pair<>(killSwitch, completionStage));
Pair<UniqueKillSwitch, CompletionStage<Done>> pair = graph.run(materializer);
UniqueKillSwitch killSwitch = pair.first();
// When the stream finishes (normally or not), close the fetcher and translate
// the outcome into logging plus a kill-switch shutdown where appropriate.
CompletableFuture<Empty> whenComplete = pair.second().toCompletableFuture().thenApply(done -> Empty.getInstance()).whenComplete((done, t) -> {
fetcher.close();
if (done != null) {
LOGGER.trace("Oplog replication stream finished normally");
} else {
Throwable cause;
// CompletableFuture wraps stream failures in CompletionException; unwrap once.
if (t instanceof CompletionException) {
cause = t.getCause();
} else {
cause = t;
}
//the completable future has been cancelled externally
if (cause instanceof CancellationException) {
LOGGER.debug("Oplog replication stream has been cancelled");
killSwitch.shutdown();
} else {
//in this case the exception should have come from the stream itself
cause = Throwables.getRootCause(cause);
LOGGER.error("Oplog replication stream finished exceptionally: " + cause.getLocalizedMessage(), cause);
//the stream should already be finished exceptionally, but just in case we
//notify the kill switch to stop the stream.
killSwitch.shutdown();
}
}
});
return new DefaultApplyingJob(killSwitch, whenComplete);
}
Aggregations