Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io,
in the class LifecycleTest, method testConcurrentStartStopOnce.
@Test
public void testConcurrentStartStopOnce() throws Exception {
    final int numThreads = 10;
    ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
    final Lifecycle lifecycle = new Lifecycle();
    final AtomicLong handlerFailedCount = new AtomicLong(0L);
    final Lifecycle.Handler exceptionalHandler = new Lifecycle.Handler() {

        final AtomicBoolean started = new AtomicBoolean(false);

        @Override
        public void start() {
            if (!started.compareAndSet(false, true)) {
                handlerFailedCount.incrementAndGet();
                throw new ISE("Already started");
            }
        }

        @Override
        public void stop() {
            if (!started.compareAndSet(true, false)) {
                handlerFailedCount.incrementAndGet();
                throw new ISE("Not yet started");
            }
        }
    };
    lifecycle.addHandler(exceptionalHandler);
    Collection<ListenableFuture<?>> futures = new ArrayList<>(numThreads);
    final AtomicBoolean threadsStartLatch = new AtomicBoolean(false);
    final AtomicInteger threadFailedCount = new AtomicInteger(0);
    for (int i = 0; i < numThreads; ++i) {
        futures.add(executorService.submit(() -> {
            try {
                while (!threadsStartLatch.get()) {
                    // await
                }
                lifecycle.start();
            } catch (Exception e) {
                threadFailedCount.incrementAndGet();
            }
        }));
    }
    try {
        threadsStartLatch.set(true);
        Futures.allAsList(futures).get();
    } finally {
        lifecycle.stop();
    }
    // Exactly one of the ten threads should succeed in starting the lifecycle.
    Assert.assertEquals(numThreads - 1, threadFailedCount.get());
    Assert.assertEquals(0, handlerFailedCount.get());
    executorService.shutdownNow();
}
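Stripped of the concurrency harness, the Lifecycle API this test exercises reduces to the sketch below. The handler body is hypothetical; only addHandler, start, stop and the Handler interface that appear in the test are assumed.

void basicLifecycleUsage() throws Exception {
    Lifecycle lifecycle = new Lifecycle();
    lifecycle.addHandler(new Lifecycle.Handler() {
        @Override
        public void start() {
            // acquire resources here; the test's handler throws if started twice
        }

        @Override
        public void stop() {
            // release whatever start() acquired
        }
    });
    lifecycle.start();   // invokes start() on every registered handler
    // ... application work ...
    lifecycle.stop();    // invokes stop() on every registered handler
}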
Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io,
in the class NamespaceExtractionCacheManagersTest, method testRacyCreation.
@Test(timeout = 60_000L)
public void testRacyCreation() throws Exception {
    final int concurrentThreads = 10;
    final ListeningExecutorService service = MoreExecutors.listeningDecorator(Execs.multiThreaded(concurrentThreads, "offheaptest-%s"));
    final List<ListenableFuture<?>> futures = new ArrayList<>();
    final CountDownLatch thunder = new CountDownLatch(1);
    try {
        for (int i = 0; i < concurrentThreads; ++i) {
            futures.add(service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        thunder.await();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                    for (int i = 0; i < 1000; ++i) {
                        CacheHandler cacheHandler = manager.createCache();
                        cacheHandler.close();
                    }
                }
            }));
        }
        thunder.countDown();
        Futures.allAsList(futures).get();
    } finally {
        service.shutdown();
        service.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    }
    Assert.assertEquals(0, manager.cacheCount());
}
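The single-count CountDownLatch ("thunder") is what makes the creation genuinely racy: every worker blocks in await() and all of them are released by one countDown(). A minimal standalone sketch of that start-gate pattern, using only standard java.util.concurrent types (the names are illustrative):

void releaseAllWorkersAtOnce() {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CountDownLatch startGate = new CountDownLatch(1);
    for (int i = 0; i < 4; ++i) {
        pool.submit(() -> {
            startGate.await();   // every worker parks here
            // ... the racy work under test runs here, on all workers at once ...
            return null;         // the lambda is a Callable, so await()'s checked exception is permitted
        });
    }
    startGate.countDown();       // open the gate: all workers proceed simultaneously
    pool.shutdown();
}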
Use of com.google.common.util.concurrent.ListeningExecutorService in project snow-owl by b2ihealthcare,
in the class Promise, method wrap.
/**
 * @param func - the function to wrap into a {@link Promise}
 * @return a {@link Promise} that resolves with the value computed by the given function
 * @since 4.6
 */
@Beta
public static <T> Promise<T> wrap(final Callable<T> func) {
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    final ListenableFuture<T> submit = executor.submit(func);
    executor.shutdown();
    return wrap(submit);
}
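A hedged usage sketch; the work inside the Callable is illustrative, expensiveComputation() is a hypothetical helper, and only the wrap(Callable) signature shown above is assumed.

Promise<Integer> promise = Promise.wrap(() -> {
    // potentially slow work, run on the dedicated single-thread executor created by wrap()
    return expensiveComputation();   // hypothetical helper
});

Note that calling shutdown() immediately after submit() is safe: the already-submitted task still runs to completion, and the single-use executor is reclaimed once it finishes.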
Use of com.google.common.util.concurrent.ListeningExecutorService in project crate by crate,
in the class ThreadPools, method runWithAvailableThreads.
/**
 * Runs each callable of the callableCollection in its own thread unless there aren't enough threads available.
 * In that case it will partition the callableCollection to match the number of available threads.
 *
 * @param executor executor that is used to execute the callables
 * @param poolSize the corePoolSize of the given executor
 * @param callableCollection a collection of callables that should be executed
 * @param mergeFunction function that will be applied to merge the results of multiple callables in case they are
 *                      executed together because the threadPool is exhausted
 * @param <T> type of the final result
 * @return a future that will return a list of the results of the callables
 * @throws RejectedExecutionException in case all threads are busy and overloaded.
 */
public static <T> ListenableFuture<List<T>> runWithAvailableThreads(ThreadPoolExecutor executor, int poolSize, Collection<Callable<T>> callableCollection, final Function<List<T>, T> mergeFunction) throws RejectedExecutionException {
    ListeningExecutorService listeningExecutorService = MoreExecutors.listeningDecorator(executor);
    List<ListenableFuture<T>> futures;
    int availableThreads = Math.max(poolSize - executor.getActiveCount(), 1);
    if (availableThreads < callableCollection.size()) {
        Iterable<List<Callable<T>>> partition = Iterables.partition(callableCollection, callableCollection.size() / availableThreads);
        futures = new ArrayList<>(availableThreads + 1);
        for (final List<Callable<T>> callableList : partition) {
            futures.add(listeningExecutorService.submit(new Callable<T>() {
                @Override
                public T call() throws Exception {
                    List<T> results = new ArrayList<T>(callableList.size());
                    for (Callable<T> tCallable : callableList) {
                        results.add(tCallable.call());
                    }
                    return mergeFunction.apply(results);
                }
            }));
        }
    } else {
        futures = new ArrayList<>(callableCollection.size());
        for (Callable<T> callable : callableCollection) {
            futures.add(listeningExecutorService.submit(callable));
        }
    }
    return Futures.allAsList(futures);
}
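A hedged usage sketch: the executor, task list, and merge function are illustrative, and it assumes mergeFunction accepts a lambda (which holds for both Guava's Function and java.util.function.Function on Java 8+).

ListenableFuture<List<Integer>> sumInChunks() throws RejectedExecutionException {
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(4);
    List<Callable<Integer>> tasks = new ArrayList<>();
    for (int i = 1; i <= 100; i++) {
        final int value = i;
        tasks.add(() -> value);   // each task simply yields its number
    }
    // With at most 4 idle threads, the 100 tasks are partitioned; each partition's
    // results are folded into one partial sum by the merge function.
    return ThreadPools.runWithAvailableThreads(executor, 4, tasks, results -> {
        int sum = 0;
        for (Integer r : results) {
            sum += r;
        }
        return sum;
    });
}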
Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io,
in the class SegmentMetadataQueryRunnerFactory, method mergeRunners.
@Override
public QueryRunner<SegmentAnalysis> mergeRunners(ExecutorService exec, Iterable<QueryRunner<SegmentAnalysis>> queryRunners) {
    final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(exec);
    return new ConcatQueryRunner<SegmentAnalysis>(Sequences.map(Sequences.simple(queryRunners), new Function<QueryRunner<SegmentAnalysis>, QueryRunner<SegmentAnalysis>>() {

        @Override
        public QueryRunner<SegmentAnalysis> apply(final QueryRunner<SegmentAnalysis> input) {
            return new QueryRunner<SegmentAnalysis>() {

                @Override
                public Sequence<SegmentAnalysis> run(final Query<SegmentAnalysis> query, final Map<String, Object> responseContext) {
                    final int priority = BaseQuery.getContextPriority(query, 0);
                    ListenableFuture<Sequence<SegmentAnalysis>> future = queryExecutor.submit(new AbstractPrioritizedCallable<Sequence<SegmentAnalysis>>(priority) {

                        @Override
                        public Sequence<SegmentAnalysis> call() throws Exception {
                            return Sequences.simple(Sequences.toList(input.run(query, responseContext), new ArrayList<SegmentAnalysis>()));
                        }
                    });
                    try {
                        queryWatcher.registerQuery(query, future);
                        final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT, (Number) null);
                        return timeout == null ? future.get() : future.get(timeout.longValue(), TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                        future.cancel(true);
                        throw new QueryInterruptedException(e);
                    } catch (CancellationException e) {
                        throw new QueryInterruptedException(e);
                    } catch (TimeoutException e) {
                        log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                        future.cancel(true);
                        throw new QueryInterruptedException(e);
                    } catch (ExecutionException e) {
                        throw Throwables.propagate(e.getCause());
                    }
                }
            };
        }
    }));
}
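Independent of the Druid query types, the core pattern above is: decorate the executor, submit a callable, then block on get() with an optional timeout and cancel the future on interruption or timeout. A Druid-free sketch of that pattern using only Guava and JDK APIs (computeAnswer() is a hypothetical helper; the timeout value is illustrative):

String runWithTimeout() {
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    ListenableFuture<String> future = exec.submit(() -> computeAnswer());   // hypothetical helper
    try {
        // Bounded wait, mirroring the timeout branch above.
        return future.get(5, TimeUnit.SECONDS);
    } catch (TimeoutException | InterruptedException e) {
        future.cancel(true);   // cancel work whose result can no longer be used
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e.getCause());
    } finally {
        exec.shutdownNow();
    }
}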