Use of com.google.common.util.concurrent.ListenableFuture in project buck by facebook.
The class ProjectBuildFileParserPoolTest, method ignoresCancellation.
@Test
public void ignoresCancellation() throws Exception {
  Cell cell = EasyMock.createMock(Cell.class);
  ListeningExecutorService executorService =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  int numberOfJobs = 5;
  final CountDownLatch waitTillAllWorkIsDone = new CountDownLatch(numberOfJobs);
  final CountDownLatch waitTillCanceled = new CountDownLatch(1);
  try (ProjectBuildFileParserPool parserPool = new ProjectBuildFileParserPool(
      /* maxParsers */ 1,
      createMockParserFactory(() -> {
        waitTillCanceled.await();
        waitTillAllWorkIsDone.countDown();
        return ImmutableList.of();
      }))) {
    ImmutableSet<ListenableFuture<?>> futures =
        scheduleWork(cell, parserPool, executorService, numberOfJobs);
    for (ListenableFuture<?> future : futures) {
      future.cancel(true);
    }
    waitTillCanceled.countDown();
    // We're making sure cancel is ignored by the pool by waiting for the supposedly canceled
    // work to go through.
    waitTillAllWorkIsDone.await(1, TimeUnit.SECONDS);
  } finally {
    executorService.shutdown();
  }
}
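The helpers scheduleWork and createMockParserFactory belong to the buck test class and are not shown here. As a rough, self-contained illustration of the behaviour being asserted, the sketch below uses plain Guava, with Futures.nonCancellationPropagating standing in for the pool's cancellation-ignoring wrapper; the class and variable names are made up for the example and are not part of buck.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class IgnoredCancellationSketch {
  public static void main(String[] args) throws Exception {
    int numberOfJobs = 5;
    CountDownLatch allWorkDone = new CountDownLatch(numberOfJobs);
    CountDownLatch released = new CountDownLatch(1);
    ListeningExecutorService pool =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));

    List<ListenableFuture<?>> futures = new ArrayList<>();
    for (int i = 0; i < numberOfJobs; i++) {
      // Wrapping the future makes cancel() a no-op for the underlying task,
      // mimicking a pool that ignores cancellation (illustrative stand-in only).
      futures.add(Futures.nonCancellationPropagating(pool.submit(() -> {
        released.await();
        allWorkDone.countDown();
        return null;
      })));
    }

    for (ListenableFuture<?> future : futures) {
      future.cancel(true);
    }
    released.countDown();

    // The "cancelled" work still runs to completion, so this await succeeds.
    System.out.println("all work ran: " + allWorkDone.await(1, TimeUnit.SECONDS));
    pool.shutdown();
  }
}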
Use of com.google.common.util.concurrent.ListenableFuture in project buck by facebook.
The class ProjectBuildFileParserPoolTest, method closeWhenRunningJobs.
@Test
public void closeWhenRunningJobs() throws Exception {
  Cell cell = EasyMock.createMock(Cell.class);
  ListeningExecutorService executorService =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  final CountDownLatch waitTillClosed = new CountDownLatch(1);
  final CountDownLatch firstJobRunning = new CountDownLatch(1);
  final AtomicInteger postCloseWork = new AtomicInteger(0);
  ImmutableSet<ListenableFuture<?>> futures;
  try (ProjectBuildFileParserPool parserPool = new ProjectBuildFileParserPool(
      /* maxParsers */ 1,
      createMockParserFactory(() -> {
        firstJobRunning.countDown();
        waitTillClosed.await();
        return ImmutableList.of();
      }))) {
    futures = scheduleWork(cell, parserPool, executorService, 5);
    for (ListenableFuture<?> future : futures) {
      Futures.addCallback(future, new FutureCallback<Object>() {
        @Override
        public void onSuccess(@Nullable Object result) {
          postCloseWork.incrementAndGet();
        }

        @Override
        public void onFailure(Throwable t) {
        }
      });
    }
    firstJobRunning.await(1, TimeUnit.SECONDS);
  }
  waitTillClosed.countDown();
  List<Object> futureResults = Futures.successfulAsList(futures).get(1, TimeUnit.SECONDS);
  // The threadpool is of size 1, so we had 1 job in the 'running' state. That one job completed
  // normally, the rest should have been cancelled.
  int expectedCompletedJobs = 1;
  int completedJobs = FluentIterable.from(futureResults).filter(Objects::nonNull).size();
  assertThat(completedJobs, Matchers.equalTo(expectedCompletedJobs));
  executorService.shutdown();
  assertThat(executorService.awaitTermination(1, TimeUnit.SECONDS), Matchers.is(true));
  assertThat(postCloseWork.get(), Matchers.equalTo(expectedCompletedJobs));
}
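The final assertions hinge on how Futures.successfulAsList represents unsuccessful inputs: cancelled or failed futures surface as null entries rather than failing the combined future. A minimal sketch of that behaviour, using hypothetical SettableFuture instances in place of the pool's real futures:

import com.google.common.collect.FluentIterable;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;
import java.util.List;
import java.util.Objects;

public class SuccessfulAsListSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical futures standing in for the scheduled parser jobs.
    SettableFuture<String> completed = SettableFuture.create();
    SettableFuture<String> cancelled = SettableFuture.create();
    SettableFuture<String> failed = SettableFuture.create();

    completed.set("done");
    cancelled.cancel(true);
    failed.setException(new RuntimeException("boom"));

    // successfulAsList itself succeeds; unsuccessful inputs become null entries.
    List<String> results =
        Futures.successfulAsList(completed, cancelled, failed).get();

    // Counting non-null entries counts the jobs that actually completed.
    int completedJobs =
        FluentIterable.from(results).filter(Objects::nonNull).size();
    System.out.println(completedJobs); // prints 1
  }
}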
Use of com.google.common.util.concurrent.ListenableFuture in project pinpoint by naver.
The class ActiveTraceRepositoryTest, method executeTransactions.
private ListenableFuture<List<TraceThreadTuple>> executeTransactions(
    CountDownLatch awaitLatch,
    CountDownLatch executeLatch,
    int newTransactionCount,
    int sampledContinuationCount,
    int unsampledContinuationCount) {
  final int totalTransactionCount =
      newTransactionCount + sampledContinuationCount + unsampledContinuationCount;
  final ListeningExecutorService executor =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(totalTransactionCount));
  final List<ListenableFuture<TraceThreadTuple>> futures =
      new ArrayList<ListenableFuture<TraceThreadTuple>>();
  for (int i = 0; i < newTransactionCount; ++i) {
    futures.add(executeNewTrace(executor, awaitLatch, executeLatch));
  }
  for (int i = 0; i < sampledContinuationCount; ++i) {
    futures.add(executeSampledContinuedTrace(executor, awaitLatch, executeLatch, i));
  }
  for (int i = 0; i < unsampledContinuationCount; ++i) {
    futures.add(executeUnsampledContinuedTrace(executor, awaitLatch, executeLatch));
  }
  return Futures.allAsList(futures);
}
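The helper returns one combined future built with Futures.allAsList. A short sketch of what a caller gets back, assuming trivial stand-in tasks in place of executeNewTrace and the continuation helpers: the combined future yields every result in submission order and fails as soon as any single input fails.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListSketch {
  public static void main(String[] args) throws Exception {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));
    List<ListenableFuture<Integer>> futures = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      final int id = i;
      // Stand-in for executeNewTrace / executeSampledContinuedTrace / etc.
      futures.add(executor.submit(() -> id * 10));
    }

    // allAsList completes when every input completes, preserving submission order;
    // if any input fails or is cancelled, the combined future fails too.
    List<Integer> results = Futures.allAsList(futures).get();
    System.out.println(results); // prints [0, 10, 20]
    executor.shutdown();
  }
}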
Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
The class NamedIntrospectionHandler, method testConcurrencyStartStoooooooooop.
@Test
public void testConcurrencyStartStoooooooooop() throws Exception {
  lookupReferencesManager.stop();
  lookupReferencesManager.start();
  final CyclicBarrier cyclicBarrier = new CyclicBarrier(CONCURRENT_THREADS);
  final Runnable start = new Runnable() {
    @Override
    public void run() {
      try {
        cyclicBarrier.await();
      } catch (InterruptedException | BrokenBarrierException e) {
        throw Throwables.propagate(e);
      }
      lookupReferencesManager.stop();
    }
  };
  final Collection<ListenableFuture<?>> futures = new ArrayList<>(CONCURRENT_THREADS);
  for (int i = 0; i < CONCURRENT_THREADS; ++i) {
    futures.add(executorService.submit(start));
  }
  Futures.allAsList(futures).get(100, TimeUnit.MILLISECONDS);
  for (ListenableFuture future : futures) {
    Assert.assertNull(future.get());
  }
}
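The test releases all threads at once through a CyclicBarrier and then bounds the total duration of the concurrent stop() calls with a timed allAsList(...).get(...). A self-contained sketch of the same coordination pattern, with an AtomicInteger standing in for lookupReferencesManager so the Guava pieces can be run in isolation:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ConcurrentStopSketch {
  public static void main(String[] args) throws Exception {
    final int threads = 8;
    final CyclicBarrier barrier = new CyclicBarrier(threads);
    final AtomicInteger stops = new AtomicInteger(); // stand-in for lookupReferencesManager.stop()
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(threads));

    List<ListenableFuture<?>> futures = new ArrayList<>();
    for (int i = 0; i < threads; i++) {
      futures.add(executor.submit(() -> {
        try {
          barrier.await(); // release every thread at the same instant
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
        stops.incrementAndGet(); // the racing "stop" calls must all return
      }));
    }

    // Throws TimeoutException if any of the racing calls hangs.
    Futures.allAsList(futures).get(1, TimeUnit.SECONDS);

    // Futures from submit(Runnable) resolve to null on success, as asserted in the test above.
    for (ListenableFuture<?> future : futures) {
      System.out.println(future.get() == null);
    }
    executor.shutdown();
  }
}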
Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
The class JettyQosTest, method testQoS.
@Test(timeout = 60_000L)
public void testQoS() throws Exception {
  final int fastThreads = 20;
  final int slowThreads = 15;
  final int slowRequestsPerThread = 5;
  final int fastRequestsPerThread = 200;
  final HttpClient fastClient = new ClientHolder(fastThreads).getClient();
  final HttpClient slowClient = new ClientHolder(slowThreads).getClient();
  final ExecutorService fastPool = Execs.multiThreaded(fastThreads, "fast-%d");
  final ExecutorService slowPool = Execs.multiThreaded(slowThreads, "slow-%d");
  final CountDownLatch latch = new CountDownLatch(fastThreads * fastRequestsPerThread);
  final AtomicLong fastCount = new AtomicLong();
  final AtomicLong slowCount = new AtomicLong();
  final AtomicLong fastElapsed = new AtomicLong();
  final AtomicLong slowElapsed = new AtomicLong();
  for (int i = 0; i < slowThreads; i++) {
    slowPool.submit(new Runnable() {
      @Override
      public void run() {
        for (int i = 0; i < slowRequestsPerThread; i++) {
          long startTime = System.currentTimeMillis();
          try {
            ListenableFuture<StatusResponseHolder> go = slowClient.go(
                new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/slow/hello")),
                new StatusResponseHandler(Charset.defaultCharset()));
            go.get();
            slowCount.incrementAndGet();
            slowElapsed.addAndGet(System.currentTimeMillis() - startTime);
          } catch (InterruptedException e) {
            // BE COOL
          } catch (Exception e) {
            e.printStackTrace();
            throw Throwables.propagate(e);
          }
        }
      }
    });
  }
  // wait for jetty server pool to completely fill up
  while (server.getThreadPool().getIdleThreads() != 0) {
    Thread.sleep(25);
  }
  for (int i = 0; i < fastThreads; i++) {
    fastPool.submit(new Runnable() {
      @Override
      public void run() {
        for (int i = 0; i < fastRequestsPerThread; i++) {
          long startTime = System.currentTimeMillis();
          try {
            ListenableFuture<StatusResponseHolder> go = fastClient.go(
                new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/default")),
                new StatusResponseHandler(Charset.defaultCharset()));
            go.get();
            fastCount.incrementAndGet();
            fastElapsed.addAndGet(System.currentTimeMillis() - startTime);
            latch.countDown();
          } catch (InterruptedException e) {
            // BE COOL
          } catch (Exception e) {
            e.printStackTrace();
            throw Throwables.propagate(e);
          }
        }
      }
    });
  }
  // Wait for all fast requests to be served
  latch.await();
  slowPool.shutdownNow();
  fastPool.shutdown();
  // check that fast requests finished quickly
  Assert.assertTrue(fastElapsed.get() / fastCount.get() < 500);
}
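The closing assertion divides accumulated wall-clock time by the number of completed fast requests to bound their mean latency. A condensed, hypothetical sketch of that bookkeeping, with a short Thread.sleep standing in for the fastClient.go(...).get() round trip and illustrative thread counts:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

public class MeanLatencySketch {
  public static void main(String[] args) throws Exception {
    final int threads = 4;
    final int requestsPerThread = 10;
    final CountDownLatch latch = new CountDownLatch(threads * requestsPerThread);
    final AtomicLong count = new AtomicLong();
    final AtomicLong elapsed = new AtomicLong();
    ExecutorService pool = Executors.newFixedThreadPool(threads);

    for (int t = 0; t < threads; t++) {
      pool.submit(() -> {
        for (int i = 0; i < requestsPerThread; i++) {
          long startTime = System.currentTimeMillis();
          try {
            Thread.sleep(5); // stand-in for the blocking fastClient.go(...).get() call
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
          }
          count.incrementAndGet();
          elapsed.addAndGet(System.currentTimeMillis() - startTime);
          latch.countDown();
        }
      });
    }

    latch.await();
    pool.shutdown();
    // Same shape as the final assertion: mean latency = total elapsed / completed count.
    System.out.println("mean request ms: " + elapsed.get() / count.get());
  }
}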