Example 66 with ListenableFuture

use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

the class LookupCoordinatorManager method deleteAllOnTier.

void deleteAllOnTier(final String tier, final Collection<String> dropLookups) throws ExecutionException, InterruptedException, IOException {
    if (dropLookups.isEmpty()) {
        LOG.debug("Nothing to drop");
        return;
    }
    final Collection<URL> urls = getAllHostsAnnounceEndpoint(tier);
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                for (final String drop : dropLookups) {
                    final URL lookupURL;
                    try {
                        lookupURL = new URL(url.getProtocol(), url.getHost(), url.getPort(), String.format("%s/%s", url.getFile(), drop));
                    } catch (MalformedURLException e) {
                        throw new ISE(e, "Error creating url for [%s]/[%s]", url, drop);
                    }
                    try {
                        deleteOnHost(lookupURL);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        LOG.warn("Delete [%s] interrupted", lookupURL);
                        throw Throwables.propagate(e);
                    } catch (IOException | ExecutionException e) {
                        // Don't raise as ExecutionException. Just log and continue
                        LOG.makeAlert(e, "Error deleting [%s]", lookupURL).emit();
                    }
                }
            }
        }));
    }
    final ListenableFuture<?> allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
        throw new ExecutionException("Timeout in updating hosts! Attempting to cancel", e);
    }
}
Also used : MalformedURLException(java.net.MalformedURLException) ArrayList(java.util.ArrayList) IOException(java.io.IOException) URL(java.net.URL) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ISE(io.druid.java.util.common.ISE) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException)
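
The essential pattern in this method is fan-out plus a bounded join: submit one task per host to a ListeningExecutorService, combine the resulting futures with Futures.allAsList, wait with a timeout, and cancel the combined future (interrupting the stragglers) when the deadline passes. The stripped-down sketch below shows only that skeleton; the pool size, the doDelete helper, and the timeoutMillis parameter are illustrative stand-ins rather than parts of the Druid class above.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class FanOutWithTimeout {

    private final ListeningExecutorService executorService =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));

    // Stand-in for deleteOnHost(lookupURL): blocking per-target work.
    private void doDelete(String target) throws InterruptedException {
        // issue the HTTP DELETE here
    }

    public void deleteAll(List<String> targets, long timeoutMillis)
            throws ExecutionException, InterruptedException {
        final List<ListenableFuture<?>> futures = new ArrayList<>(targets.size());
        for (final String target : targets) {
            // One task per target; per-target failures are handled inside the task
            // so a single bad host does not abort the whole batch.
            futures.add(executorService.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        doDelete(target);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    }
                }
            }));
        }
        final ListenableFuture<?> allFuture = Futures.allAsList(futures);
        try {
            // Bound the wait; on timeout, cancel(true) interrupts the remaining tasks.
            allFuture.get(timeoutMillis, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            allFuture.cancel(true);
            throw new ExecutionException("Timed out waiting for deletes; cancelling", e);
        }
    }
}

Note that cancel(true) only helps if the per-target work actually responds to interruption, which is why the worker re-asserts the interrupt flag before rethrowing.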

Example 67 with ListenableFuture

use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

the class CompressionStrategyTest method testKnownSizeConcurrency.

@Test(timeout = 60000)
public void testKnownSizeConcurrency() throws Exception {
    final int numThreads = 20;
    ListeningExecutorService threadPoolExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
    List<ListenableFuture<?>> results = new ArrayList<>();
    for (int i = 0; i < numThreads; ++i) {
        results.add(threadPoolExecutor.submit(new Runnable() {

            @Override
            public void run() {
                ByteBuffer compressed = ByteBuffer.wrap(compressionStrategy.getCompressor().compress(originalData));
                ByteBuffer output = ByteBuffer.allocate(originalData.length);
                // TODO: Lambdas would be nice here whenever we use Java 8
                compressionStrategy.getDecompressor().decompress(compressed, compressed.array().length, output, originalData.length);
                byte[] checkArray = new byte[DATA_SIZE];
                output.get(checkArray);
                Assert.assertArrayEquals("Uncompressed data does not match", originalData, checkArray);
            }
        }));
    }
    Futures.allAsList(results).get();
}
Also used : ArrayList(java.util.ArrayList) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
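
The test above boils down to a simple idiom: wrap a fixed thread pool with MoreExecutors.listeningDecorator, submit the same Runnable N times, and call Futures.allAsList(results).get() so that a failure in any worker surfaces as an ExecutionException in the test thread. A minimal, self-contained version of that idiom might look like the sketch below; the per-thread work is a trivial byte-array round trip standing in for the compress/decompress cycle.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;

public class ConcurrencySmokeTest {
    public static void main(String[] args) throws Exception {
        final int numThreads = 20;
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
        List<ListenableFuture<?>> results = new ArrayList<>();
        for (int i = 0; i < numThreads; ++i) {
            results.add(pool.submit(new Runnable() {
                @Override
                public void run() {
                    // Stand-in for the compress/decompress round trip in the test above.
                    byte[] original = {1, 2, 3, 4};
                    byte[] copy = original.clone();
                    if (!Arrays.equals(original, copy)) {
                        throw new AssertionError("round trip lost data");
                    }
                }
            }));
        }
        // get() blocks until every worker finishes and rethrows the first failure,
        // so the calling thread sees any AssertionError thrown by a worker.
        Futures.allAsList(results).get();
        pool.shutdown();
    }
}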

Example 68 with ListenableFuture

use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

the class IncrementalIndexTest method testConcurrentAddRead.

@Test(timeout = 60_000L)
public void testConcurrentAddRead() throws InterruptedException, ExecutionException {
    final int dimensionCount = 5;
    final ArrayList<AggregatorFactory> ingestAggregatorFactories = new ArrayList<>(dimensionCount + 1);
    ingestAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        ingestAggregatorFactories.add(new LongSumAggregatorFactory(String.format("sumResult%s", i), String.format("Dim_%s", i)));
        ingestAggregatorFactories.add(new DoubleSumAggregatorFactory(String.format("doubleSumResult%s", i), String.format("Dim_%s", i)));
    }
    final ArrayList<AggregatorFactory> queryAggregatorFactories = new ArrayList<>(dimensionCount + 1);
    queryAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        queryAggregatorFactories.add(new LongSumAggregatorFactory(String.format("sumResult%s", i), String.format("sumResult%s", i)));
        queryAggregatorFactories.add(new DoubleSumAggregatorFactory(String.format("doubleSumResult%s", i), String.format("doubleSumResult%s", i)));
    }
    final IncrementalIndex index = closer.closeLater(indexCreator.createIndex(ingestAggregatorFactories.toArray(new AggregatorFactory[dimensionCount])));
    final int concurrentThreads = 2;
    final int elementsPerThread = 10_000;
    final ListeningExecutorService indexExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("index-executor-%d").setPriority(Thread.MIN_PRIORITY).build()));
    final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("query-executor-%d").build()));
    final long timestamp = System.currentTimeMillis();
    final Interval queryInterval = new Interval("1900-01-01T00:00:00Z/2900-01-01T00:00:00Z");
    final List<ListenableFuture<?>> indexFutures = Lists.newArrayListWithExpectedSize(concurrentThreads);
    final List<ListenableFuture<?>> queryFutures = Lists.newArrayListWithExpectedSize(concurrentThreads);
    final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
    final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()), new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    final AtomicInteger currentlyRunning = new AtomicInteger(0);
    final AtomicInteger concurrentlyRan = new AtomicInteger(0);
    final AtomicInteger someoneRan = new AtomicInteger(0);
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch readyLatch = new CountDownLatch(concurrentThreads * 2);
    final AtomicInteger queriesAccumulated = new AtomicInteger(0);
    for (int j = 0; j < concurrentThreads; j++) {
        indexFutures.add(indexExecutor.submit(new Runnable() {

            @Override
            public void run() {
                readyLatch.countDown();
                try {
                    startLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                currentlyRunning.incrementAndGet();
                try {
                    for (int i = 0; i < elementsPerThread; i++) {
                        index.add(getLongRow(timestamp + i, i, dimensionCount));
                        someoneRan.incrementAndGet();
                    }
                } catch (IndexSizeExceededException e) {
                    throw Throwables.propagate(e);
                }
                currentlyRunning.decrementAndGet();
            }
        }));
        final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("xxx").granularity(Granularities.ALL).intervals(ImmutableList.of(queryInterval)).aggregators(queryAggregatorFactories).build();
        queryFutures.add(queryExecutor.submit(new Runnable() {

            @Override
            public void run() {
                readyLatch.countDown();
                try {
                    startLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                while (concurrentlyRan.get() == 0) {
                    QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(factory.createRunner(incrementalIndexSegment), factory.getToolchest());
                    Map<String, Object> context = new HashMap<String, Object>();
                    Sequence<Result<TimeseriesResultValue>> sequence = runner.run(query, context);
                    for (Double result : sequence.accumulate(new Double[0], new Accumulator<Double[], Result<TimeseriesResultValue>>() {

                        @Override
                        public Double[] accumulate(Double[] accumulated, Result<TimeseriesResultValue> in) {
                            if (currentlyRunning.get() > 0) {
                                concurrentlyRan.incrementAndGet();
                            }
                            queriesAccumulated.incrementAndGet();
                            return Lists.asList(in.getValue().getDoubleMetric("doubleSumResult0"), accumulated).toArray(new Double[accumulated.length + 1]);
                        }
                    })) {
                        final Integer maxValueExpected = someoneRan.get() + concurrentThreads;
                        if (maxValueExpected > 0) {
                            // Eventually consistent, but should be somewhere in that range
                            // Actual result is validated after all writes are guaranteed done.
                            Assert.assertTrue(String.format("%d >= %g >= 0 violated", maxValueExpected, result), result >= 0 && result <= maxValueExpected);
                        }
                    }
                }
            }
        }));
    }
    readyLatch.await();
    startLatch.countDown();
    List<ListenableFuture<?>> allFutures = new ArrayList<>(queryFutures.size() + indexFutures.size());
    allFutures.addAll(queryFutures);
    allFutures.addAll(indexFutures);
    Futures.allAsList(allFutures).get();
    Assert.assertTrue("Queries ran too fast", queriesAccumualted.get() > 0);
    Assert.assertTrue("Did not hit concurrency, please try again", concurrentlyRan.get() > 0);
    queryExecutor.shutdown();
    indexExecutor.shutdown();
    QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(factory.createRunner(incrementalIndexSegment), factory.getToolchest());
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("xxx").granularity(Granularities.ALL).intervals(ImmutableList.of(queryInterval)).aggregators(queryAggregatorFactories).build();
    Map<String, Object> context = new HashMap<String, Object>();
    List<Result<TimeseriesResultValue>> results = Sequences.toList(runner.run(query, context), new LinkedList<Result<TimeseriesResultValue>>());
    boolean isRollup = index.isRollup();
    for (Result<TimeseriesResultValue> result : results) {
        Assert.assertEquals(elementsPerThread * (isRollup ? 1 : concurrentThreads), result.getValue().getLongMetric("rows").intValue());
        for (int i = 0; i < dimensionCount; ++i) {
            Assert.assertEquals(String.format("Failed long sum on dimension %d", i), elementsPerThread * concurrentThreads, result.getValue().getLongMetric(String.format("sumResult%s", i)).intValue());
            Assert.assertEquals(String.format("Failed double sum on dimension %d", i), elementsPerThread * concurrentThreads, result.getValue().getDoubleMetric(String.format("doubleSumResult%s", i)).intValue());
        }
    }
}
Also used : TimeseriesResultValue(io.druid.query.timeseries.TimeseriesResultValue) IncrementalIndexSegment(io.druid.segment.IncrementalIndexSegment) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) TimeseriesQueryQueryToolChest(io.druid.query.timeseries.TimeseriesQueryQueryToolChest) IncrementalIndexSegment(io.druid.segment.IncrementalIndexSegment) Segment(io.druid.segment.Segment) Result(io.druid.query.Result) TimeseriesQueryEngine(io.druid.query.timeseries.TimeseriesQueryEngine) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) OffheapIncrementalIndex(io.druid.segment.incremental.OffheapIncrementalIndex) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) FilteredAggregatorFactory(io.druid.query.aggregation.FilteredAggregatorFactory) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TimeseriesQueryRunnerFactory(io.druid.query.timeseries.TimeseriesQueryRunnerFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) QueryRunnerFactory(io.druid.query.QueryRunnerFactory) TimeseriesQueryRunnerFactory(io.druid.query.timeseries.TimeseriesQueryRunnerFactory) FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Interval(org.joda.time.Interval) IndexSizeExceededException(io.druid.segment.incremental.IndexSizeExceededException) Test(org.junit.Test)
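
Most of the machinery in this test is the Druid query stack; the concurrency scaffolding itself is a reusable pattern: every worker counts down a readyLatch and then blocks on a startLatch, the test thread releases them all at once, and both groups of futures are joined with a single Futures.allAsList call. A reduced sketch of just that scaffolding, with plain counters standing in for the incremental index and the query runner, could look like this:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class LatchCoordinatedRun {
    public static void main(String[] args) throws Exception {
        final int writers = 2;
        final int readers = 2;
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(writers + readers));

        // Every task counts down readyLatch once it is running, then parks on
        // startLatch so that all of them are released at the same instant.
        final CountDownLatch readyLatch = new CountDownLatch(writers + readers);
        final CountDownLatch startLatch = new CountDownLatch(1);
        final AtomicInteger written = new AtomicInteger(0);

        List<ListenableFuture<?>> futures = new ArrayList<>();
        for (int i = 0; i < writers; i++) {
            futures.add(pool.submit(new Runnable() {
                @Override
                public void run() {
                    readyLatch.countDown();
                    awaitQuietly(startLatch);
                    for (int n = 0; n < 1000; n++) {
                        written.incrementAndGet(); // stand-in for index.add(...)
                    }
                }
            }));
        }
        for (int i = 0; i < readers; i++) {
            futures.add(pool.submit(new Runnable() {
                @Override
                public void run() {
                    readyLatch.countDown();
                    awaitQuietly(startLatch);
                    // Readers must only ever observe a monotonically growing count.
                    int last = 0;
                    for (int n = 0; n < 1000; n++) {
                        int now = written.get();
                        if (now < last) {
                            throw new AssertionError("count went backwards");
                        }
                        last = now;
                    }
                }
            }));
        }

        readyLatch.await();      // wait until every worker has been scheduled
        startLatch.countDown();  // release writers and readers together
        Futures.allAsList(futures).get(); // propagates any worker failure to this thread
        pool.shutdown();
    }

    private static void awaitQuietly(CountDownLatch latch) {
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
}

Joining writers and readers through a single combined future keeps the test deterministic: it cannot finish while any worker is still running, and any worker's exception fails the whole run.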

Example 69 with ListenableFuture

use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

the class BenchmarkIndexibleWrites method testConcurrentReads.

/**
   BenchmarkIndexibleWrites.TestConcurrentReads[0]: [measured 100 out of 200 rounds, threads: 1 (sequential)]
   round: 0.28 [+- 0.02], round.block: 0.00 [+- 0.00], round.gc: 0.02 [+- 0.00], GC.calls: 396, GC.time: 1.84, time.total: 59.98, time.warmup: 30.51, time.bench: 29.48
   BenchmarkIndexibleWrites.TestConcurrentReads[1]: [measured 100 out of 200 rounds, threads: 1 (sequential)]
   round: 0.12 [+- 0.01], round.block: 0.00 [+- 0.00], round.gc: 0.02 [+- 0.00], GC.calls: 396, GC.time: 2.05, time.total: 29.21, time.warmup: 14.65, time.bench: 14.55

   */
@BenchmarkOptions(warmupRounds = 100, benchmarkRounds = 100, clock = Clock.REAL_TIME, callgc = true)
@Ignore
@Test
public void testConcurrentReads() throws ExecutionException, InterruptedException {
    final ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("indexible-writes-benchmark-reader-%d").build()));
    final AtomicInteger index = new AtomicInteger(0);
    final AtomicInteger queryableIndex = new AtomicInteger(0);
    List<ListenableFuture<?>> futures = new LinkedList<>();
    final Integer loops = totalIndexSize / concurrentThreads;
    final AtomicBoolean done = new AtomicBoolean(false);
    final CountDownLatch start = new CountDownLatch(1);
    for (int i = 0; i < concurrentThreads; ++i) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    start.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                final Random rndGen = new Random();
                while (!done.get()) {
                    Integer idx = rndGen.nextInt(queryableIndex.get() + 1);
                    Assert.assertEquals(idx, concurrentIndexible.get(idx));
                }
            }
        }));
    }
    {
        final Integer idx = index.getAndIncrement();
        concurrentIndexible.set(idx, idx);
        start.countDown();
    }
    for (int i = 1; i < totalIndexSize; ++i) {
        final Integer idx = index.getAndIncrement();
        concurrentIndexible.set(idx, idx);
        queryableIndex.incrementAndGet();
    }
    done.set(true);
    Futures.allAsList(futures).get();
    executorService.shutdown();
    Assert.assertTrue(String.format("Index too small %d, expected %d across %d loops", index.get(), totalIndexSize, loops), index.get() >= totalIndexSize);
    for (int i = 0; i < index.get(); ++i) {
        Assert.assertEquals(i, concurrentIndexible.get(i).intValue());
    }
    concurrentIndexible.clear();
    futures.clear();
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) LinkedList(java.util.LinkedList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Ignore(org.junit.Ignore) Test(org.junit.Test) BenchmarkOptions(com.carrotsearch.junitbenchmarks.BenchmarkOptions)
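
The benchmark's read path relies on a publish-then-advance discipline: the writer stores a value first and only then bumps the watermark (queryableIndex) below which readers are allowed to sample, while a done flag ends the reader loops and Futures.allAsList(futures).get() propagates any reader-side failure. A compact sketch of that discipline, using a ConcurrentHashMap as a stand-in for concurrentIndexible and an assumed thread count, might look like this:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class ReadersBehindWatermark {
    public static void main(String[] args) throws Exception {
        final int readerThreads = 4;      // assumed value, not taken from the benchmark
        final int totalSize = 100_000;
        final ConcurrentHashMap<Integer, Integer> store = new ConcurrentHashMap<>();
        final AtomicInteger watermark = new AtomicInteger(0); // highest index safe to read
        final AtomicBoolean done = new AtomicBoolean(false);

        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(readerThreads));
        List<ListenableFuture<?>> futures = new ArrayList<>();

        // Seed one entry so readers always have something visible.
        store.put(0, 0);

        for (int i = 0; i < readerThreads; i++) {
            futures.add(pool.submit(new Runnable() {
                @Override
                public void run() {
                    Random rnd = new Random();
                    while (!done.get()) {
                        // Only read indexes at or below the published watermark.
                        int idx = rnd.nextInt(watermark.get() + 1);
                        Integer value = store.get(idx);
                        if (value == null || value != idx) {
                            throw new AssertionError("stale or missing value at " + idx);
                        }
                    }
                }
            }));
        }

        // Single writer: publish the value first, then advance the watermark.
        for (int idx = 1; idx < totalSize; idx++) {
            store.put(idx, idx);
            watermark.incrementAndGet();
        }
        done.set(true);
        Futures.allAsList(futures).get(); // rethrows any reader-side AssertionError
        pool.shutdown();
    }
}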

Example 70 with ListenableFuture

use of com.google.common.util.concurrent.ListenableFuture in project weave by continuuity.

the class ZKDiscoveryService method createServiceLoader.

/**
   * Creates a CacheLoader for creating live Iterable for watching instances changes for a given service.
   */
private CacheLoader<String, ServiceDiscovered> createServiceLoader() {
    return new CacheLoader<String, ServiceDiscovered>() {

        @Override
        public ServiceDiscovered load(String service) throws Exception {
            final DefaultServiceDiscovered serviceDiscovered = new DefaultServiceDiscovered(service);
            final String serviceBase = "/" + service;
            // Watch for children changes in /service
            ZKOperations.watchChildren(zkClient, serviceBase, new ZKOperations.ChildrenCallback() {

                @Override
                public void updated(NodeChildren nodeChildren) {
                    // Fetch data of all children nodes in parallel.
                    List<String> children = nodeChildren.getChildren();
                    List<OperationFuture<NodeData>> dataFutures = Lists.newArrayListWithCapacity(children.size());
                    for (String child : children) {
                        dataFutures.add(zkClient.getData(serviceBase + "/" + child));
                    }
                    // Update the service map when all fetching are done.
                    final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
                    fetchFuture.addListener(new Runnable() {

                        @Override
                        public void run() {
                            ImmutableSet.Builder<Discoverable> builder = ImmutableSet.builder();
                            for (NodeData nodeData : Futures.getUnchecked(fetchFuture)) {
                                // For successful fetch, decode the content.
                                if (nodeData != null) {
                                    Discoverable discoverable = DiscoverableAdapter.decode(nodeData.getData());
                                    if (discoverable != null) {
                                        builder.add(discoverable);
                                    }
                                }
                            }
                            serviceDiscovered.setDiscoverables(builder.build());
                        }
                    }, Threads.SAME_THREAD_EXECUTOR);
                }
            });
            return serviceDiscovered;
        }
    };
}
Also used : NodeData(com.continuuity.weave.zookeeper.NodeData) ImmutableSet(com.google.common.collect.ImmutableSet) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) CacheLoader(com.google.common.cache.CacheLoader) List(java.util.List) ZKOperations(com.continuuity.weave.zookeeper.ZKOperations) NodeChildren(com.continuuity.weave.zookeeper.NodeChildren)
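
The interesting Guava piece here is Futures.successfulAsList, which, unlike allAsList, never fails as a whole: inputs that fail or are cancelled simply appear as null in the combined result, which is why the listener above checks each NodeData for null before decoding. A small self-contained demonstration of that behaviour, using SettableFuture to simulate the ZooKeeper data fetches and MoreExecutors.directExecutor() in place of weave's Threads.SAME_THREAD_EXECUTOR, could look like this:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import java.util.ArrayList;
import java.util.List;

public class SuccessfulAsListDemo {
    public static void main(String[] args) {
        // Simulate three asynchronous fetches; one of them fails.
        SettableFuture<String> a = SettableFuture.create();
        SettableFuture<String> b = SettableFuture.create();
        SettableFuture<String> c = SettableFuture.create();
        List<ListenableFuture<String>> fetches = new ArrayList<>();
        fetches.add(a);
        fetches.add(b);
        fetches.add(c);

        // successfulAsList resolves once every input is done; failed or cancelled
        // inputs show up as null entries instead of failing the combined future.
        final ListenableFuture<List<String>> fetchFuture = Futures.successfulAsList(fetches);
        fetchFuture.addListener(new Runnable() {
            @Override
            public void run() {
                List<String> results = Futures.getUnchecked(fetchFuture);
                for (String result : results) {
                    if (result != null) {            // skip the failed fetch
                        System.out.println("fetched: " + result);
                    }
                }
            }
        }, MoreExecutors.directExecutor());

        a.set("node-1");
        b.setException(new RuntimeException("fetch failed")); // becomes null in the list
        c.set("node-3");
    }
}

Because the listener is registered with a direct (same-thread) executor, it runs on whichever thread completes the last input future, which matches the behaviour ZKDiscoveryService gets from Threads.SAME_THREAD_EXECUTOR.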

Aggregations

ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 192
Test (org.junit.Test): 78
ArrayList (java.util.ArrayList): 63
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 53
List (java.util.List): 50
ExecutionException (java.util.concurrent.ExecutionException): 42
Map (java.util.Map): 36
IOException (java.io.IOException): 35
CountDownLatch (java.util.concurrent.CountDownLatch): 26
File (java.io.File): 23
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 23
ImmutableList (com.google.common.collect.ImmutableList): 20
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 20
Futures (com.google.common.util.concurrent.Futures): 19
Callable (java.util.concurrent.Callable): 19
ImmutableMap (com.google.common.collect.ImmutableMap): 18
HashMap (java.util.HashMap): 16
Lists (com.google.common.collect.Lists): 14
URL (java.net.URL): 14
Set (java.util.Set): 14