Example 26 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class IncrementalIndexTest method testConcurrentAddRead.

@Test(timeout = 60_000L)
public void testConcurrentAddRead() throws InterruptedException, ExecutionException {
    final int dimensionCount = 5;
    final ArrayList<AggregatorFactory> ingestAggregatorFactories = new ArrayList<>(dimensionCount + 1);
    ingestAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        ingestAggregatorFactories.add(new LongSumAggregatorFactory(String.format("sumResult%s", i), String.format("Dim_%s", i)));
        ingestAggregatorFactories.add(new DoubleSumAggregatorFactory(String.format("doubleSumResult%s", i), String.format("Dim_%s", i)));
    }
    final ArrayList<AggregatorFactory> queryAggregatorFactories = new ArrayList<>(dimensionCount + 1);
    queryAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        queryAggregatorFactories.add(new LongSumAggregatorFactory(String.format("sumResult%s", i), String.format("sumResult%s", i)));
        queryAggregatorFactories.add(new DoubleSumAggregatorFactory(String.format("doubleSumResult%s", i), String.format("doubleSumResult%s", i)));
    }
    final IncrementalIndex index = closer.closeLater(indexCreator.createIndex(ingestAggregatorFactories.toArray(new AggregatorFactory[dimensionCount])));
    final int concurrentThreads = 2;
    final int elementsPerThread = 10_000;
    final ListeningExecutorService indexExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("index-executor-%d").setPriority(Thread.MIN_PRIORITY).build()));
    final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("query-executor-%d").build()));
    final long timestamp = System.currentTimeMillis();
    final Interval queryInterval = new Interval("1900-01-01T00:00:00Z/2900-01-01T00:00:00Z");
    final List<ListenableFuture<?>> indexFutures = Lists.newArrayListWithExpectedSize(concurrentThreads);
    final List<ListenableFuture<?>> queryFutures = Lists.newArrayListWithExpectedSize(concurrentThreads);
    final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
    final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()), new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    final AtomicInteger currentlyRunning = new AtomicInteger(0);
    final AtomicInteger concurrentlyRan = new AtomicInteger(0);
    final AtomicInteger someoneRan = new AtomicInteger(0);
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch readyLatch = new CountDownLatch(concurrentThreads * 2);
    final AtomicInteger queriesAccumulated = new AtomicInteger(0);
    for (int j = 0; j < concurrentThreads; j++) {
        indexFutures.add(indexExecutor.submit(new Runnable() {

            @Override
            public void run() {
                readyLatch.countDown();
                try {
                    startLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                currentlyRunning.incrementAndGet();
                try {
                    for (int i = 0; i < elementsPerThread; i++) {
                        index.add(getLongRow(timestamp + i, i, dimensionCount));
                        someoneRan.incrementAndGet();
                    }
                } catch (IndexSizeExceededException e) {
                    throw Throwables.propagate(e);
                }
                currentlyRunning.decrementAndGet();
            }
        }));
        final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("xxx").granularity(Granularities.ALL).intervals(ImmutableList.of(queryInterval)).aggregators(queryAggregatorFactories).build();
        queryFutures.add(queryExecutor.submit(new Runnable() {

            @Override
            public void run() {
                readyLatch.countDown();
                try {
                    startLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                while (concurrentlyRan.get() == 0) {
                    QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(factory.createRunner(incrementalIndexSegment), factory.getToolchest());
                    Map<String, Object> context = new HashMap<String, Object>();
                    Sequence<Result<TimeseriesResultValue>> sequence = runner.run(query, context);
                    for (Double result : sequence.accumulate(new Double[0], new Accumulator<Double[], Result<TimeseriesResultValue>>() {

                        @Override
                        public Double[] accumulate(Double[] accumulated, Result<TimeseriesResultValue> in) {
                            if (currentlyRunning.get() > 0) {
                                concurrentlyRan.incrementAndGet();
                            }
                            queriesAccumulated.incrementAndGet();
                            return Lists.asList(in.getValue().getDoubleMetric("doubleSumResult0"), accumulated).toArray(new Double[accumulated.length + 1]);
                        }
                    })) {
                        final Integer maxValueExpected = someoneRan.get() + concurrentThreads;
                        if (maxValueExpected > 0) {
                            // Eventually consistent, but should be somewhere in that range
                            // Actual result is validated after all writes are guaranteed done.
                            Assert.assertTrue(String.format("%d >= %g >= 0 violated", maxValueExpected, result), result >= 0 && result <= maxValueExpected);
                        }
                    }
                }
            }
        }));
    }
    readyLatch.await();
    startLatch.countDown();
    List<ListenableFuture<?>> allFutures = new ArrayList<>(queryFutures.size() + indexFutures.size());
    allFutures.addAll(queryFutures);
    allFutures.addAll(indexFutures);
    Futures.allAsList(allFutures).get();
    Assert.assertTrue("Queries ran too fast", queriesAccumualted.get() > 0);
    Assert.assertTrue("Did not hit concurrency, please try again", concurrentlyRan.get() > 0);
    queryExecutor.shutdown();
    indexExecutor.shutdown();
    QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(factory.createRunner(incrementalIndexSegment), factory.getToolchest());
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("xxx").granularity(Granularities.ALL).intervals(ImmutableList.of(queryInterval)).aggregators(queryAggregatorFactories).build();
    Map<String, Object> context = new HashMap<String, Object>();
    List<Result<TimeseriesResultValue>> results = Sequences.toList(runner.run(query, context), new LinkedList<Result<TimeseriesResultValue>>());
    boolean isRollup = index.isRollup();
    for (Result<TimeseriesResultValue> result : results) {
        Assert.assertEquals(elementsPerThread * (isRollup ? 1 : concurrentThreads), result.getValue().getLongMetric("rows").intValue());
        for (int i = 0; i < dimensionCount; ++i) {
            Assert.assertEquals(String.format("Failed long sum on dimension %d", i), elementsPerThread * concurrentThreads, result.getValue().getLongMetric(String.format("sumResult%s", i)).intValue());
            Assert.assertEquals(String.format("Failed double sum on dimension %d", i), elementsPerThread * concurrentThreads, result.getValue().getDoubleMetric(String.format("doubleSumResult%s", i)).intValue());
        }
    }
}
Also used : TimeseriesResultValue(io.druid.query.timeseries.TimeseriesResultValue) IncrementalIndexSegment(io.druid.segment.IncrementalIndexSegment) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) TimeseriesQueryQueryToolChest(io.druid.query.timeseries.TimeseriesQueryQueryToolChest) Segment(io.druid.segment.Segment) Result(io.druid.query.Result) TimeseriesQueryEngine(io.druid.query.timeseries.TimeseriesQueryEngine) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) OffheapIncrementalIndex(io.druid.segment.incremental.OffheapIncrementalIndex) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) FilteredAggregatorFactory(io.druid.query.aggregation.FilteredAggregatorFactory) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TimeseriesQueryRunnerFactory(io.druid.query.timeseries.TimeseriesQueryRunnerFactory) QueryRunnerFactory(io.druid.query.QueryRunnerFactory) FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Interval(org.joda.time.Interval) IndexSizeExceededException(io.druid.segment.incremental.IndexSizeExceededException) Test(org.junit.Test)
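
The piece of this test worth lifting is the two-latch start gate: a ready latch counts every worker in, a single start latch then releases them all at once so the writers and readers genuinely overlap, and Futures.allAsList(...).get() both joins the futures and rethrows the first worker failure. Below is a stripped-down sketch of just that gate; the class and counter names (LatchGatedWrites, writes) are hypothetical stand-ins, not part of the Druid test.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class LatchGatedWrites {
    public static void main(String[] args) throws Exception {
        final int threads = 4;
        final ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(threads));
        final CountDownLatch ready = new CountDownLatch(threads);
        final CountDownLatch start = new CountDownLatch(1);
        final AtomicInteger writes = new AtomicInteger(0);
        final List<ListenableFuture<?>> futures = new ArrayList<>(threads);
        for (int i = 0; i < threads; i++) {
            futures.add(exec.submit(() -> {
                ready.countDown();            // announce this worker is scheduled
                try {
                    start.await();            // park until every worker may begin
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
                for (int j = 0; j < 1_000; j++) {
                    writes.incrementAndGet(); // stands in for index.add(...)
                }
            }));
        }
        ready.await();                        // all workers are parked on start
        start.countDown();                    // release them simultaneously
        Futures.allAsList(futures).get();     // join and rethrow worker failures
        exec.shutdown();
        System.out.println("writes = " + writes.get()); // prints 4000
    }
}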

Example 27 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class BenchmarkIndexibleWrites method testConcurrentReads.

/**
   BenchmarkIndexibleWrites.TestConcurrentReads[0]: [measured 100 out of 200 rounds, threads: 1 (sequential)]
   round: 0.28 [+- 0.02], round.block: 0.00 [+- 0.00], round.gc: 0.02 [+- 0.00], GC.calls: 396, GC.time: 1.84, time.total: 59.98, time.warmup: 30.51, time.bench: 29.48
   BenchmarkIndexibleWrites.TestConcurrentReads[1]: [measured 100 out of 200 rounds, threads: 1 (sequential)]
   round: 0.12 [+- 0.01], round.block: 0.00 [+- 0.00], round.gc: 0.02 [+- 0.00], GC.calls: 396, GC.time: 2.05, time.total: 29.21, time.warmup: 14.65, time.bench: 14.55

   */
@BenchmarkOptions(warmupRounds = 100, benchmarkRounds = 100, clock = Clock.REAL_TIME, callgc = true)
@Ignore
@Test
public void testConcurrentReads() throws ExecutionException, InterruptedException {
    final ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("indexible-writes-benchmark-reader-%d").build()));
    final AtomicInteger index = new AtomicInteger(0);
    final AtomicInteger queryableIndex = new AtomicInteger(0);
    List<ListenableFuture<?>> futures = new LinkedList<>();
    final Integer loops = totalIndexSize / concurrentThreads;
    final AtomicBoolean done = new AtomicBoolean(false);
    final CountDownLatch start = new CountDownLatch(1);
    for (int i = 0; i < concurrentThreads; ++i) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    start.await();
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
                final Random rndGen = new Random();
                while (!done.get()) {
                    Integer idx = rndGen.nextInt(queryableIndex.get() + 1);
                    Assert.assertEquals(idx, concurrentIndexible.get(idx));
                }
            }
        }));
    }
    {
        final Integer idx = index.getAndIncrement();
        concurrentIndexible.set(idx, idx);
        start.countDown();
    }
    for (int i = 1; i < totalIndexSize; ++i) {
        final Integer idx = index.getAndIncrement();
        concurrentIndexible.set(idx, idx);
        queryableIndex.incrementAndGet();
    }
    done.set(true);
    Futures.allAsList(futures).get();
    executorService.shutdown();
    Assert.assertTrue(String.format("Index too small %d, expected %d across %d loops", index.get(), totalIndexSize, loops), index.get() >= totalIndexSize);
    for (int i = 0; i < index.get(); ++i) {
        Assert.assertEquals(i, concurrentIndexible.get(i).intValue());
    }
    concurrentIndexible.clear();
    futures.clear();
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) LinkedList(java.util.LinkedList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Ignore(org.junit.Ignore) Test(org.junit.Test) BenchmarkOptions(com.carrotsearch.junitbenchmarks.BenchmarkOptions)
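
The invariant this benchmark leans on is publication ordering: the writer stores into a slot before advancing the watermark, so readers that bound their random index by the watermark can never observe an unwritten slot. Here is a minimal self-contained sketch of that single-writer/many-reader pattern; it substitutes a plain AtomicIntegerArray for the benchmark's concurrentIndexible, and all names are hypothetical.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicIntegerArray;

public class WatermarkReads {
    public static void main(String[] args) throws Exception {
        final int size = 100_000;
        final int readers = 4;
        final AtomicIntegerArray slots = new AtomicIntegerArray(size);
        final AtomicInteger watermark = new AtomicInteger(0); // highest readable index
        final AtomicBoolean done = new AtomicBoolean(false);
        final ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(readers));
        final List<ListenableFuture<?>> futures = new ArrayList<>(readers);
        slots.set(0, 0); // publish slot 0 before any reader starts
        for (int i = 0; i < readers; i++) {
            futures.add(exec.submit(() -> {
                final Random rnd = new Random();
                while (!done.get()) {
                    // Only probe indices at or below the watermark.
                    int idx = rnd.nextInt(watermark.get() + 1);
                    if (slots.get(idx) != idx) {
                        throw new IllegalStateException("unpublished read at " + idx);
                    }
                }
            }));
        }
        for (int i = 1; i < size; i++) {
            slots.set(i, i);             // write the slot first...
            watermark.incrementAndGet(); // ...then make it visible to readers
        }
        done.set(true);
        Futures.allAsList(futures).get(); // surfaces any reader's failure
        exec.shutdown();
    }
}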

Example 28 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class HdfsClasspathSetupTest method testConcurrentUpload.

@Test
public void testConcurrentUpload() throws IOException, InterruptedException, ExecutionException, TimeoutException {
    final int concurrency = 10;
    ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrency));
    // barrier ensures that all jobs try to add files to classpath at same time.
    final CyclicBarrier barrier = new CyclicBarrier(concurrency);
    final DistributedFileSystem fs = miniCluster.getFileSystem();
    final Path expectedJarPath = new Path(finalClasspath, dummyJarFile.getName());
    List<ListenableFuture<Boolean>> futures = new ArrayList<>();
    for (int i = 0; i < concurrency; i++) {
        futures.add(pool.submit(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                int id = barrier.await();
                Job job = Job.getInstance(conf, "test-job-" + id);
                Path intermediatePathForJob = new Path(intermediatePath, "job-" + id);
                JobHelper.addJarToClassPath(dummyJarFile, finalClasspath, intermediatePathForJob, fs, job);
                // check file gets uploaded to final HDFS path
                Assert.assertTrue(fs.exists(expectedJarPath));
                // check that the intermediate file is not present
                Assert.assertFalse(fs.exists(new Path(intermediatePathForJob, dummyJarFile.getName())));
                // check file gets added to the classpath
                Assert.assertEquals(expectedJarPath.toString(), job.getConfiguration().get(MRJobConfig.CLASSPATH_FILES));
                return true;
            }
        }));
    }
    Futures.allAsList(futures).get(30, TimeUnit.SECONDS);
    pool.shutdownNow();
}
Also used : Path(org.apache.hadoop.fs.Path) ArrayList(java.util.ArrayList) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Job(org.apache.hadoop.mapreduce.Job) Callable(java.util.concurrent.Callable) CyclicBarrier(java.util.concurrent.CyclicBarrier) Test(org.junit.Test)
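
Unlike the latch-based tests above, this one coordinates with a CyclicBarrier, which has the useful side effect of handing each thread a distinct arrival index to use as a job id, and it bounds the whole batch with a timed get so a deadlock fails the test instead of hanging it. A self-contained sketch of that shape follows, with a ConcurrentHashMap standing in for the shared HDFS path; the names are hypothetical.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BarrierStart {
    public static void main(String[] args) throws Exception {
        final int concurrency = 10;
        final ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrency));
        final CyclicBarrier barrier = new CyclicBarrier(concurrency);
        // Stands in for the shared HDFS path: idempotent, first writer wins.
        final ConcurrentHashMap<String, Integer> shared = new ConcurrentHashMap<>();
        final List<ListenableFuture<Boolean>> futures = new ArrayList<>(concurrency);
        for (int i = 0; i < concurrency; i++) {
            futures.add(pool.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    int id = barrier.await(); // all ten threads proceed together
                    shared.putIfAbsent("dummy.jar", id);
                    return shared.containsKey("dummy.jar");
                }
            }));
        }
        // Timed get: a deadlock or lost barrier party fails fast instead of hanging.
        List<Boolean> results = Futures.allAsList(futures).get(30, TimeUnit.SECONDS);
        pool.shutdownNow();
        System.out.println("all succeeded: " + !results.contains(false));
    }
}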

Example 29 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project gerrit by GerritCodeReview.

the class ReviewDbBatchUpdate method executeChangeOps.

private List<ChangeTask> executeChangeOps(boolean parallel, boolean dryrun) throws UpdateException, RestApiException {
    List<ChangeTask> tasks;
    boolean success = false;
    Stopwatch sw = Stopwatch.createStarted();
    try {
        logDebug("Executing change ops (parallel? {})", parallel);
        ListeningExecutorService executor = parallel ? changeUpdateExector : MoreExecutors.newDirectExecutorService();
        tasks = new ArrayList<>(ops.keySet().size());
        try {
            if (notesMigration.commitChangeWrites() && repoView != null) {
                // A NoteDb change may have been rebuilt since the repo was originally
                // opened, so make sure we see that.
                logDebug("Preemptively scanning for repo changes");
                repoView.getRepository().scanForRepoChanges();
            }
            if (!ops.isEmpty() && notesMigration.failChangeWrites()) {
                // Fail fast before attempting any writes if changes are read-only, as
                // this is a programmer error.
                logDebug("Failing early due to read-only Changes table");
                throw new OrmException(NoteDbUpdateManager.CHANGES_READ_ONLY);
            }
            List<ListenableFuture<?>> futures = new ArrayList<>(ops.keySet().size());
            for (Map.Entry<Change.Id, Collection<BatchUpdateOp>> e : ops.asMap().entrySet()) {
                ChangeTask task = new ChangeTask(e.getKey(), e.getValue(), Thread.currentThread(), dryrun);
                tasks.add(task);
                if (!parallel) {
                    logDebug("Direct execution of task for ops: {}", ops);
                }
                futures.add(executor.submit(task));
            }
            if (parallel) {
                logDebug("Waiting on futures for {} ops spanning {} changes", ops.size(), ops.keySet().size());
            }
            Futures.allAsList(futures).get();
            if (notesMigration.commitChangeWrites()) {
                if (!dryrun) {
                    executeNoteDbUpdates(tasks);
                }
            }
            success = true;
        } catch (ExecutionException | InterruptedException e) {
            Throwables.throwIfInstanceOf(e.getCause(), UpdateException.class);
            Throwables.throwIfInstanceOf(e.getCause(), RestApiException.class);
            throw new UpdateException(e);
        } catch (OrmException | IOException e) {
            throw new UpdateException(e);
        }
    } finally {
        metrics.executeChangeOpsLatency.record(success, sw.elapsed(NANOSECONDS), NANOSECONDS);
    }
    return tasks;
}
Also used : Stopwatch(com.google.common.base.Stopwatch) ArrayList(java.util.ArrayList) IOException(java.io.IOException) OrmException(com.google.gwtorm.server.OrmException) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Collection(java.util.Collection) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) RequestId(com.google.gerrit.server.util.RequestId) ExecutionException(java.util.concurrent.ExecutionException) RestApiException(com.google.gerrit.extensions.restapi.RestApiException) Map(java.util.Map) TreeMap(java.util.TreeMap)
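
The executor selection at the top of the try block is the idiom to note: MoreExecutors.newDirectExecutorService() is a full ListeningExecutorService that runs each task inline on the submitting thread, so the submit/allAsList plumbing is identical whether or not the update runs in parallel. A minimal sketch of that switch, with a trivial string task in place of ChangeTask (names are hypothetical):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;

public class ParallelOrDirect {
    static List<String> runTasks(boolean parallel, List<String> items) throws Exception {
        // A pool for parallel mode, an inline (same-thread) executor otherwise.
        ListeningExecutorService executor = parallel
                ? MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4))
                : MoreExecutors.newDirectExecutorService();
        try {
            List<ListenableFuture<String>> futures = new ArrayList<>(items.size());
            for (String item : items) {
                // Submission code is identical in both modes.
                futures.add(executor.submit(
                        () -> item + " on " + Thread.currentThread().getName()));
            }
            return Futures.allAsList(futures).get();
        } finally {
            executor.shutdown();
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(runTasks(false, Arrays.asList("a", "b"))); // main thread
        System.out.println(runTasks(true, Arrays.asList("a", "b")));  // pool threads
    }
}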

Example 30 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project gerrit by GerritCodeReview.

the class RebuildNoteDb method run.

@Override
public int run() throws Exception {
    mustHaveValidSite();
    dbInjector = createDbInjector(MULTI_USER);
    threads = ThreadLimiter.limitThreads(dbInjector, threads);
    LifecycleManager dbManager = new LifecycleManager();
    dbManager.add(dbInjector);
    dbManager.start();
    sysInjector = createSysInjector();
    sysInjector.injectMembers(this);
    if (!notesMigration.enabled()) {
        throw die("NoteDb is not enabled.");
    }
    LifecycleManager sysManager = new LifecycleManager();
    sysManager.add(sysInjector);
    sysManager.start();
    ListeningExecutorService executor = newExecutor();
    System.out.println("Rebuilding the NoteDb");
    ImmutableListMultimap<Project.NameKey, Change.Id> changesByProject = getChangesByProject();
    boolean ok;
    Stopwatch sw = Stopwatch.createStarted();
    try (Repository allUsersRepo = repoManager.openRepository(allUsersName)) {
        deleteRefs(RefNames.REFS_DRAFT_COMMENTS, allUsersRepo);
        List<ListenableFuture<Boolean>> futures = new ArrayList<>();
        List<Project.NameKey> projectNames = Ordering.usingToString().sortedCopy(changesByProject.keySet());
        for (Project.NameKey project : projectNames) {
            ListenableFuture<Boolean> future = executor.submit(() -> {
                try (ReviewDb db = unwrapDb(schemaFactory.open())) {
                    return rebuildProject(db, changesByProject, project, allUsersRepo);
                } catch (Exception e) {
                    log.error("Error rebuilding project " + project, e);
                    return false;
                }
            });
            futures.add(future);
        }
        try {
            ok = Iterables.all(Futures.allAsList(futures).get(), Predicates.equalTo(true));
        } catch (InterruptedException | ExecutionException e) {
            log.error("Error rebuilding projects", e);
            ok = false;
        }
    }
    double t = sw.elapsed(TimeUnit.MILLISECONDS) / 1000d;
    System.out.format("Rebuild %d changes in %.01fs (%.01f/s)\n", changesByProject.size(), t, changesByProject.size() / t);
    return ok ? 0 : 1;
}
Also used : LifecycleManager(com.google.gerrit.lifecycle.LifecycleManager) Stopwatch(com.google.common.base.Stopwatch) ArrayList(java.util.ArrayList) OrmException(com.google.gwtorm.server.OrmException) NoPatchSetsException(com.google.gerrit.server.notedb.rebuild.ChangeRebuilder.NoPatchSetsException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) Project(com.google.gerrit.reviewdb.client.Project) Repository(org.eclipse.jgit.lib.Repository) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ObjectId(org.eclipse.jgit.lib.ObjectId) ReviewDb(com.google.gerrit.reviewdb.server.ReviewDb)
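
The aggregation step is the reusable piece here: each task catches and logs its own failure and returns false rather than throwing, so Futures.allAsList never fails wholesale, and Iterables.all(..., Predicates.equalTo(true)) folds the collected results into a single exit status. A compact sketch of that fan-out-and-fold, with a dummy task body in place of rebuildProject (names are hypothetical):

import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class FanOutAndFold {
    public static void main(String[] args) throws Exception {
        final ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        final List<String> projects = Arrays.asList("alpha", "beta", "gamma");
        List<ListenableFuture<Boolean>> futures = new ArrayList<>();
        for (final String project : projects) {
            futures.add(executor.submit(() -> {
                try {
                    return !project.isEmpty(); // stands in for rebuildProject(...)
                } catch (Exception e) {
                    System.err.println("Error rebuilding " + project + ": " + e);
                    return false;              // record the failure, keep going
                }
            }));
        }
        boolean ok;
        try {
            // True only if every task reported success.
            ok = Iterables.all(Futures.allAsList(futures).get(), Predicates.equalTo(true));
        } catch (ExecutionException e) {
            ok = false; // a task failed in a way it could not record itself
        }
        executor.shutdown();
        System.exit(ok ? 0 : 1);
    }
}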

Aggregations

ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService)201 Test (org.junit.Test)115 ListenableFuture (com.google.common.util.concurrent.ListenableFuture)75 ArrayList (java.util.ArrayList)43 CountDownLatch (java.util.concurrent.CountDownLatch)29 ExecutorService (java.util.concurrent.ExecutorService)28 IOException (java.io.IOException)25 ExecutionException (java.util.concurrent.ExecutionException)25 Interval (org.joda.time.Interval)25 DateTime (org.joda.time.DateTime)23 List (java.util.List)21 Callable (java.util.concurrent.Callable)20 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)20 DruidServer (io.druid.client.DruidServer)18 DataSegment (io.druid.timeline.DataSegment)18 DruidServer (org.apache.druid.client.DruidServer)17 ImmutableMap (com.google.common.collect.ImmutableMap)16 File (java.io.File)16 Map (java.util.Map)16 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)15