Example 86 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class BenchmarkIndexibleWrites method testConcurrentReads.

/**
 *   BenchmarkIndexibleWrites.TestConcurrentReads[0]: [measured 100 out of 200 rounds, threads: 1 (sequential)]
 *   round: 0.28 [+- 0.02], round.block: 0.00 [+- 0.00], round.gc: 0.02 [+- 0.00], GC.calls: 396, GC.time: 1.84, time.total: 59.98, time.warmup: 30.51, time.bench: 29.48
 *   BenchmarkIndexibleWrites.TestConcurrentReads[1]: [measured 100 out of 200 rounds, threads: 1 (sequential)]
 *   round: 0.12 [+- 0.01], round.block: 0.00 [+- 0.00], round.gc: 0.02 [+- 0.00], GC.calls: 396, GC.time: 2.05, time.total: 29.21, time.warmup: 14.65, time.bench: 14.55
 */
@BenchmarkOptions(warmupRounds = 100, benchmarkRounds = 100, clock = Clock.REAL_TIME, callgc = true)
@Ignore
@Test
public void testConcurrentReads() throws ExecutionException, InterruptedException {
    final ListeningExecutorService executorService = MoreExecutors.listeningDecorator(
        Executors.newFixedThreadPool(
            concurrentThreads,
            new ThreadFactoryBuilder().setDaemon(false).setNameFormat("indexible-writes-benchmark-reader-%d").build()
        )
    );
    final AtomicInteger index = new AtomicInteger(0);
    final AtomicInteger queryableIndex = new AtomicInteger(0);
    List<ListenableFuture<?>> futures = new ArrayList<>();
    final Integer loops = totalIndexSize / concurrentThreads;
    final AtomicBoolean done = new AtomicBoolean(false);
    final CountDownLatch start = new CountDownLatch(1);
    for (int i = 0; i < concurrentThreads; ++i) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    start.await();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                final Random rndGen = ThreadLocalRandom.current();
                while (!done.get()) {
                    Integer idx = rndGen.nextInt(queryableIndex.get() + 1);
                    Assert.assertEquals(idx, concurrentIndexible.get(idx));
                }
            }
        }));
    }
    {
        final Integer idx = index.getAndIncrement();
        concurrentIndexible.set(idx, idx);
        start.countDown();
    }
    for (int i = 1; i < totalIndexSize; ++i) {
        final Integer idx = index.getAndIncrement();
        concurrentIndexible.set(idx, idx);
        queryableIndex.incrementAndGet();
    }
    done.set(true);
    Futures.allAsList(futures).get();
    executorService.shutdown();
    Assert.assertTrue(StringUtils.format("Index too small %d, expected %d across %d loops", index.get(), totalIndexSize, loops), index.get() >= totalIndexSize);
    for (int i = 0; i < index.get(); ++i) {
        Assert.assertEquals(i, concurrentIndexible.get(i).intValue());
    }
    concurrentIndexible.clear();
    futures.clear();
}
Also used : ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Ignore(org.junit.Ignore) Test(org.junit.Test) BenchmarkOptions(com.carrotsearch.junitbenchmarks.BenchmarkOptions)
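
The pattern at the heart of this benchmark is worth isolating: MoreExecutors.listeningDecorator wraps any plain ExecutorService so that submit() returns Guava's ListenableFuture, and Futures.allAsList collapses a batch of those futures into a single future that can be awaited with one get(). A minimal, self-contained sketch of that idiom (the class name and pool size are illustrative, not taken from the Druid source):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

public class ListeningDecoratorSketch {
    public static void main(String[] args) throws Exception {
        // Wrap a plain ExecutorService so that submit() returns ListenableFuture.
        ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        List<ListenableFuture<Integer>> futures = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int id = i;
            // A lambda with a value-returning body is inferred as Callable<Integer>.
            futures.add(exec.submit(() -> id * id));
        }
        // One future for the whole batch; get() blocks until every task finishes.
        List<Integer> results = Futures.allAsList(futures).get();
        System.out.println(results);
        exec.shutdown();
    }
}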

Example 87 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class MetricsEmittingQueryProcessingPoolTest method testNonPrioritizedExecutorDelegate.

@Test
public void testNonPrioritizedExecutorDelegate() {
    ListeningExecutorService service = Mockito.mock(ListeningExecutorService.class);
    ExecutorServiceMonitor monitor = new ExecutorServiceMonitor();
    MetricsEmittingQueryProcessingPool processingPool = new MetricsEmittingQueryProcessingPool(service, monitor);
    Assert.assertSame(service, processingPool.delegate());
    ServiceEmitter serviceEmitter = Mockito.mock(ServiceEmitter.class);
    monitor.doMonitor(serviceEmitter);
    Mockito.verifyNoInteractions(serviceEmitter);
}
Also used : ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Test(org.junit.Test)
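
MetricsEmittingQueryProcessingPool is a delegate-style wrapper, which is exactly what the test pins down with assertSame on delegate(). Guava ships ForwardingListeningExecutorService for building such wrappers; a minimal sketch of that shape follows (InstrumentedPool is a hypothetical name, and the metric-emitting behavior of the real Druid class is omitted here):

import com.google.common.util.concurrent.ForwardingListeningExecutorService;
import com.google.common.util.concurrent.ListeningExecutorService;

class InstrumentedPool extends ForwardingListeningExecutorService {
    private final ListeningExecutorService delegate;

    InstrumentedPool(ListeningExecutorService delegate) {
        this.delegate = delegate;
    }

    // The base class forwards every ExecutorService method to this delegate.
    @Override
    public ListeningExecutorService delegate() {
        return delegate;
    }
}

A test can then assert the same invariant as above: Assert.assertSame(pool, new InstrumentedPool(pool).delegate()).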

Example 88 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class HdfsClasspathSetupTest method testConcurrentUpload.

@Test
public void testConcurrentUpload() throws InterruptedException, ExecutionException, TimeoutException {
    final int concurrency = 10;
    ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrency));
    // barrier ensures that all jobs try to add files to the classpath at the same time.
    final CyclicBarrier barrier = new CyclicBarrier(concurrency);
    final Path expectedJarPath = new Path(finalClasspath, dummyJarFile.getName());
    List<ListenableFuture<Boolean>> futures = new ArrayList<>();
    for (int i = 0; i < concurrency; i++) {
        futures.add(pool.submit(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                int id = barrier.await();
                Job job = Job.getInstance(conf, "test-job-" + id);
                Path intermediatePathForJob = new Path(intermediatePath, "job-" + id);
                JobHelper.addJarToClassPath(dummyJarFile, finalClasspath, intermediatePathForJob, localFS, job);
                // check file gets uploaded to final HDFS path
                Assert.assertTrue(localFS.exists(expectedJarPath));
                // check that the intermediate file is not present
                Assert.assertFalse(localFS.exists(new Path(intermediatePathForJob, dummyJarFile.getName())));
                // check file gets added to the classpath
                Assert.assertEquals(expectedJarPath.toString(), job.getConfiguration().get(MRJobConfig.CLASSPATH_FILES));
                return true;
            }
        }));
    }
    Futures.allAsList(futures).get(30, TimeUnit.SECONDS);
    pool.shutdownNow();
}
Also used : Path(org.apache.hadoop.fs.Path) ArrayList(java.util.ArrayList) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Job(org.apache.hadoop.mapreduce.Job) Callable(java.util.concurrent.Callable) CyclicBarrier(java.util.concurrent.CyclicBarrier) Test(org.junit.Test)
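
The CyclicBarrier is what turns ten submitted tasks into a genuinely simultaneous burst: each task parks in await() until all ten have arrived, and the returned arrival index doubles as a unique per-task id. A stripped-down sketch of just that scaffolding, with the contended operation left as a placeholder (the 30-second timeout mirrors the example; the class itself is illustrative):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BarrierStartSketch {
    public static void main(String[] args) throws Exception {
        final int concurrency = 10;
        ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrency));
        // Each task parks in await(); the last arrival releases all of them at once.
        final CyclicBarrier barrier = new CyclicBarrier(concurrency);
        List<ListenableFuture<Integer>> futures = new ArrayList<>();
        for (int i = 0; i < concurrency; i++) {
            futures.add(pool.submit(() -> {
                int id = barrier.await(); // unique arrival index, concurrency-1 down to 0
                // ... the contended operation under test would run here ...
                return id;
            }));
        }
        // Bound the whole batch with a timeout so a stuck barrier fails fast.
        Futures.allAsList(futures).get(30, TimeUnit.SECONDS);
        pool.shutdownNow();
    }
}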

Example 89 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class TestTaskRunner method stop.

@Override
public void stop() {
    stopping = true;
    for (Map.Entry<Integer, ListeningExecutorService> entry : exec.entrySet()) {
        try {
            entry.getValue().shutdown();
        } catch (SecurityException ex) {
            throw new RuntimeException("I can't control my own threads!", ex);
        }
    }
    for (TestTaskRunnerWorkItem item : runningItems) {
        final Task task = item.getTask();
        final long start = System.currentTimeMillis();
        if (taskConfig.isRestoreTasksOnRestart() && task.canRestore()) {
            // Attempt graceful shutdown.
            log.info("Starting graceful shutdown of task[%s].", task.getId());
            try {
                task.stopGracefully(taskConfig);
                final TaskStatus taskStatus = item.getResult().get(new Interval(DateTimes.utc(start), taskConfig.getGracefulShutdownTimeout()).toDurationMillis(), TimeUnit.MILLISECONDS);
                // Ignore status, it doesn't matter for graceful shutdowns.
                log.info("Graceful shutdown of task[%s] finished in %,dms.", task.getId(), System.currentTimeMillis() - start);
                TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), taskStatus);
            } catch (Exception e) {
                String errMsg = "Graceful shutdown of task aborted with exception, see task logs for more information";
                TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.failure(task.getId(), errMsg));
                throw new RE(e, "Graceful shutdown of task[%s] aborted with exception", task.getId());
            }
        } else {
            TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.failure(task.getId(), "Task failure while shutting down gracefully"));
        }
    }
    // Ok, now interrupt everything.
    for (Map.Entry<Integer, ListeningExecutorService> entry : exec.entrySet()) {
        try {
            entry.getValue().shutdownNow();
        } catch (SecurityException ex) {
            throw new RuntimeException("I can't control my own threads!", ex);
        }
    }
}
Also used : Task(org.apache.druid.indexing.common.task.Task) TaskStatus(org.apache.druid.indexer.TaskStatus) RE(org.apache.druid.java.util.common.RE) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Interval(org.joda.time.Interval)
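
The stop() method above is a two-phase shutdown: a polite shutdown() pass that lets in-flight work finish (with per-task graceful handling in between), followed by shutdownNow() to interrupt stragglers. Stripped of the task-status plumbing, the idiom commonly looks like this sketch, which uses awaitTermination as the grace period (names are illustrative):

import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.concurrent.TimeUnit;

final class PoolShutdown {
    // Phase 1: shutdown() stops new submissions and drains in-flight work.
    // Phase 2: shutdownNow() interrupts whatever outlived the grace period.
    static void stop(ListeningExecutorService exec, long graceMillis) {
        exec.shutdown();
        try {
            if (!exec.awaitTermination(graceMillis, TimeUnit.MILLISECONDS)) {
                exec.shutdownNow();
            }
        } catch (InterruptedException e) {
            exec.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }
}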

Example 90 with ListeningExecutorService

use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io.

the class RowBasedGrouperHelper method createGrouperAccumulatorPair.

/**
 * Create a {@link Grouper} that groups according to the dimensions and aggregators in "query", along with
 * an {@link Accumulator} that accepts ResultRows and forwards them to the grouper.
 *
 * The pair will operate in one of two modes:
 *
 * 1) Combining mode (used if "subquery" is null). In this mode, filters from the "query" are ignored, and
 * its aggregators are converted into combining form. The input ResultRows are assumed to be partially-grouped
 * results originating from the provided "query".
 *
 * 2) Subquery mode (used if "subquery" is nonnull). In this mode, filters from the "query" (both intervals
 * and dim filters) are respected, and its aggregators are used in standard (not combining) form. The input
 * ResultRows are assumed to be results originating from the provided "subquery".
 *
 * @param query               query that we are grouping for
 * @param subquery            optional subquery that we are receiving results from (see combining vs. subquery
 *                            mode above)
 * @param config              groupBy query config
 * @param bufferSupplier      supplier of merge buffers
 * @param combineBufferHolder holder of combine buffers. Unused if concurrencyHint = -1, and may be null in that case
 * @param concurrencyHint     -1 for single-threaded Grouper, >=1 for concurrent Grouper
 * @param temporaryStorage    temporary storage used for spilling from the Grouper
 * @param spillMapper         object mapper used for spilling from the Grouper
 * @param grouperSorter       executor service used for parallel combining. Unused if concurrencyHint = -1, and may
 *                            be null in that case
 * @param priority            query priority
 * @param hasQueryTimeout     whether or not this query has a timeout
 * @param queryTimeoutAt      when this query times out, in milliseconds since the epoch
 * @param mergeBufferSize     size of the merge buffers from "bufferSupplier"
 */
public static Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> createGrouperAccumulatorPair(
        final GroupByQuery query,
        @Nullable final GroupByQuery subquery,
        final GroupByQueryConfig config,
        final Supplier<ByteBuffer> bufferSupplier,
        @Nullable final ReferenceCountingResourceHolder<ByteBuffer> combineBufferHolder,
        final int concurrencyHint,
        final LimitedTemporaryStorage temporaryStorage,
        final ObjectMapper spillMapper,
        @Nullable final ListeningExecutorService grouperSorter,
        final int priority,
        final boolean hasQueryTimeout,
        final long queryTimeoutAt,
        final int mergeBufferSize) {
    // concurrencyHint >= 1 for concurrent groupers, -1 for single-threaded
    Preconditions.checkArgument(concurrencyHint >= 1 || concurrencyHint == -1, "invalid concurrencyHint");
    if (concurrencyHint >= 1) {
        Preconditions.checkNotNull(grouperSorter, "grouperSorter executor must be provided");
    }
    // See method-level javadoc; we go into combining mode if there is no subquery.
    final boolean combining = subquery == null;
    final List<ColumnType> valueTypes = DimensionHandlerUtils.getValueTypesFromDimensionSpecs(query.getDimensions());
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final boolean includeTimestamp = query.getResultRowHasTimestamp();
    final ThreadLocal<ResultRow> columnSelectorRow = new ThreadLocal<>();
    ColumnSelectorFactory columnSelectorFactory = createResultRowBasedColumnSelectorFactory(combining ? query : subquery, columnSelectorRow::get, RowSignature.Finalization.UNKNOWN);
    // Apply virtual columns if we are in subquery (non-combining) mode.
    if (!combining) {
        columnSelectorFactory = query.getVirtualColumns().wrap(columnSelectorFactory);
    }
    final boolean willApplyLimitPushDown = query.isApplyLimitPushDown();
    final DefaultLimitSpec limitSpec = willApplyLimitPushDown ? (DefaultLimitSpec) query.getLimitSpec() : null;
    boolean sortHasNonGroupingFields = false;
    if (willApplyLimitPushDown) {
        sortHasNonGroupingFields = DefaultLimitSpec.sortingOrderHasNonGroupingFields(limitSpec, query.getDimensions());
    }
    final AggregatorFactory[] aggregatorFactories;
    if (combining) {
        aggregatorFactories = query.getAggregatorSpecs().stream().map(AggregatorFactory::getCombiningFactory).toArray(AggregatorFactory[]::new);
    } else {
        aggregatorFactories = query.getAggregatorSpecs().toArray(new AggregatorFactory[0]);
    }
    final Grouper.KeySerdeFactory<RowBasedKey> keySerdeFactory = new RowBasedKeySerdeFactory(includeTimestamp, query.getContextSortByDimsFirst(), query.getDimensions(), querySpecificConfig.getMaxMergingDictionarySize() / (concurrencyHint == -1 ? 1 : concurrencyHint), valueTypes, aggregatorFactories, limitSpec);
    final Grouper<RowBasedKey> grouper;
    if (concurrencyHint == -1) {
        grouper = new SpillingGrouper<>(bufferSupplier, keySerdeFactory, columnSelectorFactory, aggregatorFactories, querySpecificConfig.getBufferGrouperMaxSize(), querySpecificConfig.getBufferGrouperMaxLoadFactor(), querySpecificConfig.getBufferGrouperInitialBuckets(), temporaryStorage, spillMapper, true, limitSpec, sortHasNonGroupingFields, mergeBufferSize);
    } else {
        // use entire dictionary space for combining key serde
        final Grouper.KeySerdeFactory<RowBasedKey> combineKeySerdeFactory = new RowBasedKeySerdeFactory(includeTimestamp, query.getContextSortByDimsFirst(), query.getDimensions(), querySpecificConfig.getMaxMergingDictionarySize(), valueTypes, aggregatorFactories, limitSpec);
        grouper = new ConcurrentGrouper<>(querySpecificConfig, bufferSupplier, combineBufferHolder, keySerdeFactory, combineKeySerdeFactory, columnSelectorFactory, aggregatorFactories, temporaryStorage, spillMapper, concurrencyHint, limitSpec, sortHasNonGroupingFields, grouperSorter, priority, hasQueryTimeout, queryTimeoutAt);
    }
    final int keySize = includeTimestamp ? query.getDimensions().size() + 1 : query.getDimensions().size();
    final ValueExtractFunction valueExtractFn = makeValueExtractFunction(query, combining, includeTimestamp, columnSelectorFactory, valueTypes);
    final Predicate<ResultRow> rowPredicate;
    if (combining) {
        // Filters are not applied in combining mode.
        rowPredicate = row -> true;
    } else {
        rowPredicate = getResultRowPredicate(query, subquery);
    }
    final Accumulator<AggregateResult, ResultRow> accumulator = (priorResult, row) -> {
        BaseQuery.checkInterrupted();
        if (priorResult != null && !priorResult.isOk()) {
            // Pass-through error returns without doing more work.
            return priorResult;
        }
        if (!grouper.isInitialized()) {
            grouper.init();
        }
        if (!rowPredicate.test(row)) {
            return AggregateResult.ok();
        }
        columnSelectorRow.set(row);
        final Comparable[] key = new Comparable[keySize];
        valueExtractFn.apply(row, key);
        final AggregateResult aggregateResult = grouper.aggregate(new RowBasedKey(key));
        columnSelectorRow.set(null);
        return aggregateResult;
    };
    return new Pair<>(grouper, accumulator);
}
Also used : Arrays(java.util.Arrays) Comparators(org.apache.druid.java.util.common.guava.Comparators) IntArrayUtils(org.apache.druid.common.utils.IntArrayUtils) DimensionHandlerUtils(org.apache.druid.segment.DimensionHandlerUtils) ColumnValueSelector(org.apache.druid.segment.ColumnValueSelector) AllGranularity(org.apache.druid.java.util.common.granularity.AllGranularity) IndexedInts(org.apache.druid.segment.data.IndexedInts) ByteBuffer(java.nio.ByteBuffer) Pair(org.apache.druid.java.util.common.Pair) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) BaseFloatColumnValueSelector(org.apache.druid.segment.BaseFloatColumnValueSelector) OrderByColumnSpec(org.apache.druid.query.groupby.orderby.OrderByColumnSpec) ColumnSelectorFactory(org.apache.druid.segment.ColumnSelectorFactory) RowAdapter(org.apache.druid.segment.RowAdapter) ColumnSelectorStrategyFactory(org.apache.druid.query.dimension.ColumnSelectorStrategyFactory) JsonValue(com.fasterxml.jackson.annotation.JsonValue) GroupingAggregatorFactory(org.apache.druid.query.aggregation.GroupingAggregatorFactory) BufferComparator(org.apache.druid.query.groupby.epinephelinae.Grouper.BufferComparator) Object2IntOpenHashMap(it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap) IAE(org.apache.druid.java.util.common.IAE) ToLongFunction(java.util.function.ToLongFunction) Longs(com.google.common.primitives.Longs) RowBasedColumnSelectorFactory(org.apache.druid.segment.RowBasedColumnSelectorFactory) ResultRow(org.apache.druid.query.groupby.ResultRow) Predicate(java.util.function.Predicate) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) ValueType(org.apache.druid.segment.column.ValueType) Collectors(java.util.stream.Collectors) List(java.util.List) ColumnCapabilitiesImpl(org.apache.druid.segment.column.ColumnCapabilitiesImpl) BooleanValueMatcher(org.apache.druid.segment.filter.BooleanValueMatcher) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) ColumnCapabilities(org.apache.druid.segment.column.ColumnCapabilities) BaseDoubleColumnValueSelector(org.apache.druid.segment.BaseDoubleColumnValueSelector) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Accumulator(org.apache.druid.java.util.common.guava.Accumulator) IntStream(java.util.stream.IntStream) ColumnSelectorPlus(org.apache.druid.query.ColumnSelectorPlus) ComparableList(org.apache.druid.segment.data.ComparableList) Supplier(com.google.common.base.Supplier) BaseQuery(org.apache.druid.query.BaseQuery) Function(java.util.function.Function) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Interval(org.joda.time.Interval) SettableSupplier(org.apache.druid.common.guava.SettableSupplier) ColumnSelectorStrategy(org.apache.druid.query.dimension.ColumnSelectorStrategy) StringComparators(org.apache.druid.query.ordering.StringComparators) ComparableStringArray(org.apache.druid.segment.data.ComparableStringArray) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) DimensionSelector(org.apache.druid.segment.DimensionSelector) Nullable(javax.annotation.Nullable) ValueMatcher(org.apache.druid.query.filter.ValueMatcher) ColumnInspector(org.apache.druid.segment.ColumnInspector) StringComparator(org.apache.druid.query.ordering.StringComparator) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) DateTime(org.joda.time.DateTime) Ints(com.google.common.primitives.Ints) BaseLongColumnValueSelector(org.apache.druid.segment.BaseLongColumnValueSelector) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) NullHandling(org.apache.druid.common.config.NullHandling) RowSignature(org.apache.druid.segment.column.RowSignature) Closeable(java.io.Closeable) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) ColumnType(org.apache.druid.segment.column.ColumnType) Preconditions(com.google.common.base.Preconditions) BitSet(java.util.BitSet) IntArrays(it.unimi.dsi.fastutil.ints.IntArrays) Comparator(java.util.Comparator) Filters(org.apache.druid.segment.filter.Filters) ReferenceCountingResourceHolder(org.apache.druid.collections.ReferenceCountingResourceHolder) Filter(org.apache.druid.query.filter.Filter)
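
The concurrencyHint contract spelled out in the javadoc (-1 means single-threaded and the executor may be null; >= 1 means concurrent and the executor is mandatory) is easy to get wrong at call sites. A hedged sketch of one way a caller might encode that contract (resolveSorter is a hypothetical helper, not Druid API; the real code builds a single-threaded SpillingGrouper in the -1 case rather than using a direct executor):

import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import javax.annotation.Nullable;
import java.util.concurrent.Executors;

final class ConcurrencyHintSketch {
    // Mirrors the parameter contract: -1 means single-threaded (no executor
    // needed); >= 1 means concurrent work that must be given an executor.
    static ListeningExecutorService resolveSorter(int concurrencyHint, @Nullable ListeningExecutorService grouperSorter) {
        Preconditions.checkArgument(concurrencyHint >= 1 || concurrencyHint == -1, "invalid concurrencyHint");
        if (concurrencyHint == -1) {
            return MoreExecutors.newDirectExecutorService(); // run inline, single-threaded
        }
        return Preconditions.checkNotNull(grouperSorter, "grouperSorter executor must be provided");
    }

    public static void main(String[] args) {
        ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        System.out.println(resolveSorter(-1, null)); // fine: direct executor stands in
        System.out.println(resolveSorter(2, pool)); // fine: caller-supplied pool
        pool.shutdown();
    }
}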

Aggregations

ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 201
Test (org.junit.Test): 115
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 75
ArrayList (java.util.ArrayList): 43
CountDownLatch (java.util.concurrent.CountDownLatch): 29
ExecutorService (java.util.concurrent.ExecutorService): 28
IOException (java.io.IOException): 25
ExecutionException (java.util.concurrent.ExecutionException): 25
Interval (org.joda.time.Interval): 25
DateTime (org.joda.time.DateTime): 23
List (java.util.List): 21
Callable (java.util.concurrent.Callable): 20
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 20
DruidServer (io.druid.client.DruidServer): 18
DataSegment (io.druid.timeline.DataSegment): 18
DruidServer (org.apache.druid.client.DruidServer): 17
ImmutableMap (com.google.common.collect.ImmutableMap): 16
File (java.io.File): 16
Map (java.util.Map): 16
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 15