Example 6 with GroupByQueryConfig

use of io.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.

the class GroupByMergedQueryRunner method run.

@Override
public Sequence<T> run(final Query<T> queryParam, final Map<String, Object> responseContext) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = configSupplier.get().withOverrides(query);
    final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();
    final Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> indexAccumulatorPair =
        GroupByQueryHelper.createIndexAccumulatorPair(query, querySpecificConfig, bufferPool, true);
    final Pair<Queue, Accumulator<Queue, T>> bySegmentAccumulatorPair = GroupByQueryHelper.createBySegmentAccumulatorPair();
    final boolean bySegment = BaseQuery.getContextBySegment(query, false);
    final int priority = BaseQuery.getContextPriority(query, 0);
    ListenableFuture<List<Void>> futures = Futures.allAsList(Lists.newArrayList(Iterables.transform(queryables, new Function<QueryRunner<T>, ListenableFuture<Void>>() {

        @Override
        public ListenableFuture<Void> apply(final QueryRunner<T> input) {
            if (input == null) {
                throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
            }
            ListenableFuture<Void> future = exec.submit(new AbstractPrioritizedCallable<Void>(priority) {

                @Override
                public Void call() throws Exception {
                    try {
                        if (bySegment) {
                            input.run(queryParam, responseContext).accumulate(bySegmentAccumulatorPair.lhs, bySegmentAccumulatorPair.rhs);
                        } else {
                            input.run(queryParam, responseContext).accumulate(indexAccumulatorPair.lhs, indexAccumulatorPair.rhs);
                        }
                        return null;
                    } catch (QueryInterruptedException e) {
                        throw Throwables.propagate(e);
                    } catch (Exception e) {
                        log.error(e, "Exception with one of the sequences!");
                        throw Throwables.propagate(e);
                    }
                }
            });
            if (isSingleThreaded) {
                waitForFutureCompletion(query, future, indexAccumulatorPair.lhs);
            }
            return future;
        }
    })));
    if (!isSingleThreaded) {
        waitForFutureCompletion(query, futures, indexAccumulatorPair.lhs);
    }
    if (bySegment) {
        return Sequences.simple(bySegmentAccumulatorPair.lhs);
    }
    return Sequences.withBaggage(Sequences.simple(Iterables.transform(indexAccumulatorPair.lhs.iterableWithPostAggregations(null, query.isDescending()), new Function<Row, T>() {

        @Override
        public T apply(Row input) {
            return (T) input;
        }
    })), indexAccumulatorPair.lhs);
}
Also used : Accumulator(io.druid.java.util.common.guava.Accumulator) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) TimeoutException(java.util.concurrent.TimeoutException) CancellationException(java.util.concurrent.CancellationException) ExecutionException(java.util.concurrent.ExecutionException) GroupByQuery(io.druid.query.groupby.GroupByQuery) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ISE(io.druid.java.util.common.ISE) List(java.util.List) Row(io.druid.data.input.Row) Queue(java.util.Queue)
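
For orientation, here is a minimal sketch (not from the Druid source) of the override pattern at the top of run(): the base config comes from a Supplier (com.google.common.base), and withOverrides(query) layers any per-query context settings on top before isSingleThreaded() is consulted.

// Sketch only; GroupByQueryConfig, Supplier, and Suppliers as used in these examples.
GroupByQueryConfig baseConfig = new GroupByQueryConfig();
baseConfig.setSingleThreaded(false);
Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(baseConfig);

// "query" stands for any GroupByQuery built elsewhere; its context may
// override fields of the base config.
GroupByQueryConfig effective = configSupplier.get().withOverrides(query);
if (effective.isSingleThreaded()) {
    // each per-segment future is awaited as soon as it is submitted
} else {
    // all futures are submitted first, then awaited together
}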

Example 7 with GroupByQueryConfig

use of io.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.

the class RowBasedGrouperHelper method createGrouperAccumulatorPair.

/**
   * If isInputRaw is true, transformations such as timestamp truncation and extraction functions have not
   * yet been applied to the input rows. This happens, for example, in a nested query when an extraction
   * function in the outer query is applied to a field of the inner query. This method must apply those
   * transformations.
   */
public static Pair<Grouper<RowBasedKey>, Accumulator<Grouper<RowBasedKey>, Row>> createGrouperAccumulatorPair(
    final GroupByQuery query,
    final boolean isInputRaw,
    final Map<String, ValueType> rawInputRowSignature,
    final GroupByQueryConfig config,
    final Supplier<ByteBuffer> bufferSupplier,
    final int concurrencyHint,
    final LimitedTemporaryStorage temporaryStorage,
    final ObjectMapper spillMapper,
    final AggregatorFactory[] aggregatorFactories
) {
    // concurrencyHint >= 1 for concurrent groupers, -1 for single-threaded
    Preconditions.checkArgument(concurrencyHint >= 1 || concurrencyHint == -1, "invalid concurrencyHint");
    final List<ValueType> valueTypes = DimensionHandlerUtils.getValueTypesFromDimensionSpecs(query.getDimensions());
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final boolean includeTimestamp = GroupByStrategyV2.getUniversalTimestamp(query) == null;
    final Grouper.KeySerdeFactory<RowBasedKey> keySerdeFactory = new RowBasedKeySerdeFactory(
        includeTimestamp,
        query.getContextSortByDimsFirst(),
        query.getDimensions().size(),
        querySpecificConfig.getMaxMergingDictionarySize() / (concurrencyHint == -1 ? 1 : concurrencyHint),
        valueTypes
    );
    final ThreadLocal<Row> columnSelectorRow = new ThreadLocal<>();
    final ColumnSelectorFactory columnSelectorFactory = query.getVirtualColumns().wrap(
        RowBasedColumnSelectorFactory.create(columnSelectorRow, rawInputRowSignature)
    );
    final Grouper<RowBasedKey> grouper;
    if (concurrencyHint == -1) {
        grouper = new SpillingGrouper<>(
            bufferSupplier,
            keySerdeFactory,
            columnSelectorFactory,
            aggregatorFactories,
            querySpecificConfig.getBufferGrouperMaxSize(),
            querySpecificConfig.getBufferGrouperMaxLoadFactor(),
            querySpecificConfig.getBufferGrouperInitialBuckets(),
            temporaryStorage,
            spillMapper,
            true
        );
    } else {
        grouper = new ConcurrentGrouper<>(
            bufferSupplier,
            keySerdeFactory,
            columnSelectorFactory,
            aggregatorFactories,
            querySpecificConfig.getBufferGrouperMaxSize(),
            querySpecificConfig.getBufferGrouperMaxLoadFactor(),
            querySpecificConfig.getBufferGrouperInitialBuckets(),
            temporaryStorage,
            spillMapper,
            concurrencyHint
        );
    }
    final int keySize = includeTimestamp ? query.getDimensions().size() + 1 : query.getDimensions().size();
    final ValueExtractFunction valueExtractFn = makeValueExtractFunction(
        query, isInputRaw, includeTimestamp, columnSelectorFactory, rawInputRowSignature, valueTypes
    );
    final Accumulator<Grouper<RowBasedKey>, Row> accumulator = new Accumulator<Grouper<RowBasedKey>, Row>() {

        @Override
        public Grouper<RowBasedKey> accumulate(final Grouper<RowBasedKey> theGrouper, final Row row) {
            BaseQuery.checkInterrupted();
            if (theGrouper == null) {
                // Pass-through null returns without doing more work.
                return null;
            }
            if (!theGrouper.isInitialized()) {
                theGrouper.init();
            }
            columnSelectorRow.set(row);
            final Comparable[] key = new Comparable[keySize];
            valueExtractFn.apply(row, key);
            final boolean didAggregate = theGrouper.aggregate(new RowBasedKey(key));
            if (!didAggregate) {
                // null return means grouping resources were exhausted.
                return null;
            }
            columnSelectorRow.set(null);
            return theGrouper;
        }
    };
    return new Pair<>(grouper, accumulator);
}
Also used : Accumulator(io.druid.java.util.common.guava.Accumulator) RowBasedColumnSelectorFactory(io.druid.query.groupby.RowBasedColumnSelectorFactory) ColumnSelectorFactory(io.druid.segment.ColumnSelectorFactory) ValueType(io.druid.segment.column.ValueType) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) Row(io.druid.data.input.Row) MapBasedRow(io.druid.data.input.MapBasedRow) Pair(io.druid.java.util.common.Pair)
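
The concurrencyHint convention above is worth isolating: -1 selects the single-threaded SpillingGrouper and leaves the merging dictionary budget whole, while any value >= 1 selects ConcurrentGrouper and splits the budget per worker. A minimal sketch of that arithmetic (the helper name is ours, not Druid's):

// Hypothetical helper mirroring the keySerdeFactory sizing above.
static long perWorkerDictionarySize(GroupByQueryConfig config, int concurrencyHint) {
    // -1 means single-threaded; any value >= 1 means that many concurrent workers.
    Preconditions.checkArgument(concurrencyHint >= 1 || concurrencyHint == -1, "invalid concurrencyHint");
    int workers = (concurrencyHint == -1) ? 1 : concurrencyHint;
    return config.getMaxMergingDictionarySize() / workers;
}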

Example 8 with GroupByQueryConfig

use of io.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.

the class DistinctCountGroupByQueryTest method testGroupByWithDistinctCountAgg.

@Test
public void testGroupByWithDistinctCountAgg() throws Exception {
    final GroupByQueryConfig config = new GroupByQueryConfig();
    config.setMaxIntermediateRows(10000);
    final GroupByQueryRunnerFactory factory = GroupByQueryRunnerTest.makeQueryRunnerFactory(config);
    IncrementalIndex index = new OnheapIncrementalIndex(
        0, Granularities.SECOND, new AggregatorFactory[] { new CountAggregatorFactory("cnt") }, 1000
    );
    String visitor_id = "visitor_id";
    String client_type = "client_type";
    long timestamp = System.currentTimeMillis();
    index.add(new MapBasedInputRow(
        timestamp,
        Lists.newArrayList(visitor_id, client_type),
        ImmutableMap.<String, Object>of(visitor_id, "0", client_type, "iphone")
    ));
    index.add(new MapBasedInputRow(
        timestamp + 1,
        Lists.newArrayList(visitor_id, client_type),
        ImmutableMap.<String, Object>of(visitor_id, "1", client_type, "iphone")
    ));
    index.add(new MapBasedInputRow(
        timestamp + 2,
        Lists.newArrayList(visitor_id, client_type),
        ImmutableMap.<String, Object>of(visitor_id, "2", client_type, "android")
    ));
    GroupByQuery query = new GroupByQuery.Builder()
        .setDataSource(QueryRunnerTestHelper.dataSource)
        .setGranularity(QueryRunnerTestHelper.allGran)
        .setDimensions(Arrays.<DimensionSpec>asList(new DefaultDimensionSpec(client_type, client_type)))
        .setInterval(QueryRunnerTestHelper.fullOnInterval)
        .setLimitSpec(new DefaultLimitSpec(
            Lists.newArrayList(new OrderByColumnSpec(client_type, OrderByColumnSpec.Direction.DESCENDING)),
            10
        ))
        .setAggregatorSpecs(Lists.newArrayList(
            QueryRunnerTestHelper.rowsCount,
            new DistinctCountAggregatorFactory("UV", visitor_id, null)
        ))
        .build();
    final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
    Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, factory.createRunner(incrementalIndexSegment), query);
    List<Row> expectedResults = Arrays.asList(
        GroupByQueryRunnerTestHelper.createExpectedRow("1970-01-01T00:00:00.000Z", client_type, "iphone", "UV", 2L, "rows", 2L),
        GroupByQueryRunnerTestHelper.createExpectedRow("1970-01-01T00:00:00.000Z", client_type, "android", "UV", 1L, "rows", 1L)
    );
    TestHelper.assertExpectedObjects(expectedResults, results, "distinct-count");
}
Also used : GroupByQueryRunnerFactory(io.druid.query.groupby.GroupByQueryRunnerFactory) DefaultLimitSpec(io.druid.query.groupby.orderby.DefaultLimitSpec) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) IncrementalIndexSegment(io.druid.segment.IncrementalIndexSegment) DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) Segment(io.druid.segment.Segment) OrderByColumnSpec(io.druid.query.groupby.orderby.OrderByColumnSpec) GroupByQuery(io.druid.query.groupby.GroupByQuery) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) MapBasedInputRow(io.druid.data.input.MapBasedInputRow) Row(io.druid.data.input.Row) Test(org.junit.Test) GroupByQueryRunnerTest(io.druid.query.groupby.GroupByQueryRunnerTest)
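
For readers unfamiliar with the createExpectedRow helper: it takes a timestamp string followed by alternating column/value pairs. A hedged equivalent of the first expected row as a literal MapBasedRow (a sketch, assuming org.joda.time.DateTime and the MapBasedRow constructor used elsewhere in these examples):

// Equivalent literal for the first expected row: two distinct visitor_ids
// ("0" and "1") were ingested with client_type "iphone", so UV = 2, rows = 2.
Row expectedIphone = new MapBasedRow(
    new DateTime("1970-01-01T00:00:00.000Z"),
    ImmutableMap.<String, Object>of("client_type", "iphone", "UV", 2L, "rows", 2L)
);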

Example 9 with GroupByQueryConfig

use of io.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.

the class GroupByBenchmark method setup.

@Setup(Level.Trial)
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + +System.currentTimeMillis());
    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    executorService = Execs.multiThreaded(numProcessingThreads, "GroupByThreadPool[%d]");
    setupQueries();
    String[] schemaQuery = schemaAndQuery.split("\\.");
    String schemaName = schemaQuery[0];
    String queryName = schemaQuery[1];
    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
    final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);
    tmpDir = Files.createTempDir();
    log.info("Using temp dir: %s", tmpDir.getAbsolutePath());
    // queryableIndexes   -> numSegments worth of on-disk segments
    // anIncrementalIndex -> the last incremental index
    anIncrementalIndex = null;
    queryableIndexes = new ArrayList<>(numSegments);
    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment %d/%d", i + 1, numSegments);
        final IncrementalIndex index = makeIncIndex(schemaInfo.isWithRollup());
        for (int j = 0; j < rowsPerSegment; j++) {
            final InputRow row = dataGenerator.nextRow();
            if (j % 20000 == 0) {
                log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
            }
            index.add(row);
        }
        log.info("%,d/%,d rows generated, persisting segment %d/%d.", (i + 1) * rowsPerSegment, rowsPerSegment * numSegments, i + 1, numSegments);
        final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec());
        queryableIndexes.add(INDEX_IO.loadIndex(file));
        if (i == numSegments - 1) {
            anIncrementalIndex = index;
        } else {
            index.close();
        }
    }
    StupidPool<ByteBuffer> bufferPool = new StupidPool<>(
        "GroupByBenchmark-computeBufferPool",
        new OffheapBufferGenerator("compute", 250_000_000),
        0,
        Integer.MAX_VALUE
    );
    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new BlockingPool<>(new OffheapBufferGenerator("merge", 250_000_000), 2);
    final GroupByQueryConfig config = new GroupByQueryConfig() {

        @Override
        public String getDefaultStrategy() {
            return defaultStrategy;
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return initialBuckets;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);
    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {

        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return numProcessingThreads;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };
    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
        configSupplier,
        new GroupByStrategyV1(
            configSupplier,
            new GroupByQueryEngine(configSupplier, bufferPool),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER,
            bufferPool
        ),
        new GroupByStrategyV2(
            druidProcessingConfig,
            configSupplier,
            bufferPool,
            mergePool,
            new ObjectMapper(new SmileFactory()),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER
        )
    );
    factory = new GroupByQueryRunnerFactory(
        strategySelector,
        new GroupByQueryQueryToolChest(strategySelector, QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator())
    );
}
Also used : GroupByStrategySelector(io.druid.query.groupby.strategy.GroupByStrategySelector) IndexSpec(io.druid.segment.IndexSpec) BenchmarkDataGenerator(io.druid.benchmark.datagen.BenchmarkDataGenerator) HyperUniquesSerde(io.druid.query.aggregation.hyperloglog.HyperUniquesSerde) GroupByQueryQueryToolChest(io.druid.query.groupby.GroupByQueryQueryToolChest) GroupByStrategyV1(io.druid.query.groupby.strategy.GroupByStrategyV1) GroupByStrategyV2(io.druid.query.groupby.strategy.GroupByStrategyV2) GroupByQueryEngine(io.druid.query.groupby.GroupByQueryEngine) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) GroupByQueryRunnerFactory(io.druid.query.groupby.GroupByQueryRunnerFactory) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) ByteBuffer(java.nio.ByteBuffer) SmileFactory(com.fasterxml.jackson.dataformat.smile.SmileFactory) OffheapBufferGenerator(io.druid.offheap.OffheapBufferGenerator) InputRow(io.druid.data.input.InputRow) BlockingPool(io.druid.collections.BlockingPool) StupidPool(io.druid.collections.StupidPool) DruidProcessingConfig(io.druid.query.DruidProcessingConfig) File(java.io.File) Setup(org.openjdk.jmh.annotations.Setup)
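
Beyond the getDefaultStrategy() override in the anonymous config above, the strategy can also be forced per query through the query context. A sketch, with the caveat that the exact context key is an assumption on our part rather than something shown in this benchmark:

// Assumed context key: GroupByStrategySelector reads a "groupByStrategy"
// entry from the query context in this Druid line; verify against your version.
Query<Row> v2Query = query.withOverriddenContext(
    ImmutableMap.<String, Object>of("groupByStrategy", "v2")
);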

Example 10 with GroupByQueryConfig

use of io.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.

the class RealtimeManagerTest method initFactory.

private static GroupByQueryRunnerFactory initFactory() {
    final GroupByQueryConfig config = new GroupByQueryConfig();
    config.setMaxIntermediateRows(10000);
    return GroupByQueryRunnerTest.makeQueryRunnerFactory(config);
}
Also used : GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig)
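
A usage sketch for the factory this helper returns, mirroring the pattern in Example 8 (the segment and query here are placeholders, not part of RealtimeManagerTest):

// Hypothetical test body: drive a segment through the tuned factory.
GroupByQueryRunnerFactory factory = initFactory();
QueryRunner<Row> runner = factory.createRunner(segment); // any io.druid.segment.Segment
Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);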

Aggregations

GroupByQueryConfig (io.druid.query.groupby.GroupByQueryConfig): 12 uses
Row (io.druid.data.input.Row): 6 uses
GroupByQuery (io.druid.query.groupby.GroupByQuery): 6 uses
Accumulator (io.druid.java.util.common.guava.Accumulator): 4 uses
GroupByQueryRunnerFactory (io.druid.query.groupby.GroupByQueryRunnerFactory): 4 uses
IncrementalIndex (io.druid.segment.incremental.IncrementalIndex): 4 uses
File (java.io.File): 4 uses
List (java.util.List): 4 uses
Pair (io.druid.java.util.common.Pair): 3 uses
QueryRunner (io.druid.query.QueryRunner): 3 uses
DefaultDimensionSpec (io.druid.query.dimension.DefaultDimensionSpec): 3 uses
GroupByQueryRunnerTest (io.druid.query.groupby.GroupByQueryRunnerTest): 3 uses
OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex): 3 uses
ByteBuffer (java.nio.ByteBuffer): 3 uses
Test (org.junit.Test): 3 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2 uses
SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory): 2 uses
ImmutableList (com.google.common.collect.ImmutableList): 2 uses
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2 uses
BenchmarkDataGenerator (io.druid.benchmark.datagen.BenchmarkDataGenerator): 2 uses