Example 61 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

From the class SketchAggregationWithSimpleDataTest, method testSimpleDataIngestAndTimeseriesQuery.

@Test
public void testSimpleDataIngestAndTimeseriesQuery() throws Exception {
    AggregationTestHelper timeseriesQueryAggregationTestHelper =
            AggregationTestHelper.createTimeseriesQueryAggregationTestHelper(sm.getJacksonModules(), tempFolder);
    // Run the timeseries query loaded from the classpath against both test segments.
    Sequence seq = timeseriesQueryAggregationTestHelper.runQueryOnSegments(
            ImmutableList.of(s1, s2),
            (Query) SketchAggregationTest.readQueryFromClasspath(
                    "timeseries_query.json",
                    timeseriesQueryAggregationTestHelper.getObjectMapper(),
                    vectorize
            )
    );
    // Materialize the lazy Sequence; the query should yield exactly one result row.
    Result<TimeseriesResultValue> result = (Result<TimeseriesResultValue>) Iterables.getOnlyElement(seq.toList());
    Assert.assertEquals(DateTimes.of("2014-10-20T00:00:00.000Z"), result.getTimestamp());
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketch_count"), 0.01);
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketchEstimatePostAgg"), 0.01);
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketchUnionPostAggEstimate"), 0.01);
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketchIntersectionPostAggEstimate"), 0.01);
    Assert.assertEquals(0.0, result.getValue().getDoubleMetric("sketchAnotBPostAggEstimate"), 0.01);
    Assert.assertEquals(0.0, result.getValue().getDoubleMetric("non_existing_col_validation"), 0.01);
}
Also used : TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) AggregationTestHelper(org.apache.druid.query.aggregation.AggregationTestHelper) Sequence(org.apache.druid.java.util.common.guava.Sequence) Result(org.apache.druid.query.Result) GroupByQueryRunnerTest(org.apache.druid.query.groupby.GroupByQueryRunnerTest) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
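
For context on the Sequence type these examples center on: Druid's Sequence is a lazy, resource-managing alternative to Iterator, and toList() is a convenience that materializes every element. Below is a minimal, self-contained sketch of the alternative accumulate-based consumption style, which folds over results without materializing them; the class name SequenceAccumulateSketch is ours, purely for illustration.

import java.util.Arrays;

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;

public class SequenceAccumulateSketch {
    public static void main(String[] args) {
        // Build a trivial Sequence from a list, purely for illustration.
        Sequence<Integer> seq = Sequences.simple(Arrays.asList(1, 2, 3));
        // accumulate(initValue, accumulator) folds left to right without
        // materializing the whole sequence the way toList() does above.
        Integer sum = seq.accumulate(0, (accumulated, value) -> accumulated + value);
        // Prints 6.
        System.out.println(sum);
    }
}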

Example 62 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

From the class SketchAggregationWithSimpleDataTest, method testTopNQueryWithSketchConstant.

@Test
public void testTopNQueryWithSketchConstant() throws Exception {
    AggregationTestHelper topNQueryAggregationTestHelper =
            AggregationTestHelper.createTopNQueryAggregationTestHelper(sm.getJacksonModules(), tempFolder);
    // Run the topN query loaded from the classpath against both test segments.
    Sequence seq = topNQueryAggregationTestHelper.runQueryOnSegments(
            ImmutableList.of(s1, s2),
            (Query) SketchAggregationTest.readQueryFromClasspath(
                    "topn_query_sketch_const.json",
                    topNQueryAggregationTestHelper.getObjectMapper(),
                    vectorize
            )
    );
    // Materialize the lazy Sequence; the query should yield exactly one result row.
    Result<TopNResultValue> result = (Result<TopNResultValue>) Iterables.getOnlyElement(seq.toList());
    Assert.assertEquals(DateTimes.of("2014-10-20T00:00:00.000Z"), result.getTimestamp());
    DimensionAndMetricValueExtractor value1 = Iterables.get(result.getValue().getValue(), 0);
    Assert.assertEquals(38.0, value1.getDoubleMetric("sketch_count"), 0.01);
    Assert.assertEquals(38.0, value1.getDoubleMetric("sketchEstimatePostAgg"), 0.01);
    Assert.assertEquals(2.0, value1.getDoubleMetric("sketchEstimatePostAggForSketchConstant"), 0.01);
    Assert.assertEquals(39.0, value1.getDoubleMetric("sketchUnionPostAggEstimate"), 0.01);
    Assert.assertEquals(1.0, value1.getDoubleMetric("sketchIntersectionPostAggEstimate"), 0.01);
    Assert.assertEquals(37.0, value1.getDoubleMetric("sketchAnotBPostAggEstimate"), 0.01);
    Assert.assertEquals("product_3", value1.getDimensionValue("product"));
    DimensionAndMetricValueExtractor value2 = Iterables.get(result.getValue().getValue(), 1);
    Assert.assertEquals(42.0, value2.getDoubleMetric("sketch_count"), 0.01);
    Assert.assertEquals(42.0, value2.getDoubleMetric("sketchEstimatePostAgg"), 0.01);
    Assert.assertEquals(2.0, value2.getDoubleMetric("sketchEstimatePostAggForSketchConstant"), 0.01);
    Assert.assertEquals(42.0, value2.getDoubleMetric("sketchUnionPostAggEstimate"), 0.01);
    Assert.assertEquals(2.0, value2.getDoubleMetric("sketchIntersectionPostAggEstimate"), 0.01);
    Assert.assertEquals(40.0, value2.getDoubleMetric("sketchAnotBPostAggEstimate"), 0.01);
    Assert.assertEquals("product_1", value2.getDimensionValue("product"));
    DimensionAndMetricValueExtractor value3 = Iterables.get(result.getValue().getValue(), 2);
    Assert.assertEquals(42.0, value3.getDoubleMetric("sketch_count"), 0.01);
    Assert.assertEquals(42.0, value3.getDoubleMetric("sketchEstimatePostAgg"), 0.01);
    Assert.assertEquals(2.0, value3.getDoubleMetric("sketchEstimatePostAggForSketchConstant"), 0.01);
    Assert.assertEquals(42.0, value3.getDoubleMetric("sketchUnionPostAggEstimate"), 0.01);
    Assert.assertEquals(2.0, value3.getDoubleMetric("sketchIntersectionPostAggEstimate"), 0.01);
    Assert.assertEquals(40.0, value3.getDoubleMetric("sketchAnotBPostAggEstimate"), 0.01);
    Assert.assertEquals("product_2", value3.getDimensionValue("product"));
}
Also used : TopNResultValue(org.apache.druid.query.topn.TopNResultValue) AggregationTestHelper(org.apache.druid.query.aggregation.AggregationTestHelper) Sequence(org.apache.druid.java.util.common.guava.Sequence) DimensionAndMetricValueExtractor(org.apache.druid.query.topn.DimensionAndMetricValueExtractor) Result(org.apache.druid.query.Result) GroupByQueryRunnerTest(org.apache.druid.query.groupby.GroupByQueryRunnerTest) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
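
A quick consistency check on the asserted values: each sketch set-operation post-aggregator combines a row's sketch A with a constant sketch C whose estimate is 2.0, and the assertions follow inclusion-exclusion. For value1: |A ∪ C| = 38 + 2 - 1 = 39 and |A \ C| = 38 - 1 = 37, matching the union and AnotB assertions. For value2 and value3: |A ∪ C| = 42 + 2 - 2 = 42 and |A \ C| = 42 - 2 = 40.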

Example 63 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

From the class OldApiSketchAggregationTest, method testSimpleDataIngestAndQuery.

@Test
public void testSimpleDataIngestAndQuery() throws Exception {
    final String groupByQueryString = readFileFromClasspathAsString("oldapi/old_simple_test_data_group_by_query.json");
    final GroupByQuery groupByQuery = (GroupByQuery) helper.getObjectMapper().readValue(groupByQueryString, Query.class);
    final Sequence seq = helper.createIndexAndRunQueryOnSegment(
            new File(this.getClass().getClassLoader().getResource("simple_test_data.tsv").getFile()),
            readFileFromClasspathAsString("simple_test_data_record_parser.json"),
            readFileFromClasspathAsString("oldapi/old_simple_test_data_aggregators.json"),
            0,
            Granularities.NONE,
            1000,
            groupByQueryString
    );
    List results = seq.toList();
    Assert.assertEquals(1, results.size());
    // The single result row should carry the expected aggregate values for 2014-10-19.
    Assert.assertEquals(
            ResultRow.fromLegacyRow(
                    new MapBasedRow(
                            DateTimes.of("2014-10-19T00:00:00.000Z"),
                            ImmutableMap.<String, Object>builder()
                                    .put("sketch_count", 50.0)
                                    .put("sketchEstimatePostAgg", 50.0)
                                    .put("sketchUnionPostAggEstimate", 50.0)
                                    .put("sketchIntersectionPostAggEstimate", 50.0)
                                    .put("sketchAnotBPostAggEstimate", 0.0)
                                    .put("non_existing_col_validation", 0.0)
                                    .build()
                    ),
                    groupByQuery
            ),
            results.get(0)
    );
}
Also used : MapBasedRow(org.apache.druid.data.input.MapBasedRow) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Query(org.apache.druid.query.Query) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) ArrayList(java.util.ArrayList) List(java.util.List) Sequence(org.apache.druid.java.util.common.guava.Sequence) File(java.io.File) GroupByQueryRunnerTest(org.apache.druid.query.groupby.GroupByQueryRunnerTest) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)

Example 64 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

From the class GroupByStrategyV2, method mergeResults.

@Override
public Sequence<ResultRow> mergeResults(final QueryRunner<ResultRow> baseRunner, final GroupByQuery query, final ResponseContext responseContext) {
    // Merge streams using ResultMergeQueryRunner, then apply postaggregators, then apply limit (which may
    // involve materialization)
    final ResultMergeQueryRunner<ResultRow> mergingQueryRunner = new ResultMergeQueryRunner<>(baseRunner, this::createResultComparator, this::createMergeFn);
    // Set up downstream context.
    final ImmutableMap.Builder<String, Object> context = ImmutableMap.builder();
    context.put("finalize", false);
    context.put(GroupByQueryConfig.CTX_KEY_STRATEGY, GroupByStrategySelector.STRATEGY_V2);
    context.put(CTX_KEY_OUTERMOST, false);
    Granularity granularity = query.getGranularity();
    List<DimensionSpec> dimensionSpecs = query.getDimensions();
    // the CTX_TIMESTAMP_RESULT_FIELD is set in DruidQuery.java
    final String timestampResultField = query.getContextValue(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD);
    final boolean hasTimestampResultField = (timestampResultField != null && !timestampResultField.isEmpty())
            && query.getContextBoolean(CTX_KEY_OUTERMOST, true)
            && !query.isApplyLimitPushDown();
    int timestampResultFieldIndex = 0;
    if (hasTimestampResultField) {
        // For SQL like "GROUP BY city_id, TIME_FLOOR(__time TO DAY)",
        // the originally translated query has granularity=all and dimensions [d0, d1].
        // The better plan is granularity=day with dimensions [d0],
        // but that changes the ResultRow structure from [d0, d1] to [__time, d0];
        // the structure must be restored to [d0, d1] (in effect [d0, __time]) before postAggs are applied.
        //
        // That is the general idea of this optimization.
        // From a coding perspective, though, granularity=all and the "d0" dimension are referenced in many
        // places (subtotals, having, grouping sets, post-aggs), so removing "d0" from query.dimensions, or
        // changing the granularity, would require fixes all over the codebase.
        // For easier coding, the optimization is therefore implemented as an inner, engine-level process
        // change in the groupBy engine; most of the code lives in GroupByStrategyV2 and concerns the
        // processing between the broker and the compute nodes.
        // Basic logic such as nested queries and subtotals is kept unchanged:
        // it still sees granularity=all and the "d0" dimension.
        //
        // The trade-off is that GroupByStrategyV2 behaves differently depending on the query context set in
        // DruidQuery. In other words, the query generated by "EXPLAIN PLAN FOR SELECT ..." does not exactly
        // match the native query ACTUALLY being executed; the granularity and dimensions differ slightly.
        // Part of the query-planning logic is now handled in GroupByStrategyV2, not only in
        // DruidQuery.toGroupByQuery().
        final Granularity timestampResultFieldGranularity =
                query.getContextValue(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_GRANULARITY);
        dimensionSpecs = query.getDimensions()
                .stream()
                .filter(dimensionSpec -> !dimensionSpec.getOutputName().equals(timestampResultField))
                .collect(Collectors.toList());
        granularity = timestampResultFieldGranularity;
        // When timestampResultField is the last dimension, sortByDimsFirst should be set to true;
        // otherwise the downstream sorts by the row's timestamp first, which breaks the expected final ordering.
        timestampResultFieldIndex = query.getContextValue(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_INDEX);
        if (!query.getContextSortByDimsFirst() && timestampResultFieldIndex == query.getDimensions().size() - 1) {
            context.put(GroupByQuery.CTX_KEY_SORT_BY_DIMS_FIRST, true);
        }
        // When timestampResultField is the first dimension, sortByDimsFirst=true is effectively equal to sortByDimsFirst=false.
        if (query.getContextSortByDimsFirst() && timestampResultFieldIndex == 0) {
            context.put(GroupByQuery.CTX_KEY_SORT_BY_DIMS_FIRST, false);
        }
    // When hasTimestampResultField=true and timestampResultField is neither the first nor the last dimension,
    // the DefaultLimitSpec always does the reordering.
    }
    final int timestampResultFieldIndexInOriginalDimensions = timestampResultFieldIndex;
    if (query.getUniversalTimestamp() != null && !hasTimestampResultField) {
        // universalTimestamp works only when granularity is "all";
        // hasTimestampResultField likewise applies only when granularity is "all".
        // fudgeTimestamp should not be used when hasTimestampResultField=true, because the row's actual timestamp is used.
        context.put(CTX_KEY_FUDGE_TIMESTAMP, String.valueOf(query.getUniversalTimestamp().getMillis()));
    }
    // The having spec shouldn't be passed down, so we need to convey the existing limit push down status
    context.put(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, query.isApplyLimitPushDown());
    // Always request array result rows when passing the query downstream.
    context.put(GroupByQueryConfig.CTX_KEY_ARRAY_RESULT_ROWS, true);
    final GroupByQuery newQuery = new GroupByQuery(
            query.getDataSource(),
            query.getQuerySegmentSpec(),
            query.getVirtualColumns(),
            query.getDimFilter(),
            granularity,
            dimensionSpecs,
            query.getAggregatorSpecs(),
            // Don't apply postaggregators on compute nodes
            ImmutableList.of(),
            // Don't do the "having" clause until the end of this method.
            null,
            // Don't apply the limit spec here unless limit push down is in effect (otherwise it's applied higher-up).
            query.isApplyLimitPushDown() ? ((DefaultLimitSpec) query.getLimitSpec()).withOffsetToLimit() : null,
            query.getSubtotalsSpec(),
            query.getContext()
    ).withOverriddenContext(context.build());
    final Sequence<ResultRow> mergedResults = mergingQueryRunner.run(QueryPlus.wrap(newQuery), responseContext);
    if (!query.getContextBoolean(CTX_KEY_OUTERMOST, true) || query.getContextBoolean(GroupByQueryConfig.CTX_KEY_EXECUTING_NESTED_QUERY, false)) {
        return mergedResults;
    } else if (query.getPostAggregatorSpecs().isEmpty()) {
        if (!hasTimestampResultField) {
            return mergedResults;
        }
        return Sequences.map(mergedResults, row -> {
            final ResultRow resultRow = ResultRow.create(query.getResultRowSizeWithoutPostAggregators());
            moveOrReplicateTimestampInRow(query, timestampResultFieldIndexInOriginalDimensions, row, resultRow);
            return resultRow;
        });
    } else {
        return Sequences.map(mergedResults, row -> {
            // This function's purpose is to apply PostAggregators.
            final ResultRow rowWithPostAggregations = ResultRow.create(query.getResultRowSizeWithPostAggregators());
            // Copy everything that comes before the postaggregations.
            if (hasTimestampResultField) {
                moveOrReplicateTimestampInRow(query, timestampResultFieldIndexInOriginalDimensions, row, rowWithPostAggregations);
            } else {
                for (int i = 0; i < query.getResultRowPostAggregatorStart(); i++) {
                    rowWithPostAggregations.set(i, row.get(i));
                }
            }
            // Compute postaggregations. We need to do this with a result-row map because PostAggregator.compute
            // expects a map. Some further design adjustment may eliminate the need for it, and speed up this function.
            final Map<String, Object> mapForPostAggregationComputation = rowWithPostAggregations.toMap(query);
            for (int i = 0; i < query.getPostAggregatorSpecs().size(); i++) {
                final PostAggregator postAggregator = query.getPostAggregatorSpecs().get(i);
                final Object value = postAggregator.compute(mapForPostAggregationComputation);
                rowWithPostAggregations.set(query.getResultRowPostAggregatorStart() + i, value);
                mapForPostAggregationComputation.put(postAggregator.getName(), value);
            }
            return rowWithPostAggregations;
        });
    }
}
Also used : ResultRow(org.apache.druid.query.groupby.ResultRow) ResultMergeQueryRunner(org.apache.druid.query.ResultMergeQueryRunner) QueryPlus(org.apache.druid.query.QueryPlus) GroupByQueryEngineV2(org.apache.druid.query.groupby.epinephelinae.GroupByQueryEngineV2) Inject(com.google.inject.Inject) Smile(org.apache.druid.guice.annotations.Smile) Merging(org.apache.druid.guice.annotations.Merging) QueryProcessingPool(org.apache.druid.query.QueryProcessingPool) ResultMergeQueryRunner(org.apache.druid.query.ResultMergeQueryRunner) StorageAdapter(org.apache.druid.segment.StorageAdapter) ByteBuffer(java.nio.ByteBuffer) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) GroupByBinaryFnV2(org.apache.druid.query.groupby.epinephelinae.GroupByBinaryFnV2) QueryWatcher(org.apache.druid.query.QueryWatcher) Map(java.util.Map) QueryRunner(org.apache.druid.query.QueryRunner) Sequence(org.apache.druid.java.util.common.guava.Sequence) LazySequence(org.apache.druid.java.util.common.guava.LazySequence) GroupByMergingQueryRunnerV2(org.apache.druid.query.groupby.epinephelinae.GroupByMergingQueryRunnerV2) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) ResultRow(org.apache.druid.query.groupby.ResultRow) DataSource(org.apache.druid.query.DataSource) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) DruidProcessingConfig(org.apache.druid.query.DruidProcessingConfig) Collectors(java.util.stream.Collectors) QueryContexts(org.apache.druid.query.QueryContexts) BinaryOperator(java.util.function.BinaryOperator) BlockingPool(org.apache.druid.collections.BlockingPool) QueryDataSource(org.apache.druid.query.QueryDataSource) List(java.util.List) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) GroupByRowProcessor(org.apache.druid.query.groupby.epinephelinae.GroupByRowProcessor) NoopLimitSpec(org.apache.druid.query.groupby.orderby.NoopLimitSpec) Granularity(org.apache.druid.java.util.common.granularity.Granularity) NonBlockingPool(org.apache.druid.collections.NonBlockingPool) Intervals(org.apache.druid.java.util.common.Intervals) Supplier(com.google.common.base.Supplier) GroupByQueryResource(org.apache.druid.query.groupby.resource.GroupByQueryResource) Utils(org.apache.druid.java.util.common.collect.Utils) ArrayList(java.util.ArrayList) QueryCapacityExceededException(org.apache.druid.query.QueryCapacityExceededException) HashSet(java.util.HashSet) ImmutableList(com.google.common.collect.ImmutableList) Query(org.apache.druid.query.Query) Suppliers(com.google.common.base.Suppliers) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Sequences(org.apache.druid.java.util.common.guava.Sequences) VirtualColumns(org.apache.druid.segment.VirtualColumns) ResponseContext(org.apache.druid.query.context.ResponseContext) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) Global(org.apache.druid.guice.annotations.Global) LimitSpec(org.apache.druid.query.groupby.orderby.LimitSpec) ResourceLimitExceededException(org.apache.druid.query.ResourceLimitExceededException) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) CloseableUtils(org.apache.druid.utils.CloseableUtils) ReferenceCountingResourceHolder(org.apache.druid.collections.ReferenceCountingResourceHolder) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) Granularity(org.apache.druid.java.util.common.granularity.Granularity) ImmutableMap(com.google.common.collect.ImmutableMap) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)
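
The long comment inside mergeResults is easier to follow with a tiny worked example. The sketch below is a simplified, hypothetical illustration of the row reshaping it describes; it is not the actual moveOrReplicateTimestampInRow implementation, and restoreRowShape is our own name. After the granularity=day rewrite, the engine emits rows shaped [__time, d0, aggs...], and the timestamp must be replicated back into the slot the removed time-floor dimension occupied in the original [d0, d1, aggs...] layout.

import java.util.Arrays;

public class TimestampMoveSketch {
    // Hypothetical simplification of the reshaping described in mergeResults.
    static Object[] restoreRowShape(Object[] engineRow, int timestampResultFieldIndex) {
        Object[] restored = new Object[engineRow.length];
        int src = 1; // engineRow[0] is the row timestamp produced by granularity=day
        for (int dst = 0; dst < restored.length; dst++) {
            if (dst == timestampResultFieldIndex) {
                // Replicate the timestamp into the slot of the removed time-floor dimension.
                restored[dst] = engineRow[0];
            } else {
                restored[dst] = engineRow[src++];
            }
        }
        return restored;
    }

    public static void main(String[] args) {
        // Engine row [__time, d0, agg]; the time dimension sat at index 1 originally.
        Object[] engineRow = {1413763200000L, "city_1", 42L};
        // Prints [city_1, 1413763200000, 42].
        System.out.println(Arrays.toString(restoreRowShape(engineRow, 1)));
    }
}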

Example 65 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

From the class GroupByStrategyV2, method processSubtotalsSpec.

@Override
public Sequence<ResultRow> processSubtotalsSpec(GroupByQuery query, GroupByQueryResource resource, Sequence<ResultRow> queryResult) {
    // How it works:
    // First we accumulate the result of the top-level base query (the queryResult arg) into a
    // resultSupplierOne object.
    // Then, for each subtotalSpec:
    // If the subtotalSpec is a prefix of the top-level dims, we iterate over the rows in resultSupplierOne,
    // which are still sorted by the subtotalSpec, stream-merge them, and return.
    //
    // If the subtotalSpec is not a prefix of the top-level dims, we create a resultSupplierTwo object filled
    // with rows from resultSupplierOne restricted to the dims of the subtotalSpec. Then we iterate over the
    // rows in resultSupplierTwo, which are by construction sorted by the subtotalSpec, stream-merge them,
    // and return.
    // Keep a reference to resultSupplier outside the "try" so we can close it if something goes wrong
    // while creating the sequence.
    GroupByRowProcessor.ResultSupplier resultSupplierOne = null;
    try {
        // baseSubtotalQuery is the original query with dimensions and aggregators rewritten to apply to the *results*
        // rather than *inputs* of that query. It has its virtual columns and dim filter removed, because those only
        // make sense when applied to inputs. Finally, it has subtotalsSpec removed, since we'll be computing them
        // one-by-one soon enough.
        GroupByQuery baseSubtotalQuery = query
                .withDimensionSpecs(
                        query.getDimensions().stream()
                                .map(dimSpec -> new DefaultDimensionSpec(
                                        dimSpec.getOutputName(),
                                        dimSpec.getOutputName(),
                                        dimSpec.getOutputType()
                                ))
                                .collect(Collectors.toList())
                )
                .withAggregatorSpecs(
                        query.getAggregatorSpecs().stream()
                                .map(AggregatorFactory::getCombiningFactory)
                                .collect(Collectors.toList())
                )
                .withVirtualColumns(VirtualColumns.EMPTY)
                .withDimFilter(null)
                .withSubtotalsSpec(null)
                .withOverriddenContext(ImmutableMap.of(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD, ""));
        resultSupplierOne = GroupByRowProcessor.process(
                baseSubtotalQuery,
                baseSubtotalQuery,
                queryResult,
                configSupplier.get(),
                resource,
                spillMapper,
                processingConfig.getTmpDir(),
                processingConfig.intermediateComputeSizeBytes()
        );
        List<String> queryDimNames = baseSubtotalQuery.getDimensions().stream()
                .map(DimensionSpec::getOutputName)
                .collect(Collectors.toList());
        // Only needed for the LimitSpec.filterColumns(..) call below, in case the base query has a non-default LimitSpec.
        Set<String> aggsAndPostAggs = null;
        if (!(baseSubtotalQuery.getLimitSpec() instanceof NoopLimitSpec)) {
            aggsAndPostAggs = getAggregatorAndPostAggregatorNames(baseSubtotalQuery);
        }
        List<List<String>> subtotals = query.getSubtotalsSpec();
        List<Sequence<ResultRow>> subtotalsResults = new ArrayList<>(subtotals.size());
        // Iterate through each subtotalSpec, build results for it and add to subtotalsResults
        for (List<String> subtotalSpec : subtotals) {
            final ImmutableSet<String> dimsInSubtotalSpec = ImmutableSet.copyOf(subtotalSpec);
            // Dimension spec including dimension name and output name
            final List<DimensionSpec> subTotalDimensionSpec = new ArrayList<>(dimsInSubtotalSpec.size());
            final List<DimensionSpec> dimensions = query.getDimensions();
            for (DimensionSpec dimensionSpec : dimensions) {
                if (dimsInSubtotalSpec.contains(dimensionSpec.getOutputName())) {
                    subTotalDimensionSpec.add(dimensionSpec);
                }
            }
            // Create appropriate LimitSpec for subtotal query
            LimitSpec subtotalQueryLimitSpec = NoopLimitSpec.instance();
            if (!(baseSubtotalQuery.getLimitSpec() instanceof NoopLimitSpec)) {
                Set<String> columns = new HashSet<>(aggsAndPostAggs);
                columns.addAll(subtotalSpec);
                subtotalQueryLimitSpec = baseSubtotalQuery.getLimitSpec().filterColumns(columns);
            }
            GroupByQuery subtotalQuery = baseSubtotalQuery.withLimitSpec(subtotalQueryLimitSpec);
            final GroupByRowProcessor.ResultSupplier resultSupplierOneFinal = resultSupplierOne;
            if (Utils.isPrefix(subtotalSpec, queryDimNames)) {
                // subtotalSpec is a prefix of the base query dimensions, so the base query results are already
                // sorted by subtotalSpec, as required by stream merging.
                subtotalsResults.add(processSubtotalsResultAndOptionallyClose(() -> resultSupplierOneFinal, subTotalDimensionSpec, subtotalQuery, false));
            } else {
                // subtotalSpec is not a prefix of the base query dimensions, so the base query results are not
                // sorted by subtotalSpec. We first feed the base query results into another resultSupplier, which
                // sorts them by subtotalSpec, and then stream-merge them.
                // Also note: we can't create the ResultSupplier eagerly here, because we don't want to eagerly
                // allocate merge buffers for subtotal processing.
                Supplier<GroupByRowProcessor.ResultSupplier> resultSupplierTwo = () -> GroupByRowProcessor.process(
                        baseSubtotalQuery,
                        subtotalQuery,
                        resultSupplierOneFinal.results(subTotalDimensionSpec),
                        configSupplier.get(),
                        resource,
                        spillMapper,
                        processingConfig.getTmpDir(),
                        processingConfig.intermediateComputeSizeBytes()
                );
                subtotalsResults.add(processSubtotalsResultAndOptionallyClose(resultSupplierTwo, subTotalDimensionSpec, subtotalQuery, true));
            }
        }
        return Sequences.withBaggage(
                query.postProcess(Sequences.concat(subtotalsResults)),
                // This closes the resources allocated by resultSupplierOne once the sequence has been fully read.
                resultSupplierOne
        );
    } catch (Throwable e) {
        throw CloseableUtils.closeAndWrapInCatch(e, resultSupplierOne);
    }
}
Also used : QueryPlus(org.apache.druid.query.QueryPlus) GroupByQueryEngineV2(org.apache.druid.query.groupby.epinephelinae.GroupByQueryEngineV2) Inject(com.google.inject.Inject) Smile(org.apache.druid.guice.annotations.Smile) Merging(org.apache.druid.guice.annotations.Merging) QueryProcessingPool(org.apache.druid.query.QueryProcessingPool) ResultMergeQueryRunner(org.apache.druid.query.ResultMergeQueryRunner) StorageAdapter(org.apache.druid.segment.StorageAdapter) ByteBuffer(java.nio.ByteBuffer) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) GroupByBinaryFnV2(org.apache.druid.query.groupby.epinephelinae.GroupByBinaryFnV2) QueryWatcher(org.apache.druid.query.QueryWatcher) Map(java.util.Map) QueryRunner(org.apache.druid.query.QueryRunner) Sequence(org.apache.druid.java.util.common.guava.Sequence) LazySequence(org.apache.druid.java.util.common.guava.LazySequence) GroupByMergingQueryRunnerV2(org.apache.druid.query.groupby.epinephelinae.GroupByMergingQueryRunnerV2) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) ResultRow(org.apache.druid.query.groupby.ResultRow) DataSource(org.apache.druid.query.DataSource) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) DruidProcessingConfig(org.apache.druid.query.DruidProcessingConfig) Collectors(java.util.stream.Collectors) QueryContexts(org.apache.druid.query.QueryContexts) BinaryOperator(java.util.function.BinaryOperator) BlockingPool(org.apache.druid.collections.BlockingPool) QueryDataSource(org.apache.druid.query.QueryDataSource) List(java.util.List) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) GroupByRowProcessor(org.apache.druid.query.groupby.epinephelinae.GroupByRowProcessor) NoopLimitSpec(org.apache.druid.query.groupby.orderby.NoopLimitSpec) Granularity(org.apache.druid.java.util.common.granularity.Granularity) NonBlockingPool(org.apache.druid.collections.NonBlockingPool) Intervals(org.apache.druid.java.util.common.Intervals) Supplier(com.google.common.base.Supplier) GroupByQueryResource(org.apache.druid.query.groupby.resource.GroupByQueryResource) Utils(org.apache.druid.java.util.common.collect.Utils) ArrayList(java.util.ArrayList) QueryCapacityExceededException(org.apache.druid.query.QueryCapacityExceededException) HashSet(java.util.HashSet) ImmutableList(com.google.common.collect.ImmutableList) Query(org.apache.druid.query.Query) Suppliers(com.google.common.base.Suppliers) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Sequences(org.apache.druid.java.util.common.guava.Sequences) VirtualColumns(org.apache.druid.segment.VirtualColumns) ResponseContext(org.apache.druid.query.context.ResponseContext) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) Global(org.apache.druid.guice.annotations.Global) LimitSpec(org.apache.druid.query.groupby.orderby.LimitSpec) ResourceLimitExceededException(org.apache.druid.query.ResourceLimitExceededException) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) CloseableUtils(org.apache.druid.utils.CloseableUtils) ReferenceCountingResourceHolder(org.apache.druid.collections.ReferenceCountingResourceHolder) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) GroupByRowProcessor(org.apache.druid.query.groupby.epinephelinae.GroupByRowProcessor) ArrayList(java.util.ArrayList) Sequence(org.apache.druid.java.util.common.guava.Sequence) LazySequence(org.apache.druid.java.util.common.guava.LazySequence) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) NoopLimitSpec(org.apache.druid.query.groupby.orderby.NoopLimitSpec) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) NoopLimitSpec(org.apache.druid.query.groupby.orderby.NoopLimitSpec) LimitSpec(org.apache.druid.query.groupby.orderby.LimitSpec) HashSet(java.util.HashSet)
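
The branch on Utils.isPrefix above is the heart of processSubtotalsSpec: rows sorted by [d0, d1, d2] are automatically sorted by any prefix of those dimensions, so prefix subtotals can be stream-merged as-is, while non-prefix subtotals go through the re-sorting resultSupplierTwo path. Below is a minimal, hypothetical sketch of that check; isPrefix here is our own stand-in for org.apache.druid.java.util.common.collect.Utils.isPrefix, whose exact signature we have not verified.

import java.util.Arrays;
import java.util.List;

public class SubtotalPrefixSketch {
    // Stand-in for Utils.isPrefix: is `candidate` a prefix of `dims`?
    static boolean isPrefix(List<String> candidate, List<String> dims) {
        return candidate.size() <= dims.size()
                && candidate.equals(dims.subList(0, candidate.size()));
    }

    public static void main(String[] args) {
        List<String> queryDims = Arrays.asList("d0", "d1", "d2");
        // ["d0", "d1"] is a prefix: rows sorted by [d0, d1, d2] are already
        // sorted by [d0, d1] and can be stream-merged directly.
        System.out.println(isPrefix(Arrays.asList("d0", "d1"), queryDims)); // true
        // ["d1"] is not a prefix: rows must first be re-sorted by [d1]
        // (the resultSupplierTwo path above).
        System.out.println(isPrefix(Arrays.asList("d1"), queryDims)); // false
    }
}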

Aggregations

Sequence (org.apache.druid.java.util.common.guava.Sequence): 102
Test (org.junit.Test): 53
List (java.util.List): 44
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 37
ResponseContext (org.apache.druid.query.context.ResponseContext): 32
ImmutableList (com.google.common.collect.ImmutableList): 29
Intervals (org.apache.druid.java.util.common.Intervals): 28
Granularities (org.apache.druid.java.util.common.granularity.Granularities): 28
QueryRunner (org.apache.druid.query.QueryRunner): 28
ArrayList (java.util.ArrayList): 27
VirtualColumns (org.apache.druid.segment.VirtualColumns): 26
Cursor (org.apache.druid.segment.Cursor): 25
QueryPlus (org.apache.druid.query.QueryPlus): 24
Result (org.apache.druid.query.Result): 24
NullHandling (org.apache.druid.common.config.NullHandling): 22
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 22
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 21
QueryableIndexStorageAdapter (org.apache.druid.segment.QueryableIndexStorageAdapter): 20
DataSegment (org.apache.druid.timeline.DataSegment): 20
ImmutableMap (com.google.common.collect.ImmutableMap): 18