
Example 41 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

The class DefaultLimitSpec, method build:

@Override
public Function<Sequence<ResultRow>, Sequence<ResultRow>> build(final GroupByQuery query) {
    final List<DimensionSpec> dimensions = query.getDimensions();
    // Can avoid re-sorting if the natural ordering is good enough.
    boolean sortingNeeded = dimensions.size() < columns.size();
    final Set<String> aggAndPostAggNames = new HashSet<>();
    for (AggregatorFactory agg : query.getAggregatorSpecs()) {
        aggAndPostAggNames.add(agg.getName());
    }
    for (PostAggregator postAgg : query.getPostAggregatorSpecs()) {
        aggAndPostAggNames.add(postAgg.getName());
    }
    if (!sortingNeeded) {
        for (int i = 0; i < columns.size(); i++) {
            final OrderByColumnSpec columnSpec = columns.get(i);
            if (aggAndPostAggNames.contains(columnSpec.getDimension())) {
                sortingNeeded = true;
                break;
            }
            final ColumnType columnType = getOrderByType(columnSpec, dimensions);
            final StringComparator naturalComparator;
            if (columnType.is(ValueType.STRING)) {
                naturalComparator = StringComparators.LEXICOGRAPHIC;
            } else if (columnType.isNumeric()) {
                naturalComparator = StringComparators.NUMERIC;
            } else if (columnType.isArray()) {
                if (columnType.getElementType().isNumeric()) {
                    naturalComparator = StringComparators.NUMERIC;
                } else {
                    naturalComparator = StringComparators.LEXICOGRAPHIC;
                }
            } else {
                sortingNeeded = true;
                break;
            }
            if (columnSpec.getDirection() != OrderByColumnSpec.Direction.ASCENDING
                    || !columnSpec.getDimensionComparator().equals(naturalComparator)
                    || !columnSpec.getDimension().equals(dimensions.get(i).getOutputName())) {
                sortingNeeded = true;
                break;
            }
        }
    }
    if (!sortingNeeded) {
        // If granularity is ALL, sortByDimsFirst doesn't change the sorting order.
        sortingNeeded = !query.getGranularity().equals(Granularities.ALL) && query.getContextSortByDimsFirst();
    }
    if (!sortingNeeded) {
        // A groupBy may store the row timestamp in a regular result field (CTX_TIMESTAMP_RESULT_FIELD);
        // the natural ordering only holds if that field sits where the natural sort expects it.
        String timestampField = query.getContextValue(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD);
        if (timestampField != null && !timestampField.isEmpty()) {
            int timestampResultFieldIndex = query.getContextValue(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_INDEX);
            // With sortByDimsFirst the timestamp must be the last dimension; otherwise it must be the first.
            sortingNeeded = query.getContextSortByDimsFirst()
                ? timestampResultFieldIndex != query.getDimensions().size() - 1
                : timestampResultFieldIndex != 0;
        }
    }
    final Function<Sequence<ResultRow>, Sequence<ResultRow>> sortAndLimitFn;
    if (sortingNeeded) {
        // Materialize the Comparator first for fast-fail error checking.
        final Ordering<ResultRow> ordering = makeComparator(
            query.getResultRowSignature(),
            query.getResultRowHasTimestamp(),
            query.getDimensions(),
            query.getAggregatorSpecs(),
            query.getPostAggregatorSpecs(),
            query.getContextSortByDimsFirst());
        // Both branches use a stable sort; important so consistent results are returned from query to query if the
        // underlying data isn't changing. (Useful for query reproducibility and offset-based pagination.)
        if (isLimited()) {
            sortAndLimitFn = results -> new TopNSequence<>(results, ordering, limit + offset);
        } else {
            sortAndLimitFn = results -> Sequences.sort(results, ordering).limit(limit + offset);
        }
    } else {
        if (isLimited()) {
            sortAndLimitFn = results -> results.limit(limit + offset);
        } else {
            sortAndLimitFn = Functions.identity();
        }
    }
    // Finally, apply offset after sorting and limiting.
    if (isOffset()) {
        return results -> sortAndLimitFn.apply(results).skip(offset);
    } else {
        return sortAndLimitFn;
    }
}
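The composition above is worth tracing: when an offset is set, the limit step keeps limit + offset rows and skip(offset) then drops the leading rows, so the offset is effectively applied last. A minimal sketch of the same sort → limit(limit + offset) → skip(offset) ordering using only JDK streams (no Druid classes; the class name and values are illustrative):

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SortLimitOffsetSketch {
    public static void main(String[] args) {
        final int limit = 2;
        final int offset = 1;
        List<Integer> page = Stream.of(5, 3, 9, 1, 7)
            .sorted(Comparator.naturalOrder())
            // keep offset + limit rows, like TopNSequence(results, ordering, limit + offset)
            .limit(limit + offset)
            // offset applied after sorting and limiting, like skip(offset)
            .skip(offset)
            .collect(Collectors.toList());
        // Prints [3, 5]: rows 2..3 of the sorted input [1, 3, 5, 7, 9].
        System.out.println(page);
    }
}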
Also used : ResultRow(org.apache.druid.query.groupby.ResultRow) JsonProperty(com.fasterxml.jackson.annotation.JsonProperty) Iterables(com.google.common.collect.Iterables) Arrays(java.util.Arrays) ComparableList(org.apache.druid.segment.data.ComparableList) DimensionHandlerUtils(org.apache.druid.segment.DimensionHandlerUtils) Rows(org.apache.druid.data.input.Rows) HashMap(java.util.HashMap) ByteBuffer(java.nio.ByteBuffer) HashSet(java.util.HashSet) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) ImmutableList(com.google.common.collect.ImmutableList) Map(java.util.Map) StringComparators(org.apache.druid.query.ordering.StringComparators) ComparableStringArray(org.apache.druid.segment.data.ComparableStringArray) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Sequences(org.apache.druid.java.util.common.guava.Sequences) Nullable(javax.annotation.Nullable) Functions(com.google.common.base.Functions) Sequence(org.apache.druid.java.util.common.guava.Sequence) Longs(com.google.common.primitives.Longs) Function(com.google.common.base.Function) StringComparator(org.apache.druid.query.ordering.StringComparator) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) ValueType(org.apache.druid.segment.column.ValueType) Collectors(java.util.stream.Collectors) Granularities(org.apache.druid.java.util.common.granularity.Granularities) Objects(java.util.Objects) List(java.util.List) Ordering(com.google.common.collect.Ordering) NullHandling(org.apache.druid.common.config.NullHandling) RowSignature(org.apache.druid.segment.column.RowSignature) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) JsonInclude(com.fasterxml.jackson.annotation.JsonInclude) ColumnType(org.apache.druid.segment.column.ColumnType) Optional(java.util.Optional) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) TopNSequence(org.apache.druid.java.util.common.guava.TopNSequence) Collections(java.util.Collections)

Example 42 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

The class MovingAverageQueryRunner, method run:

@Override
public Sequence<Row> run(QueryPlus<Row> query, ResponseContext responseContext) {
    MovingAverageQuery maq = (MovingAverageQuery) query.getQuery();
    List<Interval> intervals;
    final Period period;
    // Get the largest bucket from the list of averagers
    Optional<Integer> opt = maq.getAveragerSpecs().stream().map(AveragerFactory::getNumBuckets).max(Integer::compare);
    int buckets = opt.orElse(0);
    // Extend the beginning of each interval by (buckets - 1) periods so the averagers have leading data
    if (maq.getGranularity() instanceof PeriodGranularity) {
        period = ((PeriodGranularity) maq.getGranularity()).getPeriod();
        int offset = buckets <= 0 ? 0 : (1 - buckets);
        intervals = maq.getIntervals().stream().map(i -> new Interval(i.getStart().withPeriodAdded(period, offset), i.getEnd())).collect(Collectors.toList());
    } else {
        throw new ISE("Only PeriodGranularity is supported for movingAverage queries");
    }
    Sequence<Row> resultsSeq;
    DataSource dataSource = maq.getDataSource();
    if (maq.getDimensions() != null && !maq.getDimensions().isEmpty()
            && (dataSource instanceof TableDataSource || dataSource instanceof UnionDataSource || dataSource instanceof QueryDataSource)) {
        // build groupBy query from movingAverage query
        GroupByQuery.Builder builder = GroupByQuery.builder()
            .setDataSource(dataSource)
            .setInterval(intervals)
            .setDimFilter(maq.getFilter())
            .setGranularity(maq.getGranularity())
            .setDimensions(maq.getDimensions())
            .setAggregatorSpecs(maq.getAggregatorSpecs())
            .setPostAggregatorSpecs(maq.getPostAggregatorSpecs())
            .setContext(maq.getContext());
        GroupByQuery gbq = builder.build();
        ResponseContext gbqResponseContext = ResponseContext.createEmpty();
        gbqResponseContext.merge(responseContext);
        gbqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(gbq));
        Sequence<ResultRow> results = gbq.getRunner(walker).run(QueryPlus.wrap(gbq), gbqResponseContext);
        try {
            // use localhost for remote address
            requestLogger.logNativeQuery(RequestLogLine.forNative(gbq, DateTimes.nowUtc(), "127.0.0.1", new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))));
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
        resultsSeq = results.map(row -> row.toMapBasedRow(gbq));
    } else {
        // no dimensions, so optimize this as a TimeSeries
        TimeseriesQuery tsq = new TimeseriesQuery(dataSource, new MultipleIntervalSegmentSpec(intervals), false, null, maq.getFilter(), maq.getGranularity(), maq.getAggregatorSpecs(), maq.getPostAggregatorSpecs(), 0, maq.getContext());
        ResponseContext tsqResponseContext = ResponseContext.createEmpty();
        tsqResponseContext.merge(responseContext);
        tsqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(tsq));
        Sequence<Result<TimeseriesResultValue>> results = tsq.getRunner(walker).run(QueryPlus.wrap(tsq), tsqResponseContext);
        try {
            // use localhost for remote address
            requestLogger.logNativeQuery(RequestLogLine.forNative(tsq, DateTimes.nowUtc(), "127.0.0.1", new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))));
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
        resultsSeq = Sequences.map(results, new TimeseriesResultToRow());
    }
    // Process into period buckets
    Sequence<RowBucket> bucketedMovingAvgResults = Sequences.simple(new RowBucketIterable(resultsSeq, intervals, period));
    // Apply the windows analysis functions
    Sequence<Row> movingAvgResults = Sequences.simple(new MovingAverageIterable(bucketedMovingAvgResults, maq.getDimensions(), maq.getAveragerSpecs(), maq.getPostAggregatorSpecs(), maq.getAggregatorSpecs()));
    // Apply any postAveragers
    Sequence<Row> movingAvgResultsWithPostAveragers = Sequences.map(movingAvgResults, new PostAveragerAggregatorCalculator(maq));
    // remove rows outside the reporting window
    List<Interval> reportingIntervals = maq.getIntervals();
    movingAvgResults = Sequences.filter(movingAvgResultsWithPostAveragers, row -> reportingIntervals.stream().anyMatch(i -> i.contains(row.getTimestamp())));
    // Apply any having, sorting, and limits
    movingAvgResults = maq.applyLimit(movingAvgResults);
    return movingAvgResults;
}
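Before running the inner query, run() widens each reporting interval so the averagers see enough leading data: with the largest averager spanning buckets periods, the start moves back by buckets - 1 grain periods (offset = 1 - buckets). A small Joda-Time sketch of that arithmetic, assuming daily granularity and a 7-bucket averager (all values illustrative):

import org.joda.time.Interval;
import org.joda.time.Period;

public class IntervalExtensionSketch {
    public static void main(String[] args) {
        Period period = Period.days(1);                 // grain of the PeriodGranularity
        int buckets = 7;                                // largest averager window, in buckets
        int offset = buckets <= 0 ? 0 : (1 - buckets);  // -6: start six periods earlier
        Interval reporting = Interval.parse("2020-01-10/2020-01-20");
        Interval extended = new Interval(
            reporting.getStart().withPeriodAdded(period, offset),
            reporting.getEnd());
        // Start moves back six days: 2020-01-04/2020-01-20.
        System.out.println(extended);
    }
}

The extended intervals feed the inner groupBy or timeseries query, while the reportingIntervals filter near the end of run() trims those extra leading rows back out of the final result.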
Also used : QueryPlus(org.apache.druid.query.QueryPlus) MapBasedRow(org.apache.druid.data.input.MapBasedRow) AveragerFactory(org.apache.druid.query.movingaverage.averagers.AveragerFactory) TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) Row(org.apache.druid.data.input.Row) QueryStats(org.apache.druid.server.QueryStats) Interval(org.joda.time.Interval) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Map(java.util.Map) QueryRunner(org.apache.druid.query.QueryRunner) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) QuerySegmentWalker(org.apache.druid.query.QuerySegmentWalker) Sequences(org.apache.druid.java.util.common.guava.Sequences) Nullable(javax.annotation.Nullable) DateTimes(org.apache.druid.java.util.common.DateTimes) Sequence(org.apache.druid.java.util.common.guava.Sequence) Period(org.joda.time.Period) Function(com.google.common.base.Function) ImmutableMap(com.google.common.collect.ImmutableMap) ResponseContext(org.apache.druid.query.context.ResponseContext) ResultRow(org.apache.druid.query.groupby.ResultRow) DataSource(org.apache.druid.query.DataSource) Throwables(com.google.common.base.Throwables) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) RequestLogger(org.apache.druid.server.log.RequestLogger) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) QueryContexts(org.apache.druid.query.QueryContexts) TableDataSource(org.apache.druid.query.TableDataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) Result(org.apache.druid.query.Result) List(java.util.List) UnionDataSource(org.apache.druid.query.UnionDataSource) RequestLogLine(org.apache.druid.server.RequestLogLine) Optional(java.util.Optional)

Example 43 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

The class SketchAggregationWithSimpleDataTest, method testSimpleDataIngestAndGpByQuery:

@Test
public void testSimpleDataIngestAndGpByQuery() throws Exception {
    try (final AggregationTestHelper gpByQueryAggregationTestHelper = AggregationTestHelper.createGroupByQueryAggregationTestHelper(sm.getJacksonModules(), config, tempFolder)) {
        final GroupByQuery groupByQuery = SketchAggregationTest.readQueryFromClasspath("simple_test_data_group_by_query.json", gpByQueryAggregationTestHelper.getObjectMapper(), vectorize);
        Sequence<ResultRow> seq = gpByQueryAggregationTestHelper.runQueryOnSegments(ImmutableList.of(s1, s2), groupByQuery);
        List<MapBasedRow> results = seq.map(row -> row.toMapBasedRow(groupByQuery)).toList();
        Assert.assertEquals(5, results.size());
        Assert.assertEquals(
            ImmutableList.of(
                new MapBasedRow(DateTimes.of("2014-10-19T00:00:00.000Z"), ImmutableMap.<String, Object>builder().put("product", "product_3").put("sketch_count", 38.0).put("sketchEstimatePostAgg", 38.0).put("sketchUnionPostAggEstimate", 38.0).put("sketchIntersectionPostAggEstimate", 38.0).put("sketchAnotBPostAggEstimate", 0.0).put("non_existing_col_validation", 0.0).build()),
                new MapBasedRow(DateTimes.of("2014-10-19T00:00:00.000Z"), ImmutableMap.<String, Object>builder().put("product", "product_1").put("sketch_count", 42.0).put("sketchEstimatePostAgg", 42.0).put("sketchUnionPostAggEstimate", 42.0).put("sketchIntersectionPostAggEstimate", 42.0).put("sketchAnotBPostAggEstimate", 0.0).put("non_existing_col_validation", 0.0).build()),
                new MapBasedRow(DateTimes.of("2014-10-19T00:00:00.000Z"), ImmutableMap.<String, Object>builder().put("product", "product_2").put("sketch_count", 42.0).put("sketchEstimatePostAgg", 42.0).put("sketchUnionPostAggEstimate", 42.0).put("sketchIntersectionPostAggEstimate", 42.0).put("sketchAnotBPostAggEstimate", 0.0).put("non_existing_col_validation", 0.0).build()),
                new MapBasedRow(DateTimes.of("2014-10-19T00:00:00.000Z"), ImmutableMap.<String, Object>builder().put("product", "product_4").put("sketch_count", 42.0).put("sketchEstimatePostAgg", 42.0).put("sketchUnionPostAggEstimate", 42.0).put("sketchIntersectionPostAggEstimate", 42.0).put("sketchAnotBPostAggEstimate", 0.0).put("non_existing_col_validation", 0.0).build()),
                new MapBasedRow(DateTimes.of("2014-10-19T00:00:00.000Z"), ImmutableMap.<String, Object>builder().put("product", "product_5").put("sketch_count", 42.0).put("sketchEstimatePostAgg", 42.0).put("sketchUnionPostAggEstimate", 42.0).put("sketchIntersectionPostAggEstimate", 42.0).put("sketchAnotBPostAggEstimate", 0.0).put("non_existing_col_validation", 0.0).build())),
            results);
    }
}
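The seq.map(row -> row.toMapBasedRow(groupByQuery)).toList() chain is the usual way to materialize a lazy Sequence for assertions. A minimal sketch of that pattern in isolation, assuming Druid's Sequences utility and Guava on the classpath as used in the snippets above (the mapping itself is illustrative):

import com.google.common.collect.ImmutableList;
import java.util.List;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;

public class SequenceMaterializeSketch {
    public static void main(String[] args) {
        // Sequences are lazy; nothing is evaluated until a terminal call such as toList().
        Sequence<Integer> seq = Sequences.simple(ImmutableList.of(1, 2, 3));
        List<String> rows = seq.map(i -> "row-" + i).toList();
        // Prints [row-1, row-2, row-3].
        System.out.println(rows);
    }
}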
Also used : ResultRow(org.apache.druid.query.groupby.ResultRow) MapBasedRow(org.apache.druid.data.input.MapBasedRow) DimensionAndMetricValueExtractor(org.apache.druid.query.topn.DimensionAndMetricValueExtractor) Iterables(com.google.common.collect.Iterables) RunWith(org.junit.runner.RunWith) TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) ArrayList(java.util.ArrayList) GroupByQueryRunnerTest(org.apache.druid.query.groupby.GroupByQueryRunnerTest) ImmutableList(com.google.common.collect.ImmutableList) Query(org.apache.druid.query.Query) Files(com.google.common.io.Files) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Parameterized(org.junit.runners.Parameterized) Before(org.junit.Before) DateTimes(org.apache.druid.java.util.common.DateTimes) Sequence(org.apache.druid.java.util.common.guava.Sequence) TopNResultValue(org.apache.druid.query.topn.TopNResultValue) AggregationTestHelper(org.apache.druid.query.aggregation.AggregationTestHelper) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test) IOException(java.io.IOException) QueryContexts(org.apache.druid.query.QueryContexts) File(java.io.File) StandardCharsets(java.nio.charset.StandardCharsets) Granularities(org.apache.druid.java.util.common.granularity.Granularities) Result(org.apache.druid.query.Result) List(java.util.List) Rule(org.junit.Rule) Assert(org.junit.Assert) TemporaryFolder(org.junit.rules.TemporaryFolder)

Example 44 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

The class SketchAggregationWithSimpleDataTest, method testSimpleDataIngestAndTopNQuery:

@Test
public void testSimpleDataIngestAndTopNQuery() throws Exception {
    AggregationTestHelper topNQueryAggregationTestHelper = AggregationTestHelper.createTopNQueryAggregationTestHelper(sm.getJacksonModules(), tempFolder);
    Sequence seq = topNQueryAggregationTestHelper.runQueryOnSegments(
        ImmutableList.of(s1, s2),
        (Query) SketchAggregationTest.readQueryFromClasspath("topn_query.json", topNQueryAggregationTestHelper.getObjectMapper(), vectorize));
    Result<TopNResultValue> result = (Result<TopNResultValue>) Iterables.getOnlyElement(seq.toList());
    Assert.assertEquals(DateTimes.of("2014-10-20T00:00:00.000Z"), result.getTimestamp());
    DimensionAndMetricValueExtractor value = Iterables.getOnlyElement(result.getValue().getValue());
    Assert.assertEquals(38.0, value.getDoubleMetric("sketch_count"), 0.01);
    Assert.assertEquals(38.0, value.getDoubleMetric("sketchEstimatePostAgg"), 0.01);
    Assert.assertEquals(38.0, value.getDoubleMetric("sketchUnionPostAggEstimate"), 0.01);
    Assert.assertEquals(38.0, value.getDoubleMetric("sketchIntersectionPostAggEstimate"), 0.01);
    Assert.assertEquals(0.0, value.getDoubleMetric("sketchAnotBPostAggEstimate"), 0.01);
    Assert.assertEquals(0.0, value.getDoubleMetric("non_existing_col_validation"), 0.01);
    Assert.assertEquals("product_3", value.getDimensionValue("product"));
}
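Iterables.getOnlyElement encodes the test's "exactly one result" expectation: it returns the single element and fails fast otherwise. A small Guava sketch of that behavior (class name and values illustrative):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import java.util.NoSuchElementException;

public class GetOnlyElementSketch {
    public static void main(String[] args) {
        // Exactly one element: returned as-is.
        System.out.println(Iterables.getOnlyElement(ImmutableList.of("product_3")));
        try {
            // More than one element: IllegalArgumentException.
            Iterables.getOnlyElement(ImmutableList.of("a", "b"));
        } catch (IllegalArgumentException e) {
            System.out.println("too many elements");
        }
        try {
            // Empty iterable: NoSuchElementException.
            Iterables.getOnlyElement(ImmutableList.of());
        } catch (NoSuchElementException e) {
            System.out.println("empty iterable");
        }
    }
}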
Also used : TopNResultValue(org.apache.druid.query.topn.TopNResultValue) AggregationTestHelper(org.apache.druid.query.aggregation.AggregationTestHelper) Sequence(org.apache.druid.java.util.common.guava.Sequence) DimensionAndMetricValueExtractor(org.apache.druid.query.topn.DimensionAndMetricValueExtractor) Result(org.apache.druid.query.Result) GroupByQueryRunnerTest(org.apache.druid.query.groupby.GroupByQueryRunnerTest) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)

Example 45 with Sequence

Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.

The class OldApiSketchAggregationTest, method testSketchDataIngestAndQuery:

@Test
public void testSketchDataIngestAndQuery() throws Exception {
    final String groupByQueryString = readFileFromClasspathAsString("oldapi/old_sketch_test_data_group_by_query.json");
    final GroupByQuery groupByQuery = (GroupByQuery) helper.getObjectMapper().readValue(groupByQueryString, Query.class);
    final Sequence seq = helper.createIndexAndRunQueryOnSegment(
        new File(OldApiSketchAggregationTest.class.getClassLoader().getResource("sketch_test_data.tsv").getFile()),
        readFileFromClasspathAsString("sketch_test_data_record_parser.json"),
        readFileFromClasspathAsString("oldapi/old_sketch_test_data_aggregators.json"),
        0,
        Granularities.NONE,
        1000,
        groupByQueryString);
    List results = seq.toList();
    Assert.assertEquals(1, results.size());
    Assert.assertEquals(
        ResultRow.fromLegacyRow(
            new MapBasedRow(DateTimes.of("2014-10-19T00:00:00.000Z"), ImmutableMap.<String, Object>builder().put("sids_sketch_count", 50.0).put("sketchEstimatePostAgg", 50.0).put("sketchUnionPostAggEstimate", 50.0).put("sketchIntersectionPostAggEstimate", 50.0).put("sketchAnotBPostAggEstimate", 0.0).put("non_existing_col_validation", 0.0).build()),
            groupByQuery),
        results.get(0));
}
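readFileFromClasspathAsString is a helper defined elsewhere in the test class. A plausible minimal implementation, assuming Guava's Resources is available; the helper name comes from the snippet above, but this body is an assumption, not the project's actual code:

import com.google.common.io.Resources;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ClasspathReadSketch {
    // Hypothetical stand-in for the test helper of the same name.
    static String readFileFromClasspathAsString(String fileName) throws IOException {
        return Resources.toString(Resources.getResource(fileName), StandardCharsets.UTF_8);
    }

    public static void main(String[] args) throws IOException {
        // The named resource must be on the classpath at runtime.
        System.out.println(readFileFromClasspathAsString("sketch_test_data_record_parser.json"));
    }
}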
Also used : MapBasedRow(org.apache.druid.data.input.MapBasedRow) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Query(org.apache.druid.query.Query) ArrayList(java.util.ArrayList) List(java.util.List) Sequence(org.apache.druid.java.util.common.guava.Sequence) File(java.io.File) GroupByQueryRunnerTest(org.apache.druid.query.groupby.GroupByQueryRunnerTest) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
