Search in sources:
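
For orientation before the extracted examples, here is a minimal sketch of constructing the factory these usages revolve around. The names maxValue and longColumn are placeholders, not taken from the examples below.

import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.LongMaxAggregatorFactory;

// Output name plus the long-typed column whose maximum is tracked. A three-argument
// form that additionally takes an expression appears in Example 3 below.
AggregatorFactory longMax = new LongMaxAggregatorFactory("maxValue", "longColumn");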

Example 1 with LongMaxAggregatorFactory

Use of io.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io, from the class GroupByQueryRunnerTest, method testGroupByNestedWithInnerQueryNumericsWithLongTime.

@Test
public void testGroupByNestedWithInnerQueryNumericsWithLongTime() {
    if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
        expectedException.expect(UnsupportedOperationException.class);
        expectedException.expectMessage("GroupBy v1 only supports dimensions with an outputType of STRING.");
    }
    GroupByQuery subQuery = GroupByQuery.builder()
        .setDataSource(QueryRunnerTestHelper.dataSource)
        .setQuerySegmentSpec(QueryRunnerTestHelper.firstToThird)
        .setDimensions(Lists.<DimensionSpec>newArrayList(
            new DefaultDimensionSpec("market", "alias"),
            new DefaultDimensionSpec("__time", "time_alias", ValueType.LONG),
            new DefaultDimensionSpec("index", "index_alias", ValueType.FLOAT)))
        .setAggregatorSpecs(Arrays.<AggregatorFactory>asList(QueryRunnerTestHelper.rowsCount))
        .setGranularity(QueryRunnerTestHelper.allGran)
        .build();
    GroupByQuery outerQuery = GroupByQuery.builder()
        .setDataSource(subQuery)
        .setQuerySegmentSpec(QueryRunnerTestHelper.firstToThird)
        .setDimensions(Lists.<DimensionSpec>newArrayList(
            new DefaultDimensionSpec("alias", "market"),
            new DefaultDimensionSpec("time_alias", "time_alias2", ValueType.LONG)))
        .setAggregatorSpecs(Arrays.<AggregatorFactory>asList(
            new LongMaxAggregatorFactory("time_alias_max", "time_alias"),
            new DoubleMaxAggregatorFactory("index_alias_max", "index_alias")))
        .setGranularity(QueryRunnerTestHelper.allGran)
        .build();
    List<Row> expectedResults = Arrays.asList(
        GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "market", "spot", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 158.74722290039062),
        GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "market", "spot", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 166.01605224609375),
        GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "market", "total_market", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 1522.043701171875),
        GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "market", "total_market", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 1321.375),
        GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "market", "upfront", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 1447.3411865234375),
        GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "market", "upfront", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 1144.3424072265625));
    Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, outerQuery);
    TestHelper.assertExpectedObjects(expectedResults, results, "");
}
Also used: DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) RegexFilteredDimensionSpec(io.druid.query.dimension.RegexFilteredDimensionSpec) ExtractionDimensionSpec(io.druid.query.dimension.ExtractionDimensionSpec) ListFilteredDimensionSpec(io.druid.query.dimension.ListFilteredDimensionSpec) DimensionSpec(io.druid.query.dimension.DimensionSpec) DoubleMaxAggregatorFactory(io.druid.query.aggregation.DoubleMaxAggregatorFactory) Row(io.druid.data.input.Row) LongMaxAggregatorFactory(io.druid.query.aggregation.LongMaxAggregatorFactory) Test(org.junit.Test)

Example 2 with LongMaxAggregatorFactory

Use of io.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io, from the class SegmentMetadataQueryQueryToolChestTest, method testMergeAggregatorsConflict.

@Test
public void testMergeAggregatorsConflict() {
    final SegmentAnalysis analysis1 = new SegmentAnalysis(
        "id", null, Maps.<String, ColumnAnalysis>newHashMap(), 0, 0,
        ImmutableMap.of(
            "foo", new LongSumAggregatorFactory("foo", "foo"),
            "bar", new DoubleSumAggregatorFactory("bar", "bar")),
        null, null, null);
    final SegmentAnalysis analysis2 = new SegmentAnalysis(
        "id", null, Maps.<String, ColumnAnalysis>newHashMap(), 0, 0,
        ImmutableMap.of(
            "foo", new LongSumAggregatorFactory("foo", "foo"),
            "bar", new DoubleMaxAggregatorFactory("bar", "bar"),
            "baz", new LongMaxAggregatorFactory("baz", "baz")),
        null, null, null);
    final Map<String, AggregatorFactory> expectedLenient = Maps.newHashMap();
    expectedLenient.put("foo", new LongSumAggregatorFactory("foo", "foo"));
    expectedLenient.put("bar", null);
    expectedLenient.put("baz", new LongMaxAggregatorFactory("baz", "baz"));
    Assert.assertNull(mergeStrict(analysis1, analysis2).getAggregators());
    Assert.assertEquals(expectedLenient, mergeLenient(analysis1, analysis2).getAggregators());
    // Simulate multi-level merge
    Assert.assertEquals(expectedLenient, mergeLenient(mergeLenient(analysis1, analysis2), mergeLenient(analysis1, analysis2)).getAggregators());
}
Also used: DoubleMaxAggregatorFactory(io.druid.query.aggregation.DoubleMaxAggregatorFactory) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) SegmentAnalysis(io.druid.query.metadata.metadata.SegmentAnalysis) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) LongMaxAggregatorFactory(io.druid.query.aggregation.LongMaxAggregatorFactory) Test(org.junit.Test)
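
The assertions above pin down the merge semantics exercised by this test: a strict merge of conflicting aggregator sets yields null, while a lenient merge keeps identical entries, keeps names present on only one side, and nulls out names whose factories disagree. For reference, here is a standalone sketch of that lenient behavior; lenientMergeSketch is a hypothetical helper written for illustration, not Druid's implementation.

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import io.druid.query.aggregation.AggregatorFactory;

// Hypothetical sketch of the lenient-merge semantics asserted above: identical
// factories are kept, one-sided names are kept, and conflicting names (such as
// "bar" in expectedLenient) are mapped to null.
static Map<String, AggregatorFactory> lenientMergeSketch(Map<String, AggregatorFactory> left, Map<String, AggregatorFactory> right) {
    final Map<String, AggregatorFactory> merged = new HashMap<>(left);
    for (Map.Entry<String, AggregatorFactory> entry : right.entrySet()) {
        if (!merged.containsKey(entry.getKey())) {
            merged.put(entry.getKey(), entry.getValue());
        } else if (!Objects.equals(merged.get(entry.getKey()), entry.getValue())) {
            // Conflict: remember the metric name but drop the factory.
            merged.put(entry.getKey(), null);
        }
    }
    return merged;
}

Applying the helper to its own output leaves the map unchanged, which mirrors the multi-level merge assertion at the end of the test.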

Example 3 with LongMaxAggregatorFactory

Use of io.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io, from the class GroupByRules, method translateAggregateCall.

/**
   * Translate an AggregateCall to Druid equivalents.
   *
   * @return translated aggregation, or null if translation failed.
   */
private static Aggregation translateAggregateCall(
        final PlannerContext plannerContext,
        final RowSignature sourceRowSignature,
        final Project project,
        final AggregateCall call,
        final DruidOperatorTable operatorTable,
        final List<Aggregation> existingAggregations,
        final int aggNumber,
        final boolean approximateCountDistinct) {
    final List<DimFilter> filters = Lists.newArrayList();
    final List<String> rowOrder = sourceRowSignature.getRowOrder();
    final String name = aggOutputName(aggNumber);
    final SqlKind kind = call.getAggregation().getKind();
    final SqlTypeName outputType = call.getType().getSqlTypeName();
    if (call.filterArg >= 0) {
        // AGG(xxx) FILTER(WHERE yyy)
        if (project == null) {
            // We need some kind of projection to support filtered aggregations.
            return null;
        }
        final RexNode expression = project.getChildExps().get(call.filterArg);
        final DimFilter filter = Expressions.toFilter(operatorTable, plannerContext, sourceRowSignature, expression);
        if (filter == null) {
            return null;
        }
        filters.add(filter);
    }
    if (kind == SqlKind.COUNT && call.getArgList().isEmpty()) {
        // COUNT(*)
        return Aggregation.create(new CountAggregatorFactory(name)).filter(makeFilter(filters, sourceRowSignature));
    } else if (kind == SqlKind.COUNT && call.isDistinct()) {
        // COUNT(DISTINCT x)
        return approximateCountDistinct ? APPROX_COUNT_DISTINCT.toDruidAggregation(name, sourceRowSignature, operatorTable, plannerContext, existingAggregations, project, call, makeFilter(filters, sourceRowSignature)) : null;
    } else if (kind == SqlKind.COUNT || kind == SqlKind.SUM || kind == SqlKind.SUM0 || kind == SqlKind.MIN || kind == SqlKind.MAX || kind == SqlKind.AVG) {
        // Built-in agg, not distinct, not COUNT(*)
        boolean forceCount = false;
        final FieldOrExpression input;
        final int inputField = Iterables.getOnlyElement(call.getArgList());
        final RexNode rexNode = Expressions.fromFieldAccess(sourceRowSignature, project, inputField);
        final FieldOrExpression foe = FieldOrExpression.fromRexNode(operatorTable, plannerContext, rowOrder, rexNode);
        if (foe != null) {
            input = foe;
        } else if (rexNode.getKind() == SqlKind.CASE && ((RexCall) rexNode).getOperands().size() == 3) {
            // Possibly a CASE-style filtered aggregation. Styles supported:
            // A: SUM(CASE WHEN x = 'foo' THEN cnt END) => operands (x = 'foo', cnt, null)
            // B: SUM(CASE WHEN x = 'foo' THEN 1 ELSE 0 END) => operands (x = 'foo', 1, 0)
            // C: COUNT(CASE WHEN x = 'foo' THEN 'dummy' END) => operands (x = 'foo', 'dummy', null)
            // If the null and non-null args are switched, "flip" is set, which negates the filter.
            final RexCall caseCall = (RexCall) rexNode;
            final boolean flip = RexLiteral.isNullLiteral(caseCall.getOperands().get(1)) && !RexLiteral.isNullLiteral(caseCall.getOperands().get(2));
            final RexNode arg1 = caseCall.getOperands().get(flip ? 2 : 1);
            final RexNode arg2 = caseCall.getOperands().get(flip ? 1 : 2);
            // Operand 1: Filter
            final DimFilter filter = Expressions.toFilter(operatorTable, plannerContext, sourceRowSignature, caseCall.getOperands().get(0));
            if (filter == null) {
                return null;
            } else {
                filters.add(flip ? new NotDimFilter(filter) : filter);
            }
            if (call.getAggregation().getKind() == SqlKind.COUNT && arg1 instanceof RexLiteral && !RexLiteral.isNullLiteral(arg1) && RexLiteral.isNullLiteral(arg2)) {
                // Case C
                forceCount = true;
                input = null;
            } else if (call.getAggregation().getKind() == SqlKind.SUM && arg1 instanceof RexLiteral && ((Number) RexLiteral.value(arg1)).intValue() == 1 && arg2 instanceof RexLiteral && ((Number) RexLiteral.value(arg2)).intValue() == 0) {
                // Case B
                forceCount = true;
                input = null;
            } else if (RexLiteral.isNullLiteral(arg2)) {
                // Maybe case A
                input = FieldOrExpression.fromRexNode(operatorTable, plannerContext, rowOrder, arg1);
                if (input == null) {
                    return null;
                }
            } else {
                // Can't translate CASE into a filter.
                return null;
            }
        } else {
            // Can't translate operand.
            return null;
        }
        if (!forceCount) {
            Preconditions.checkNotNull(input, "WTF?! input was null for non-COUNT aggregation");
        }
        if (forceCount || kind == SqlKind.COUNT) {
            // COUNT(x)
            return Aggregation.create(new CountAggregatorFactory(name)).filter(makeFilter(filters, sourceRowSignature));
        } else {
            // Built-in aggregator that is not COUNT.
            final Aggregation retVal;
            final String fieldName = input.getFieldName();
            final String expression = input.getExpression();
            final boolean isLong = SqlTypeName.INT_TYPES.contains(outputType) || SqlTypeName.TIMESTAMP == outputType || SqlTypeName.DATE == outputType;
            if (kind == SqlKind.SUM || kind == SqlKind.SUM0) {
                retVal = isLong ? Aggregation.create(new LongSumAggregatorFactory(name, fieldName, expression)) : Aggregation.create(new DoubleSumAggregatorFactory(name, fieldName, expression));
            } else if (kind == SqlKind.MIN) {
                retVal = isLong ? Aggregation.create(new LongMinAggregatorFactory(name, fieldName, expression)) : Aggregation.create(new DoubleMinAggregatorFactory(name, fieldName, expression));
            } else if (kind == SqlKind.MAX) {
                retVal = isLong ? Aggregation.create(new LongMaxAggregatorFactory(name, fieldName, expression)) : Aggregation.create(new DoubleMaxAggregatorFactory(name, fieldName, expression));
            } else if (kind == SqlKind.AVG) {
                final String sumName = aggInternalName(aggNumber, "sum");
                final String countName = aggInternalName(aggNumber, "count");
                final AggregatorFactory sum = isLong ? new LongSumAggregatorFactory(sumName, fieldName, expression) : new DoubleSumAggregatorFactory(sumName, fieldName, expression);
                final AggregatorFactory count = new CountAggregatorFactory(countName);
                retVal = Aggregation.create(ImmutableList.of(sum, count), new ArithmeticPostAggregator(name, "quotient", ImmutableList.<PostAggregator>of(new FieldAccessPostAggregator(null, sumName), new FieldAccessPostAggregator(null, countName))));
            } else {
                // Not reached.
                throw new ISE("WTF?! Kind[%s] got into the built-in aggregator path somehow?!", kind);
            }
            return retVal.filter(makeFilter(filters, sourceRowSignature));
        }
    } else {
        // Not a built-in aggregator, check operator table.
        final SqlAggregator sqlAggregator = operatorTable.lookupAggregator(call.getAggregation().getName());
        return sqlAggregator != null ? sqlAggregator.toDruidAggregation(name, sourceRowSignature, operatorTable, plannerContext, existingAggregations, project, call, makeFilter(filters, sourceRowSignature)) : null;
    }
}
Also used: RexLiteral(org.apache.calcite.rex.RexLiteral) ArithmeticPostAggregator(io.druid.query.aggregation.post.ArithmeticPostAggregator) DoubleMaxAggregatorFactory(io.druid.query.aggregation.DoubleMaxAggregatorFactory) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) DoubleMinAggregatorFactory(io.druid.query.aggregation.DoubleMinAggregatorFactory) LongMinAggregatorFactory(io.druid.query.aggregation.LongMinAggregatorFactory) RexCall(org.apache.calcite.rex.RexCall) Aggregation(io.druid.sql.calcite.aggregation.Aggregation) ISE(io.druid.java.util.common.ISE) LongMaxAggregatorFactory(io.druid.query.aggregation.LongMaxAggregatorFactory) NotDimFilter(io.druid.query.filter.NotDimFilter) FieldAccessPostAggregator(io.druid.query.aggregation.post.FieldAccessPostAggregator) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) PostAggregator(io.druid.query.aggregation.PostAggregator) SqlKind(org.apache.calcite.sql.SqlKind) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) PostAggregatorFactory(io.druid.sql.calcite.aggregation.PostAggregatorFactory) SqlAggregator(io.druid.sql.calcite.aggregation.SqlAggregator) ApproxCountDistinctSqlAggregator(io.druid.sql.calcite.aggregation.ApproxCountDistinctSqlAggregator) DimFilter(io.druid.query.filter.DimFilter) AndDimFilter(io.druid.query.filter.AndDimFilter) RexNode(org.apache.calcite.rex.RexNode)
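
In the MIN/MAX branch of translateAggregateCall, the choice between the long and double factories hangs entirely on the SQL output type. Below is a minimal sketch of just the MAX case, factored into a helper for readability; maxFactoryFor is a hypothetical name used only for this illustration and is not part of GroupByRules.

import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleMaxAggregatorFactory;
import io.druid.query.aggregation.LongMaxAggregatorFactory;
import org.apache.calcite.sql.type.SqlTypeName;

// Integer-like, TIMESTAMP, and DATE output types map to LongMaxAggregatorFactory;
// everything else falls back to DoubleMaxAggregatorFactory, as in the branch above.
static AggregatorFactory maxFactoryFor(String name, String fieldName, String expression, SqlTypeName outputType) {
    final boolean isLong = SqlTypeName.INT_TYPES.contains(outputType)
            || outputType == SqlTypeName.TIMESTAMP
            || outputType == SqlTypeName.DATE;
    return isLong
            ? new LongMaxAggregatorFactory(name, fieldName, expression)
            : new DoubleMaxAggregatorFactory(name, fieldName, expression);
}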

Example 4 with LongMaxAggregatorFactory

Use of io.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io, from the class OnheapIncrementalIndexTest, method testMultithreadAddFacts.

@Test
public void testMultithreadAddFacts() throws Exception {
    final OnheapIncrementalIndex index = new OnheapIncrementalIndex(0, Granularities.MINUTE, new AggregatorFactory[] { new LongMaxAggregatorFactory("max", "max") }, MAX_ROWS);
    final Random random = new Random();
    final int addThreadCount = 2;
    Thread[] addThreads = new Thread[addThreadCount];
    for (int i = 0; i < addThreadCount; ++i) {
        addThreads[i] = new Thread(new Runnable() {

            @Override
            public void run() {
                try {
                    for (int j = 0; j < MAX_ROWS / addThreadCount; ++j) {
                        index.add(new MapBasedInputRow(0, Lists.newArrayList("billy"), ImmutableMap.<String, Object>of("billy", random.nextLong(), "max", 1)));
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        });
        addThreads[i].start();
    }
    final AtomicInteger checkFailedCount = new AtomicInteger(0);
    Thread checkThread = new Thread(new Runnable() {

        @Override
        public void run() {
            while (!Thread.interrupted()) {
                for (Map.Entry<IncrementalIndex.TimeAndDims, Integer> row : index.getFacts().entrySet()) {
                    if (index.getMetricLongValue(row.getValue(), 0) != 1) {
                        checkFailedCount.addAndGet(1);
                    }
                }
            }
        }
    });
    checkThread.start();
    for (int i = 0; i < addThreadCount; ++i) {
        addThreads[i].join();
    }
    checkThread.interrupt();
    Assert.assertEquals(0, checkFailedCount.get());
}
Also used: Random(java.util.Random) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MapBasedInputRow(io.druid.data.input.MapBasedInputRow) LongMaxAggregatorFactory(io.druid.query.aggregation.LongMaxAggregatorFactory) Test(org.junit.Test)

Example 5 with LongMaxAggregatorFactory

Use of io.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io, from the class TimeseriesBenchmark, method setupQueries.

private void setupQueries() {
    // queries for the basic schema
    Map<String, TimeseriesQuery> basicQueries = new LinkedHashMap<>();
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    {
        // basic.A
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"));
        queryAggs.add(new LongMaxAggregatorFactory("maxLongUniform", "maxLongUniform"));
        queryAggs.add(new DoubleSumAggregatorFactory("sumFloatNormal", "sumFloatNormal"));
        queryAggs.add(new DoubleMinAggregatorFactory("minFloatZipf", "minFloatZipf"));
        queryAggs.add(new HyperUniquesAggregatorFactory("hyperUniquesMet", "hyper"));
        TimeseriesQuery queryA = Druids.newTimeseriesQueryBuilder().dataSource("blah").granularity(Granularities.ALL).intervals(intervalSpec).aggregators(queryAggs).descending(false).build();
        basicQueries.put("A", queryA);
    }
    {
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        BoundDimFilter timeFilter = new BoundDimFilter(Column.TIME_COLUMN_NAME, "200000", "300000", false, false, null, null, StringComparators.NUMERIC);
        queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder().dataSource("blah").granularity(Granularities.ALL).intervals(intervalSpec).aggregators(queryAggs).descending(false).build();
        basicQueries.put("timeFilterNumeric", timeFilterQuery);
    }
    {
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        BoundDimFilter timeFilter = new BoundDimFilter(Column.TIME_COLUMN_NAME, "200000", "300000", false, false, null, null, StringComparators.ALPHANUMERIC);
        queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder().dataSource("blah").granularity(Granularities.ALL).intervals(intervalSpec).aggregators(queryAggs).descending(false).build();
        basicQueries.put("timeFilterAlphanumeric", timeFilterQuery);
    }
    {
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(new Interval(200000, 300000)));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        queryAggs.add(lsaf);
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder().dataSource("blah").granularity(Granularities.ALL).intervals(intervalSpec).aggregators(queryAggs).descending(false).build();
        basicQueries.put("timeFilterByInterval", timeFilterQuery);
    }
    SCHEMA_QUERY_MAP.put("basic", basicQueries);
}
Also used: FilteredAggregatorFactory(io.druid.query.aggregation.FilteredAggregatorFactory) BoundDimFilter(io.druid.query.filter.BoundDimFilter) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) DoubleMinAggregatorFactory(io.druid.query.aggregation.DoubleMinAggregatorFactory) LinkedHashMap(java.util.LinkedHashMap) BenchmarkSchemaInfo(io.druid.benchmark.datagen.BenchmarkSchemaInfo) HyperUniquesAggregatorFactory(io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) QuerySegmentSpec(io.druid.query.spec.QuerySegmentSpec) List(java.util.List) ArrayList(java.util.ArrayList) LongMaxAggregatorFactory(io.druid.query.aggregation.LongMaxAggregatorFactory) Interval(org.joda.time.Interval)

Aggregations

LongMaxAggregatorFactory (io.druid.query.aggregation.LongMaxAggregatorFactory): 9 usages
LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory): 6 usages
DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory): 5 usages
Test (org.junit.Test): 5 usages
DoubleMaxAggregatorFactory (io.druid.query.aggregation.DoubleMaxAggregatorFactory): 4 usages
DoubleMinAggregatorFactory (io.druid.query.aggregation.DoubleMinAggregatorFactory): 4 usages
ArrayList (java.util.ArrayList): 4 usages
BenchmarkSchemaInfo (io.druid.benchmark.datagen.BenchmarkSchemaInfo): 3 usages
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 3 usages
HyperUniquesAggregatorFactory (io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory): 3 usages
MultipleIntervalSegmentSpec (io.druid.query.spec.MultipleIntervalSegmentSpec): 3 usages
QuerySegmentSpec (io.druid.query.spec.QuerySegmentSpec): 3 usages
LinkedHashMap (java.util.LinkedHashMap): 3 usages
List (java.util.List): 3 usages
MapBasedInputRow (io.druid.data.input.MapBasedInputRow): 2 usages
ExtractionDimensionSpec (io.druid.query.dimension.ExtractionDimensionSpec): 2 usages
DimensionTopNMetricSpec (io.druid.query.topn.DimensionTopNMetricSpec): 2 usages
TopNQueryBuilder (io.druid.query.topn.TopNQueryBuilder): 2 usages
Row (io.druid.data.input.Row): 1 usage
TimestampSpec (io.druid.data.input.impl.TimestampSpec): 1 usage