
Example 86 with ResultRow

Use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.

From the class CachingClusteredClientBenchmark, method groupByQuery().

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void groupByQuery(Blackhole blackhole) {
    query = GroupByQuery.builder()
            .setDataSource(DATA_SOURCE)
            .setQuerySegmentSpec(basicSchemaIntervalSpec)
            .setDimensions(new DefaultDimensionSpec("dimZipf", null), new DefaultDimensionSpec("dimSequential", null))
            .setAggregatorSpecs(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"))
            .setGranularity(Granularity.fromString(queryGranularity))
            .setContext(ImmutableMap.of(
                QueryContexts.BROKER_PARALLEL_MERGE_KEY, parallelCombine,
                QueryContexts.BROKER_PARALLELISM, parallelism))
            .build();
    final List<ResultRow> results = runQuery();
    for (ResultRow result : results) {
        blackhole.consume(result);
    }
}
Also used: ResultRow (org.apache.druid.query.groupby.ResultRow), LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), BenchmarkMode (org.openjdk.jmh.annotations.BenchmarkMode), Benchmark (org.openjdk.jmh.annotations.Benchmark), OutputTimeUnit (org.openjdk.jmh.annotations.OutputTimeUnit)
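
For readers unfamiliar with ResultRow: it is a flat, array-backed row whose column order is fixed by the query, with an optional result-level timestamp first, then dimensions in declaration order, then aggregators, then post-aggregators. A minimal sketch of reading the rows returned above by position, assuming the ResultRow.get(int) accessor and a granularity that keeps the timestamp column; the offsets are illustrative only, not part of the benchmark.

// Minimal sketch, assuming ResultRow.get(int) and a layout of
// [timestamp, dimZipf, dimSequential, sumLongSequential] for the query above.
static void inspect(List<ResultRow> results) {
    for (ResultRow row : results) {
        Object timestamp = row.get(0);            // result-level timestamp, if present
        Object dimZipf = row.get(1);              // first declared dimension
        Object dimSequential = row.get(2);        // second declared dimension
        Object sumLongSequential = row.get(3);    // the lone aggregator
    }
}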

Example 87 with ResultRow

Use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.

From the class GroupByBenchmark, method queryMultiQueryableIndexWithSerde().

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndexWithSerde(Blackhole blackhole, QueryableIndexState state) {
    QueryToolChest<ResultRow, GroupByQuery> toolChest = factory.getToolchest();
    // noinspection unchecked
    QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
        toolChest.mergeResults(
            new SerializingQueryRunner<>(
                new DefaultObjectMapper(new SmileFactory()),
                ResultRow.class,
                toolChest.mergeResults(
                    factory.mergeRunners(state.executorService, makeMultiRunners(state))
                )
            )
        ),
        (QueryToolChest) toolChest
    );
    Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
    List<ResultRow> results = queryResult.toList();
    blackhole.consume(results);
}
Also used: ResultRow (org.apache.druid.query.groupby.ResultRow), GroupByQuery (org.apache.druid.query.groupby.GroupByQuery), SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), DefaultObjectMapper (org.apache.druid.jackson.DefaultObjectMapper), BenchmarkMode (org.openjdk.jmh.annotations.BenchmarkMode), Benchmark (org.openjdk.jmh.annotations.Benchmark), OutputTimeUnit (org.openjdk.jmh.annotations.OutputTimeUnit)
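
The SerializingQueryRunner layer pushes every ResultRow through a Smile-encoded ObjectMapper, approximating the serde cost of shipping rows between processes that the other benchmark variants skip. A minimal sketch of that round trip in isolation, assuming ResultRow serializes as a flat array via Jackson in the way this benchmark relies on; the helper method name is ours, not the project's.

// Assumed imports: com.fasterxml.jackson.databind.ObjectMapper, java.io.IOException
static ResultRow smileRoundTrip(ResultRow row) throws IOException {
    ObjectMapper smileMapper = new DefaultObjectMapper(new SmileFactory());
    byte[] bytes = smileMapper.writeValueAsBytes(row);       // encode the positional array as Smile
    return smileMapper.readValue(bytes, ResultRow.class);    // decode it back into a ResultRow
}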

Example 88 with ResultRow

Use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.

From the class GroupByBenchmark, method querySingleIncrementalIndex().

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleIncrementalIndex(Blackhole blackhole, IncrementalIndexState state) {
    QueryRunner<ResultRow> runner = QueryBenchmarkUtil.makeQueryRunner(
        factory,
        SegmentId.dummy("incIndex"),
        new IncrementalIndexSegment(state.incIndex, SegmentId.dummy("incIndex"))
    );
    final Sequence<ResultRow> results = GroupByBenchmark.runQuery(factory, runner, query);
    final ResultRow lastRow = results.accumulate(null, (accumulated, in) -> in);
    blackhole.consume(lastRow);
}
Also used: ResultRow (org.apache.druid.query.groupby.ResultRow), IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment), BenchmarkMode (org.openjdk.jmh.annotations.BenchmarkMode), Benchmark (org.openjdk.jmh.annotations.Benchmark), OutputTimeUnit (org.openjdk.jmh.annotations.OutputTimeUnit)
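
The accumulate call above folds the Sequence row by row without materializing a List, which is why the benchmark can keep only the last row and still force full evaluation. The same primitive supports any reduction; a small sketch under the same assumptions, where the counting logic is illustrative rather than part of the benchmark.

// Count the rows of a groupBy result without building an intermediate List.
static int countRows(Sequence<ResultRow> results) {
    return results.accumulate(0, (count, row) -> count + 1);
}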

Example 89 with ResultRow

Use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.

From the class GroupByBenchmark, method queryMultiQueryableIndexWithSpilling().

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndexWithSpilling(Blackhole blackhole, QueryableIndexState state) {
    QueryToolChest<ResultRow, GroupByQuery> toolChest = factory.getToolchest();
    QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
        toolChest.mergeResults(factory.mergeRunners(state.executorService, makeMultiRunners(state))),
        (QueryToolChest) toolChest
    );
    final GroupByQuery spillingQuery = query.withOverriddenContext(ImmutableMap.of("bufferGrouperMaxSize", 4000));
    Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(spillingQuery), ResponseContext.createEmpty());
    List<ResultRow> results = queryResult.toList();
    blackhole.consume(results);
}
Also used: ResultRow (org.apache.druid.query.groupby.ResultRow), GroupByQuery (org.apache.druid.query.groupby.GroupByQuery), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), BenchmarkMode (org.openjdk.jmh.annotations.BenchmarkMode), Benchmark (org.openjdk.jmh.annotations.Benchmark), OutputTimeUnit (org.openjdk.jmh.annotations.OutputTimeUnit)
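
Overriding bufferGrouperMaxSize down to 4000 forces the grouper to start spilling to disk almost immediately, which is the code path this benchmark isolates. withOverriddenContext returns a copy of the query with the supplied entries merged over its existing context, so overrides can be layered without mutating the original; a short sketch, where the timeout key is a standard query-context parameter added purely for illustration.

// Layer a second context override on top of the spilling one; `query` itself is untouched.
final GroupByQuery spillingQuery = query.withOverriddenContext(ImmutableMap.of("bufferGrouperMaxSize", 4000));
final GroupByQuery spillingWithTimeout = spillingQuery.withOverriddenContext(ImmutableMap.of("timeout", 60_000));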

Example 90 with ResultRow

Use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.

From the class DistinctCountGroupByQueryTest, method testGroupByWithDistinctCountAgg().

@Test
public void testGroupByWithDistinctCountAgg() throws Exception {
    IncrementalIndex index = new OnheapIncrementalIndex.Builder()
        .setIndexSchema(
            new IncrementalIndexSchema.Builder()
                .withQueryGranularity(Granularities.SECOND)
                .withMetrics(new CountAggregatorFactory("cnt"))
                .build()
        )
        .setConcurrentEventAdd(true)
        .setMaxRowCount(1000)
        .build();
    String visitor_id = "visitor_id";
    String client_type = "client_type";
    long timestamp = DateTimes.of("2010-01-01").getMillis();
    index.add(new MapBasedInputRow(timestamp, Lists.newArrayList(visitor_id, client_type), ImmutableMap.of(visitor_id, "0", client_type, "iphone")));
    index.add(new MapBasedInputRow(timestamp + 1, Lists.newArrayList(visitor_id, client_type), ImmutableMap.of(visitor_id, "1", client_type, "iphone")));
    index.add(new MapBasedInputRow(timestamp + 2, Lists.newArrayList(visitor_id, client_type), ImmutableMap.of(visitor_id, "2", client_type, "android")));
    GroupByQuery query = new GroupByQuery.Builder()
        .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
        .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
        .setDimensions(new DefaultDimensionSpec(client_type, client_type))
        .setInterval(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
        .setLimitSpec(new DefaultLimitSpec(
            Collections.singletonList(new OrderByColumnSpec(client_type, OrderByColumnSpec.Direction.DESCENDING)),
            10))
        .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new DistinctCountAggregatorFactory("UV", visitor_id, null))
        .build();
    final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
    Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(
        factory,
        factory.createRunner(incrementalIndexSegment),
        query
    );
    List<ResultRow> expectedResults = Arrays.asList(
        GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", client_type, "iphone", "UV", 2L, "rows", 2L),
        GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", client_type, "android", "UV", 1L, "rows", 1L)
    );
    TestHelper.assertExpectedObjects(expectedResults, results, "distinct-count");
}
Also used: ResultRow (org.apache.druid.query.groupby.ResultRow), DefaultLimitSpec (org.apache.druid.query.groupby.orderby.DefaultLimitSpec), IncrementalIndex (org.apache.druid.segment.incremental.IncrementalIndex), OnheapIncrementalIndex (org.apache.druid.segment.incremental.OnheapIncrementalIndex), IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), Segment (org.apache.druid.segment.Segment), OrderByColumnSpec (org.apache.druid.query.groupby.orderby.OrderByColumnSpec), GroupByQuery (org.apache.druid.query.groupby.GroupByQuery), CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory), MapBasedInputRow (org.apache.druid.data.input.MapBasedInputRow), IncrementalIndexSchema (org.apache.druid.segment.incremental.IncrementalIndexSchema), GroupByQueryRunnerTest (org.apache.druid.query.groupby.GroupByQueryRunnerTest), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)
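
Because ResultRow is positional, assertions are often easier to read against a named view of each row. A small sketch of converting the rows returned above, assuming a toMapBasedRow(GroupByQuery) helper on ResultRow that resolves positions back to the query's output names; MapBasedRow would come from org.apache.druid.data.input.

for (ResultRow row : results) {
    MapBasedRow named = row.toMapBasedRow(query);              // assumed conversion helper
    Object clientType = named.getEvent().get("client_type");   // dimension output name
    Object uniqueVisitors = named.getEvent().get("UV");        // distinct-count aggregator name
    // These named values can back assertions without tracking array offsets.
}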

Aggregations

ResultRow (org.apache.druid.query.groupby.ResultRow): 129 usages
Test (org.junit.Test): 81 usages
GroupByQueryRunnerTest (org.apache.druid.query.groupby.GroupByQueryRunnerTest): 65 usages
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 59 usages
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery): 58 usages
File (java.io.File): 39 usages
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 37 usages
QueryableIndexSegment (org.apache.druid.segment.QueryableIndexSegment): 34 usages
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 24 usages
Benchmark (org.openjdk.jmh.annotations.Benchmark): 21 usages
BenchmarkMode (org.openjdk.jmh.annotations.BenchmarkMode): 21 usages
OutputTimeUnit (org.openjdk.jmh.annotations.OutputTimeUnit): 21 usages
IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment): 20 usages
LegacySegmentSpec (org.apache.druid.query.spec.LegacySegmentSpec): 18 usages
List (java.util.List): 17 usages
DefaultLimitSpec (org.apache.druid.query.groupby.orderby.DefaultLimitSpec): 15 usages
ArrayList (java.util.ArrayList): 14 usages
GroupByQueryConfig (org.apache.druid.query.groupby.GroupByQueryConfig): 14 usages
ExpressionVirtualColumn (org.apache.druid.segment.virtual.ExpressionVirtualColumn): 13 usages
ByteBuffer (java.nio.ByteBuffer): 12 usages