use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.
the class CachingClusteredClientBenchmark method groupByQuery.
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void groupByQuery(Blackhole blackhole) {
query = GroupByQuery.builder()
                    .setDataSource(DATA_SOURCE)
                    .setQuerySegmentSpec(basicSchemaIntervalSpec)
                    .setDimensions(
                        new DefaultDimensionSpec("dimZipf", null),
                        new DefaultDimensionSpec("dimSequential", null)
                    )
                    .setAggregatorSpecs(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"))
                    .setGranularity(Granularity.fromString(queryGranularity))
                    .setContext(
                        ImmutableMap.of(
                            QueryContexts.BROKER_PARALLEL_MERGE_KEY, parallelCombine,
                            QueryContexts.BROKER_PARALLELISM, parallelism
                        )
                    )
                    .build();
final List<ResultRow> results = runQuery();
for (ResultRow result : results) {
blackhole.consume(result);
}
}
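ResultRow is positional rather than map-based: when the query uses a granularity other than ALL, position 0 holds the bucketed timestamp, followed by the dimensions in declaration order and then the aggregators. A minimal sketch of reading the rows consumed above, assuming a non-ALL queryGranularity parameter and the field layout implied by the query built in this benchmark:
for (ResultRow result : results) {
  long bucketMillis = (long) result.get(0);        // timestamp slot (granularity != ALL)
  String dimZipf = (String) result.get(1);         // first dimension in declaration order
  String dimSequential = (String) result.get(2);   // second dimension
  long sumLongSequential = (long) result.get(3);   // the lone aggregator
}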
use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.
the class GroupByBenchmark method queryMultiQueryableIndexWithSerde.
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndexWithSerde(Blackhole blackhole, QueryableIndexState state) {
QueryToolChest<ResultRow, GroupByQuery> toolChest = factory.getToolchest();
// noinspection unchecked
QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
    toolChest.mergeResults(
        new SerializingQueryRunner<>(
            new DefaultObjectMapper(new SmileFactory()),
            ResultRow.class,
            toolChest.mergeResults(
                factory.mergeRunners(state.executorService, makeMultiRunners(state))
            )
        )
    ),
    (QueryToolChest) toolChest
);
Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
List<ResultRow> results = queryResult.toList();
blackhole.consume(results);
}
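SerializingQueryRunner is a benchmark-local wrapper that pushes every merged row through the Smile-backed ObjectMapper, so the measured time includes serde overhead rather than just query execution. A hedged sketch of the per-row round trip it effectively performs (the wrapper's actual internals may batch or stream differently):
static ResultRow roundTrip(ObjectMapper smileMapper, ResultRow row) throws IOException {
  byte[] bytes = smileMapper.writeValueAsBytes(row);     // serialize the row to Smile bytes
  return smileMapper.readValue(bytes, ResultRow.class);  // deserialize it back
}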
use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.
the class GroupByBenchmark method querySingleIncrementalIndex.
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleIncrementalIndex(Blackhole blackhole, IncrementalIndexState state) {
QueryRunner<ResultRow> runner = QueryBenchmarkUtil.makeQueryRunner(
    factory,
    SegmentId.dummy("incIndex"),
    new IncrementalIndexSegment(state.incIndex, SegmentId.dummy("incIndex"))
);
final Sequence<ResultRow> results = GroupByBenchmark.runQuery(factory, runner, query);
final ResultRow lastRow = results.accumulate(null, (accumulated, in) -> in);
blackhole.consume(lastRow);
}
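Unlike the toList() variants above, accumulate folds the sequence lazily and never materializes the full result set; the accumulator here simply keeps the last row it sees. The same hook can compute any fold, for example counting rows (a sketch, not part of the benchmark):
final int rowCount = results.accumulate(0, (count, row) -> count + 1);
blackhole.consume(rowCount);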
use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.
the class GroupByBenchmark method queryMultiQueryableIndexWithSpilling.
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndexWithSpilling(Blackhole blackhole, QueryableIndexState state) {
QueryToolChest<ResultRow, GroupByQuery> toolChest = factory.getToolchest();
QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
    toolChest.mergeResults(
        factory.mergeRunners(state.executorService, makeMultiRunners(state))
    ),
    (QueryToolChest) toolChest
);
final GroupByQuery spillingQuery = query.withOverriddenContext(ImmutableMap.of("bufferGrouperMaxSize", 4000));
Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(spillingQuery), ResponseContext.createEmpty());
List<ResultRow> results = queryResult.toList();
blackhole.consume(results);
}
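Capping bufferGrouperMaxSize at 4,000 entries forces the on-heap grouper to overflow so the benchmark exercises the spill-to-disk merge path. withOverriddenContext returns a copy of the query with the merged context, leaving the original untouched; related groupBy tuning keys can be layered into the same map, e.g. (illustrative values):
final GroupByQuery tunedQuery = query.withOverriddenContext(
    ImmutableMap.of(
        "bufferGrouperMaxSize", 4000,        // spill after 4,000 hash-table entries
        "maxOnDiskStorage", 1_000_000_000L   // cap spill files at roughly 1 GB
    )
);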
use of org.apache.druid.query.groupby.ResultRow in project druid by druid-io.
the class DistinctCountGroupByQueryTest method testGroupByWithDistinctCountAgg.
@Test
public void testGroupByWithDistinctCountAgg() throws Exception {
IncrementalIndex index = new OnheapIncrementalIndex.Builder()
    .setIndexSchema(
        new IncrementalIndexSchema.Builder()
            .withQueryGranularity(Granularities.SECOND)
            .withMetrics(new CountAggregatorFactory("cnt"))
            .build()
    )
    .setConcurrentEventAdd(true)
    .setMaxRowCount(1000)
    .build();
String visitor_id = "visitor_id";
String client_type = "client_type";
long timestamp = DateTimes.of("2010-01-01").getMillis();
index.add(new MapBasedInputRow(
    timestamp,
    Lists.newArrayList(visitor_id, client_type),
    ImmutableMap.of(visitor_id, "0", client_type, "iphone")
));
index.add(new MapBasedInputRow(
    timestamp + 1,
    Lists.newArrayList(visitor_id, client_type),
    ImmutableMap.of(visitor_id, "1", client_type, "iphone")
));
index.add(new MapBasedInputRow(
    timestamp + 2,
    Lists.newArrayList(visitor_id, client_type),
    ImmutableMap.of(visitor_id, "2", client_type, "android")
));
GroupByQuery query = new GroupByQuery.Builder()
    .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
    .setDimensions(new DefaultDimensionSpec(client_type, client_type))
    .setInterval(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
    .setLimitSpec(
        new DefaultLimitSpec(
            Collections.singletonList(new OrderByColumnSpec(client_type, OrderByColumnSpec.Direction.DESCENDING)),
            10
        )
    )
    .setAggregatorSpecs(
        QueryRunnerTestHelper.ROWS_COUNT,
        new DistinctCountAggregatorFactory("UV", visitor_id, null)
    )
    .build();
final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, factory.createRunner(incrementalIndexSegment), query);
List<ResultRow> expectedResults = Arrays.asList(
    GroupByQueryRunnerTestHelper.createExpectedRow(
        query,
        "1970-01-01T00:00:00.000Z",
        client_type, "iphone",
        "UV", 2L,
        "rows", 2L
    ),
    GroupByQueryRunnerTestHelper.createExpectedRow(
        query,
        "1970-01-01T00:00:00.000Z",
        client_type, "android",
        "UV", 1L,
        "rows", 1L
    )
);
TestHelper.assertExpectedObjects(expectedResults, results, "distinct-count");
}
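createExpectedRow takes alternating name/value varargs and places each value at that field's position for the given query. Because this query uses ALL granularity there is no timestamp slot, so the first expected row above should be positionally equivalent to the following (a sketch, assuming the aggregators land in declaration order, rows before UV):
ResultRow iphoneRow = ResultRow.of("iphone", 2L, 2L); // client_type, rows, UV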