Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
In class SegmentAnalyzerTest, method getSegmentAnalysises.
/**
 * *Awesome* method name auto-generated by IntelliJ! I love IntelliJ!
 *
 * @param index    the segment to analyze
 * @param analyses the analysis types to include in the metadata query
 * @return one SegmentAnalysis per result of the segment metadata query
 */
private List<SegmentAnalysis> getSegmentAnalysises(Segment index, EnumSet<SegmentMetadataQuery.AnalysisType> analyses)
{
  final QueryRunner runner = QueryRunnerTestHelper.makeQueryRunner(
      (QueryRunnerFactory) new SegmentMetadataQueryRunnerFactory(
          new SegmentMetadataQueryQueryToolChest(new SegmentMetadataQueryConfig()),
          QueryRunnerTestHelper.NOOP_QUERYWATCHER
      ),
      index,
      null
  );
  final SegmentMetadataQuery query = new SegmentMetadataQuery(
      new TableDataSource("test"),
      new LegacySegmentSpec("2011/2012"),
      null,
      null,
      null,
      analyses,
      false,
      false
  );
  return runner.run(QueryPlus.wrap(query)).toList();
}
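A minimal sketch of how a test could call this helper; the segment variable and the assertion are hypothetical, not from the original test:

// Hypothetical usage: run only the CARDINALITY analysis over a test segment
// ("segment" is assumed to be built by the surrounding test fixture).
List<SegmentAnalysis> analyses = getSegmentAnalysises(
    segment,
    EnumSet.of(SegmentMetadataQuery.AnalysisType.CARDINALITY)
);
Assert.assertEquals(1, analyses.size());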
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
In class SegmentMetadataQueryTest, method testSegmentMetadataQueryWithRollupMerge.
@Test
public void testSegmentMetadataQueryWithRollupMerge() {
  SegmentAnalysis mergedSegmentAnalysis = new SegmentAnalysis(
      differentIds ? "merged" : SegmentId.dummy("testSegment").toString(),
      null,
      ImmutableMap.of(
          "placement",
          new ColumnAnalysis(ColumnType.STRING, ValueType.STRING.toString(), false, false, 0, 0, null, null, null),
          "placementish",
          new ColumnAnalysis(ColumnType.STRING, ValueType.STRING.toString(), true, false, 0, 0, null, null, null)
      ),
      0,
      expectedSegmentAnalysis1.getNumRows() + expectedSegmentAnalysis2.getNumRows(),
      null,
      null,
      null,
      // The merged rollup flag is only meaningful when both segments agree on it.
      rollup1 != rollup2 ? null : rollup1
  );
  QueryToolChest toolChest = FACTORY.getToolchest();
  // Note: this pool is never used; mergeRunners below runs on a direct executor.
  ExecutorService exec = Executors.newCachedThreadPool();
  QueryRunner myRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(
          FACTORY.mergeRunners(
              Execs.directExecutor(),
              Lists.newArrayList(
                  toolChest.preMergeQueryDecoration(runner1),
                  toolChest.preMergeQueryDecoration(runner2)
              )
          )
      ),
      toolChest
  );
  SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
      .dataSource("testing")
      .intervals("2013/2014")
      .toInclude(new ListColumnIncluderator(Arrays.asList("placement", "placementish")))
      .analysisTypes(SegmentMetadataQuery.AnalysisType.ROLLUP)
      .merge(true)
      .build();
  TestHelper.assertExpectedObjects(
      ImmutableList.of(mergedSegmentAnalysis),
      myRunner.run(QueryPlus.wrap(query)),
      "failed SegmentMetadata merging query"
  );
  exec.shutdownNow();
}
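The runner wiring above (pre-merge decoration, merge, finalize) recurs in the tests below. A minimal sketch of that pattern factored into a helper, assuming the same FACTORY fixture; the helper name makeMergingRunner is hypothetical:

// Hypothetical helper: decorate each runner, merge them on a direct
// executor, then finalize results with the tool chest.
private QueryRunner makeMergingRunner(QueryToolChest toolChest, QueryRunner... runners)
{
  List<QueryRunner> decorated = new ArrayList<>();
  for (QueryRunner runner : runners) {
    decorated.add(toolChest.preMergeQueryDecoration(runner));
  }
  return new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(FACTORY.mergeRunners(Execs.directExecutor(), decorated)),
      toolChest
  );
}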
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
In class SegmentMetadataQueryTest, method testSegmentMetadataQueryWithNoAnalysisTypesMerge.
@Test
public void testSegmentMetadataQueryWithNoAnalysisTypesMerge() {
  SegmentAnalysis mergedSegmentAnalysis = new SegmentAnalysis(
      differentIds ? "merged" : SegmentId.dummy("testSegment").toString(),
      null,
      ImmutableMap.of(
          "placement",
          new ColumnAnalysis(ColumnType.STRING, ValueType.STRING.toString(), false, false, 0, 0, null, null, null)
      ),
      0,
      expectedSegmentAnalysis1.getNumRows() + expectedSegmentAnalysis2.getNumRows(),
      null,
      null,
      null,
      null
  );
  QueryToolChest toolChest = FACTORY.getToolchest();
  ExecutorService exec = Executors.newCachedThreadPool();
  QueryRunner myRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(
          FACTORY.mergeRunners(
              Execs.directExecutor(),
              Lists.newArrayList(
                  toolChest.preMergeQueryDecoration(runner1),
                  toolChest.preMergeQueryDecoration(runner2)
              )
          )
      ),
      toolChest
  );
  SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
      .dataSource("testing")
      .intervals("2013/2014")
      .toInclude(new ListColumnIncluderator(Collections.singletonList("placement")))
      .analysisTypes()
      .merge(true)
      .build();
  TestHelper.assertExpectedObjects(
      ImmutableList.of(mergedSegmentAnalysis),
      myRunner.run(QueryPlus.wrap(query)),
      "failed SegmentMetadata merging query"
  );
  exec.shutdownNow();
}
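For comparison, a hypothetical unmerged variant of the same query, run against a single runner (runner1 comes from the surrounding test fixture):

// Hypothetical: the same query without merging, against one runner only.
// An empty analysisTypes() call requests no extra analyses.
SegmentMetadataQuery singleQuery = Druids.newSegmentMetadataQueryBuilder()
    .dataSource("testing")
    .intervals("2013/2014")
    .toInclude(new ListColumnIncluderator(Collections.singletonList("placement")))
    .analysisTypes()
    .merge(false)
    .build();
List<SegmentAnalysis> results = runner1.run(QueryPlus.wrap(singleQuery)).toList();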
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
In class SegmentMetadataQueryTest, method testSegmentMetadataQueryWithComplexColumnMerge.
@Test
public void testSegmentMetadataQueryWithComplexColumnMerge() {
  SegmentAnalysis mergedSegmentAnalysis = new SegmentAnalysis(
      differentIds ? "merged" : SegmentId.dummy("testSegment").toString(),
      null,
      ImmutableMap.of(
          "placement",
          new ColumnAnalysis(ColumnType.STRING, ValueType.STRING.toString(), false, false, 0, 1, null, null, null),
          "quality_uniques",
          new ColumnAnalysis(ColumnType.ofComplex("hyperUnique"), "hyperUnique", false, true, 0, null, null, null, null)
      ),
      0,
      expectedSegmentAnalysis1.getNumRows() + expectedSegmentAnalysis2.getNumRows(),
      null,
      null,
      null,
      null
  );
  QueryToolChest toolChest = FACTORY.getToolchest();
  ExecutorService exec = Executors.newCachedThreadPool();
  QueryRunner myRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(
          FACTORY.mergeRunners(
              Execs.directExecutor(),
              Lists.newArrayList(
                  toolChest.preMergeQueryDecoration(runner1),
                  toolChest.preMergeQueryDecoration(runner2)
              )
          )
      ),
      toolChest
  );
  SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
      .dataSource("testing")
      .intervals("2013/2014")
      .toInclude(new ListColumnIncluderator(Arrays.asList("placement", "quality_uniques")))
      .analysisTypes(SegmentMetadataQuery.AnalysisType.CARDINALITY)
      .merge(true)
      .build();
  TestHelper.assertExpectedObjects(
      ImmutableList.of(mergedSegmentAnalysis),
      myRunner.run(QueryPlus.wrap(query)),
      "failed SegmentMetadata merging query"
  );
  exec.shutdownNow();
}
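Since analysisTypes takes varargs, a hypothetical variant could request CARDINALITY and ROLLUP in one query; this sketch reuses only calls shown above:

// Hypothetical: combine two analysis types in a single metadata query.
SegmentMetadataQuery combined = Druids.newSegmentMetadataQueryBuilder()
    .dataSource("testing")
    .intervals("2013/2014")
    .toInclude(new ListColumnIncluderator(Arrays.asList("placement", "quality_uniques")))
    .analysisTypes(
        SegmentMetadataQuery.AnalysisType.CARDINALITY,
        SegmentMetadataQuery.AnalysisType.ROLLUP
    )
    .merge(true)
    .build();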
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
In class CachingClusteredClientTest, method testTimeseriesCaching.
@Test
@SuppressWarnings("unchecked")
public void testTimeseriesCaching() {
  final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(SEG_SPEC)
      .filters(DIM_FILTER)
      .granularity(GRANULARITY)
      .aggregators(AGGS)
      .postAggregators(POST_AGGS)
      .context(CONTEXT);
  QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());
  // Populate the cache. 2011-01-05/2011-01-10 is covered by two result sets,
  // so the expected output below interleaves them.
  testQueryCaching(
      runner,
      builder.randomQueryId().build(),
      Intervals.of("2011-01-01/2011-01-02"),
      makeTimeResults(DateTimes.of("2011-01-01"), 50, 5000),
      Intervals.of("2011-01-02/2011-01-03"),
      makeTimeResults(DateTimes.of("2011-01-02"), 30, 6000),
      Intervals.of("2011-01-04/2011-01-05"),
      makeTimeResults(DateTimes.of("2011-01-04"), 23, 85312),
      Intervals.of("2011-01-05/2011-01-10"),
      makeTimeResults(
          DateTimes.of("2011-01-05"), 85, 102,
          DateTimes.of("2011-01-06"), 412, 521,
          DateTimes.of("2011-01-07"), 122, 21894,
          DateTimes.of("2011-01-08"), 5, 20,
          DateTimes.of("2011-01-09"), 18, 521
      ),
      Intervals.of("2011-01-05/2011-01-10"),
      makeTimeResults(
          DateTimes.of("2011-01-05T01"), 80, 100,
          DateTimes.of("2011-01-06T01"), 420, 520,
          DateTimes.of("2011-01-07T01"), 12, 2194,
          DateTimes.of("2011-01-08T01"), 59, 201,
          DateTimes.of("2011-01-09T01"), 181, 52
      )
  );
  TimeseriesQuery query = builder
      .intervals("2011-01-01/2011-01-10")
      .aggregators(RENAMED_AGGS)
      .postAggregators(RENAMED_POST_AGGS)
      .randomQueryId()
      .build();
  TestHelper.assertExpectedResults(
      makeRenamedTimeResults(
          DateTimes.of("2011-01-01"), 50, 5000,
          DateTimes.of("2011-01-02"), 30, 6000,
          DateTimes.of("2011-01-04"), 23, 85312,
          DateTimes.of("2011-01-05"), 85, 102,
          DateTimes.of("2011-01-05T01"), 80, 100,
          DateTimes.of("2011-01-06"), 412, 521,
          DateTimes.of("2011-01-06T01"), 420, 520,
          DateTimes.of("2011-01-07"), 122, 21894,
          DateTimes.of("2011-01-07T01"), 12, 2194,
          DateTimes.of("2011-01-08"), 5, 20,
          DateTimes.of("2011-01-08T01"), 59, 201,
          DateTimes.of("2011-01-09"), 18, 521,
          DateTimes.of("2011-01-09T01"), 181, 52
      ),
      runner.run(QueryPlus.wrap(query))
  );
}
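A hypothetical follow-up built from the same builder, re-running one cached interval and checking it the same way:

// Hypothetical: query just the first (cached) interval and compare.
TimeseriesQuery cachedQuery = builder
    .intervals("2011-01-01/2011-01-02")
    .aggregators(RENAMED_AGGS)
    .postAggregators(RENAMED_POST_AGGS)
    .randomQueryId()
    .build();
TestHelper.assertExpectedResults(
    makeRenamedTimeResults(DateTimes.of("2011-01-01"), 50, 5000),
    runner.run(QueryPlus.wrap(cachedQuery))
);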