Example 81 with DefaultDimensionSpec

Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.

From the class GroupByQueryRunnerFailureTest, method testNotEnoughMergeBuffersOnQueryable:

@Test(timeout = 60_000L)
public void testNotEnoughMergeBuffersOnQueryable() {
    expectedException.expect(QueryTimeoutException.class);
    expectedException.expectMessage("Cannot acquire enough merge buffers");
    final GroupByQuery query = GroupByQuery.builder()
        .setDataSource(new QueryDataSource(
            GroupByQuery.builder()
                .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
                .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                .setGranularity(Granularities.ALL)
                .setDimensions(new DefaultDimensionSpec("quality", "alias"))
                .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                .build()))
        .setGranularity(Granularities.ALL)
        .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
        .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
        .build();
    GroupByQueryRunnerTestHelper.runQuery(FACTORY, runner, query);
}
Also used: QueryDataSource (org.apache.druid.query.QueryDataSource), LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), Test (org.junit.Test)
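
For orientation across these examples, here is a minimal sketch of the common ways to construct a DefaultDimensionSpec (the column names are hypothetical; the of() factory is a convenience that reuses the column name as the output name):

import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.column.ColumnType;

// "quality" is the stored column; "alias" is its name in result rows.
DimensionSpec renamed = new DefaultDimensionSpec("quality", "alias");
// The three-argument form also declares the expected output type.
DimensionSpec typed = new DefaultDimensionSpec("quality", "alias", ColumnType.STRING);
// Convenience factory: the output name equals the column name.
DimensionSpec plain = DefaultDimensionSpec.of("quality");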

Example 82 with DefaultDimensionSpec

Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.

From the class GroupByLimitPushDownMultiNodeMergeTest, method testPartialLimitPushDownMerge:

@Test
public void testPartialLimitPushDownMerge() {
    // One segment's results use limit push down; the other's do not, because of insufficient buffer capacity.
    QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
    QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
        toolChest.mergeResults(groupByFactory.mergeRunners(executorService, getRunner1(0))),
        (QueryToolChest) toolChest);
    QueryRunner<ResultRow> theRunner2 = new FinalizeResultsQueryRunner<>(
        toolChest.mergeResults(groupByFactory2.mergeRunners(executorService, getRunner2(1))),
        (QueryToolChest) toolChest);
    QueryRunner<ResultRow> finalRunner = new FinalizeResultsQueryRunner<>(toolChest.mergeResults(new QueryRunner<ResultRow>() {

        @Override
        public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext) {
            return Sequences
                .simple(ImmutableList.of(
                    theRunner.run(queryPlus, responseContext),
                    theRunner2.run(queryPlus, responseContext)))
                .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering());
        }
    }), (QueryToolChest) toolChest);
    QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.utc(1500000000000L, 1600000000000L)));
    GroupByQuery query = GroupByQuery.builder()
        .setDataSource("blah")
        .setQuerySegmentSpec(intervalSpec)
        .setDimensions(
            new DefaultDimensionSpec("dimA", "dimA"),
            new ExtractionDimensionSpec(
                ColumnHolder.TIME_COLUMN_NAME,
                "hour",
                ColumnType.LONG,
                new TimeFormatExtractionFn(
                    null,
                    null,
                    null,
                    new PeriodGranularity(new Period("PT1H"), null, DateTimeZone.UTC),
                    true)))
        .setAggregatorSpecs(new LongSumAggregatorFactory("metASum", "metA"))
        .setLimitSpec(new DefaultLimitSpec(
            Arrays.asList(
                new OrderByColumnSpec("hour", OrderByColumnSpec.Direction.ASCENDING, StringComparators.NUMERIC),
                new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.ASCENDING)),
            1000))
        .setContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, true))
        .setGranularity(Granularities.ALL)
        .build();
    Sequence<ResultRow> queryResult = finalRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
    List<ResultRow> results = queryResult.toList();
    ResultRow expectedRow0 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "2017-07-14T02:40:00.000Z", "dimA", "mango", "hour", 1505260800000L, "metASum", 26L);
    ResultRow expectedRow1 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "2017-07-14T02:40:00.000Z", "dimA", "pomegranate", "hour", 1505260800000L, "metASum", 7113L);
    ResultRow expectedRow2 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "2017-07-14T02:40:00.000Z", "dimA", "mango", "hour", 1505264400000L, "metASum", 10L);
    ResultRow expectedRow3 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "2017-07-14T02:40:00.000Z", "dimA", "pomegranate", "hour", 1505264400000L, "metASum", 7726L);
    Assert.assertEquals(4, results.size());
    Assert.assertEquals(expectedRow0, results.get(0));
    Assert.assertEquals(expectedRow1, results.get(1));
    Assert.assertEquals(expectedRow2, results.get(2));
    Assert.assertEquals(expectedRow3, results.get(3));
}
Also used: TimeFormatExtractionFn (org.apache.druid.query.extraction.TimeFormatExtractionFn), DefaultLimitSpec (org.apache.druid.query.groupby.orderby.DefaultLimitSpec), PeriodGranularity (org.apache.druid.java.util.common.granularity.PeriodGranularity), LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory), Period (org.joda.time.Period), MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec), QueryRunner (org.apache.druid.query.QueryRunner), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), BySegmentQueryRunner (org.apache.druid.query.BySegmentQueryRunner), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), OrderByColumnSpec (org.apache.druid.query.groupby.orderby.OrderByColumnSpec), ResponseContext (org.apache.druid.query.context.ResponseContext), QuerySegmentSpec (org.apache.druid.query.spec.QuerySegmentSpec), QueryPlus (org.apache.druid.query.QueryPlus), ExtractionDimensionSpec (org.apache.druid.query.dimension.ExtractionDimensionSpec), Test (org.junit.Test)
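
The split behaviour in this test hinges on the CTX_KEY_APPLY_LIMIT_PUSH_DOWN context flag; as a minimal sketch (the datasource and column names are hypothetical), a query opting in to limit push down looks like this:

GroupByQuery pushDownQuery = GroupByQuery.builder()
    .setDataSource("events") // hypothetical datasource
    .setQuerySegmentSpec(new MultipleIntervalSegmentSpec(
        Collections.singletonList(Intervals.of("2017/2018"))))
    .setGranularity(Granularities.ALL)
    .setDimensions(new DefaultDimensionSpec("dimA", "dimA"))
    .setAggregatorSpecs(new LongSumAggregatorFactory("metASum", "metA"))
    .setLimitSpec(new DefaultLimitSpec(
        Collections.singletonList(new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.ASCENDING)),
        100))
    // Ask the engine to push the limit down to per-segment processing where
    // the ordering makes that safe; it may still decline, as one segment
    // does in the test above when buffer capacity is insufficient.
    .setContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, true))
    .build();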

Example 83 with DefaultDimensionSpec

Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.

From the class GroupByQueryMergeBufferTest, method testTripleNestedGroupBy:

@Test
public void testTripleNestedGroupBy() {
    final GroupByQuery query = GroupByQuery.builder()
        .setDataSource(new QueryDataSource(
            GroupByQuery.builder()
                .setDataSource(
                    GroupByQuery.builder()
                        .setDataSource(
                            GroupByQuery.builder()
                                .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
                                .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                                .setGranularity(Granularities.ALL)
                                .setDimensions(Lists.newArrayList(
                                    new DefaultDimensionSpec("quality", "alias"),
                                    new DefaultDimensionSpec("market", null),
                                    new DefaultDimensionSpec("placement", null)))
                                .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                                .build())
                        .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                        .setGranularity(Granularities.ALL)
                        .setDimensions(
                            new DefaultDimensionSpec("quality", "alias"),
                            new DefaultDimensionSpec("market", null))
                        .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                        .build())
                .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                .setGranularity(Granularities.ALL)
                .setDimensions(new DefaultDimensionSpec("quality", "alias"))
                .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                .build()))
        .setGranularity(Granularities.ALL)
        .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
        .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
        .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))
        .build();
    Assert.assertEquals(2, GroupByStrategyV2.countRequiredMergeBufferNum(query));
    GroupByQueryRunnerTestHelper.runQuery(FACTORY, runner, query);
    // This should be 1 because the broker needs 2 buffers and the queryable node needs one.
    Assert.assertEquals(1, MERGE_BUFFER_POOL.getMinRemainBufferNum());
    Assert.assertEquals(4, MERGE_BUFFER_POOL.getPoolSize());
}
Also used: QueryDataSource (org.apache.druid.query.QueryDataSource), LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)
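
Each level of nesting beyond the innermost query costs the broker a merge buffer, capped at two; as a minimal sketch reusing the test's helper constants (the expected count of 1 is an assumption based on that rule, not asserted by this test), a singly nested query would check out as:

GroupByQuery inner = GroupByQuery.builder()
    .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
    .setGranularity(Granularities.ALL)
    .setDimensions(new DefaultDimensionSpec("quality", "alias"))
    .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
    .build();
GroupByQuery outer = GroupByQuery.builder()
    .setDataSource(new QueryDataSource(inner))
    .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
    .setGranularity(Granularities.ALL)
    .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
    .build();
// One level of nesting should need one broker merge buffer, versus the
// two required by the triple-nested query above.
Assert.assertEquals(1, GroupByStrategyV2.countRequiredMergeBufferNum(outer));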

Example 84 with DefaultDimensionSpec

Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.

From the class GroupByQueryRunnerFactoryTest, method testMergeRunnersEnsureGroupMerging:

@Test
public void testMergeRunnersEnsureGroupMerging() {
    GroupByQuery query = GroupByQuery.builder()
        .setDataSource("xx")
        .setQuerySegmentSpec(new LegacySegmentSpec("1970/3000"))
        .setGranularity(Granularities.ALL)
        .setDimensions(new DefaultDimensionSpec("tags", "tags"))
        .setAggregatorSpecs(new CountAggregatorFactory("count"))
        .build();
    QueryRunner mergedRunner = factory.getToolchest().mergeResults(new QueryRunner() {

        @Override
        public Sequence run(QueryPlus queryPlus, ResponseContext responseContext) {
            return factory.getToolchest().mergeResults(new QueryRunner() {

                @Override
                public Sequence run(QueryPlus queryPlus, ResponseContext responseContext) {
                    final Query query = queryPlus.getQuery();
                    try {
                        return new MergeSequence(
                            query.getResultOrdering(),
                            Sequences.simple(Arrays.asList(
                                factory.createRunner(createSegment()).run(queryPlus, responseContext),
                                factory.createRunner(createSegment()).run(queryPlus, responseContext))));
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            }).run(queryPlus, responseContext);
        }
    });
    Sequence<ResultRow> result = mergedRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
    List<ResultRow> expectedResults = Arrays.asList(
        GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "tags", "t1", "count", 2L),
        GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "tags", "t2", "count", 4L));
    TestHelper.assertExpectedObjects(expectedResults, result.toList(), "");
}
Also used: Query (org.apache.druid.query.Query), Sequence (org.apache.druid.java.util.common.guava.Sequence), MergeSequence (org.apache.druid.java.util.common.guava.MergeSequence), LegacySegmentSpec (org.apache.druid.query.spec.LegacySegmentSpec), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), QueryRunner (org.apache.druid.query.QueryRunner), IOException (java.io.IOException), CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory), ResponseContext (org.apache.druid.query.context.ResponseContext), QueryPlus (org.apache.druid.query.QueryPlus), Test (org.junit.Test)
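
What mergeResults adds on top of a plain sequence merge is combining rows that share a group key; as a minimal sketch (plain integers stand in for result rows), MergeSequence by itself only interleaves sorted sequences and keeps duplicates:

import com.google.common.collect.Ordering;
import org.apache.druid.java.util.common.guava.MergeSequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import java.util.Arrays;
import java.util.List;

// An ordered k-way merge: equal elements from different inputs both survive.
Sequence<Integer> left = Sequences.simple(Arrays.asList(1, 3, 5));
Sequence<Integer> right = Sequences.simple(Arrays.asList(2, 3, 4));
Sequence<Integer> merged = new MergeSequence<>(
    Ordering.natural(),
    Sequences.simple(Arrays.asList(left, right)));
// Yields 1, 2, 3, 3, 4, 5 -- the surviving duplicate is why the toolchest's
// mergeResults wrapper is still needed to combine per-segment aggregates.
List<Integer> out = merged.toList();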

Example 85 with DefaultDimensionSpec

Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.

From the class CalciteParameterQueryTest, method testNullParameter:

@Test
public void testNullParameter() throws Exception {
    cannotVectorize();
    // Contrived example of using null as an SQL parameter, mainly to exercise the code path:
    // many things don't actually work with a null value, and expressions like 'IS NULL' fail
    // to parse in Calcite when written as 'IS ?'.
    // This optimizes out the 3rd argument because the 2nd argument is a non-null constant.
    testQuery("SELECT COALESCE(dim2, ?, ?), COUNT(*) FROM druid.foo GROUP BY 1\n", ImmutableList.of(GroupByQuery.builder().setDataSource(CalciteTests.DATASOURCE1).setInterval(querySegmentSpec(Filtration.eternity())).setGranularity(Granularities.ALL).setVirtualColumns(expressionVirtualColumn("v0", "case_searched(notnull(\"dim2\"),\"dim2\",'parameter')", ColumnType.STRING)).setDimensions(dimensions(new DefaultDimensionSpec("v0", "d0", ColumnType.STRING))).setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0"))).setContext(QUERY_CONTEXT_DEFAULT).build()), NullHandling.replaceWithDefault() ? ImmutableList.of(new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 3L }) : ImmutableList.of(new Object[] { "", 1L }, new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 2L }), ImmutableList.of(new SqlParameter(SqlType.VARCHAR, "parameter"), new SqlParameter(SqlType.VARCHAR, null)));
    // When converting to a rel expression, this optimizes out the 2nd argument to COALESCE, which is null.
    testQuery("SELECT COALESCE(dim2, ?, ?), COUNT(*) FROM druid.foo GROUP BY 1\n", ImmutableList.of(GroupByQuery.builder().setDataSource(CalciteTests.DATASOURCE1).setInterval(querySegmentSpec(Filtration.eternity())).setGranularity(Granularities.ALL).setVirtualColumns(expressionVirtualColumn("v0", "case_searched(notnull(\"dim2\"),\"dim2\",'parameter')", ColumnType.STRING)).setDimensions(dimensions(new DefaultDimensionSpec("v0", "d0", ColumnType.STRING))).setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0"))).setContext(QUERY_CONTEXT_DEFAULT).build()), NullHandling.replaceWithDefault() ? ImmutableList.of(new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 3L }) : ImmutableList.of(new Object[] { "", 1L }, new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 2L }), ImmutableList.of(new SqlParameter(SqlType.VARCHAR, null), new SqlParameter(SqlType.VARCHAR, "parameter")));
}
Also used: SqlParameter (org.apache.druid.sql.http.SqlParameter), CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory), DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec), Test (org.junit.Test)
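
The SqlParameter list binds positionally to the '?' placeholders in the SQL text; a minimal sketch of the binding used above (SqlType comes from Avatica, org.apache.calcite.avatica.SqlType):

String sql = "SELECT COALESCE(dim2, ?, ?), COUNT(*) FROM druid.foo GROUP BY 1";
// The first SqlParameter fills the first '?', the second fills the second;
// a null value produces a typed SQL NULL for that slot.
List<SqlParameter> parameters = ImmutableList.of(
    new SqlParameter(SqlType.VARCHAR, "parameter"), // first ?
    new SqlParameter(SqlType.VARCHAR, null));       // second ?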

Aggregations

DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 494 uses
Test (org.junit.Test): 461 uses
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 261 uses
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 235 uses
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 107 uses
DefaultLimitSpec (org.apache.druid.query.groupby.orderby.DefaultLimitSpec): 106 uses
OrderByColumnSpec (org.apache.druid.query.groupby.orderby.OrderByColumnSpec): 104 uses
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery): 53 uses
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 53 uses
ExpressionVirtualColumn (org.apache.druid.segment.virtual.ExpressionVirtualColumn): 53 uses
QueryDataSource (org.apache.druid.query.QueryDataSource): 52 uses
SelectorDimFilter (org.apache.druid.query.filter.SelectorDimFilter): 43 uses
TableDataSource (org.apache.druid.query.TableDataSource): 41 uses
Result (org.apache.druid.query.Result): 37 uses
ResultRow (org.apache.druid.query.groupby.ResultRow): 35 uses
GroupByQueryRunnerTest (org.apache.druid.query.groupby.GroupByQueryRunnerTest): 33 uses
ArrayList (java.util.ArrayList): 32 uses
HashMap (java.util.HashMap): 30 uses
TopNQueryBuilder (org.apache.druid.query.topn.TopNQueryBuilder): 28 uses
ExtractionDimensionSpec (org.apache.druid.query.dimension.ExtractionDimensionSpec): 26 uses