Use of org.apache.druid.query.extraction.TimeFormatExtractionFn in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testGroupByNestedDoubleTimeExtractionFnWithLongOutputTypes:
@Test
public void testGroupByNestedDoubleTimeExtractionFnWithLongOutputTypes()
{
  // Cannot vectorize due to extraction dimension spec.
  cannotVectorize();

  if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
    expectedException.expect(UnsupportedOperationException.class);
    expectedException.expectMessage("GroupBy v1 only supports dimensions with an outputType of STRING.");
  }

  GroupByQuery subquery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(
          new DefaultDimensionSpec("quality", "alias"),
          new ExtractionDimensionSpec(
              ColumnHolder.TIME_COLUMN_NAME,
              "time_day",
              ColumnType.LONG,
              new TimeFormatExtractionFn(null, null, null, Granularities.DAY, true)
          )
      )
      .setDimFilter(new SelectorDimFilter("quality", "technology", null))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT)
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();

  GroupByQuery outerQuery = makeQueryBuilder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(
          new DefaultDimensionSpec("alias", "alias"),
          new ExtractionDimensionSpec(
              "time_day",
              "time_week",
              ColumnType.LONG,
              new TimeFormatExtractionFn(null, null, null, Granularities.WEEK, true)
          )
      )
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT)
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .build();

  List<ResultRow> expectedResults = Collections.singletonList(
      makeRow(outerQuery, "2011-04-01", "alias", "technology", "time_week", 1301270400000L, "rows", 2L)
  );

  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, outerQuery);
  TestHelper.assertExpectedObjects(expectedResults, results, "extraction-fn");
}
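With a null format and asMillis set to true, TimeFormatExtractionFn emits the granularity bucket start as epoch milliseconds, and the LONG output type turns that into a long group key; the outer query then re-buckets the inner query's day values by week. A quick sanity check of the expected constant, using plain java.time rather than Druid's own Granularity classes:

import java.time.DayOfWeek;
import java.time.Instant;
import java.time.ZoneOffset;

public class WeekBucketCheck
{
  public static void main(String[] args)
  {
    // 1301270400000L is the expected "time_week" value in the test above.
    Instant weekStart = Instant.ofEpochMilli(1301270400000L);
    System.out.println(weekStart); // 2011-03-28T00:00:00Z
    // That is the Monday starting the ISO week containing the queried
    // interval (2011-04-01/2011-04-03), as WEEK-granularity bucketing implies.
    System.out.println(weekStart.atZone(ZoneOffset.UTC).getDayOfWeek() == DayOfWeek.MONDAY); // true
  }
}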
Use of org.apache.druid.query.extraction.TimeFormatExtractionFn in project druid by druid-io.
From the class GroupByLimitPushDownMultiNodeMergeTest, method testPartialLimitPushDownMerge:
@Test
public void testPartialLimitPushDownMerge()
{
  // one segment's results use limit push down, the other doesn't because of insufficient buffer capacity
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();

  QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, getRunner1(0))),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> theRunner2 = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory2.mergeRunners(executorService, getRunner2(1))),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> finalRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(
          new QueryRunner<ResultRow>()
          {
            @Override
            public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext)
            {
              return Sequences
                  .simple(ImmutableList.of(
                      theRunner.run(queryPlus, responseContext),
                      theRunner2.run(queryPlus, responseContext)
                  ))
                  .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering());
            }
          }
      ),
      (QueryToolChest) toolChest
  );

  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(
      Collections.singletonList(Intervals.utc(1500000000000L, 1600000000000L))
  );

  GroupByQuery query = GroupByQuery.builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(
          new DefaultDimensionSpec("dimA", "dimA"),
          new ExtractionDimensionSpec(
              ColumnHolder.TIME_COLUMN_NAME,
              "hour",
              ColumnType.LONG,
              new TimeFormatExtractionFn(
                  null,
                  null,
                  null,
                  new PeriodGranularity(new Period("PT1H"), null, DateTimeZone.UTC),
                  true
              )
          )
      )
      .setAggregatorSpecs(new LongSumAggregatorFactory("metASum", "metA"))
      .setLimitSpec(
          new DefaultLimitSpec(
              Arrays.asList(
                  new OrderByColumnSpec("hour", OrderByColumnSpec.Direction.ASCENDING, StringComparators.NUMERIC),
                  new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.ASCENDING)
              ),
              1000
          )
      )
      .setContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, true))
      .setGranularity(Granularities.ALL)
      .build();

  Sequence<ResultRow> queryResult = finalRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> results = queryResult.toList();

  ResultRow expectedRow0 = GroupByQueryRunnerTestHelper.createExpectedRow(
      query, "2017-07-14T02:40:00.000Z", "dimA", "mango", "hour", 1505260800000L, "metASum", 26L
  );
  ResultRow expectedRow1 = GroupByQueryRunnerTestHelper.createExpectedRow(
      query, "2017-07-14T02:40:00.000Z", "dimA", "pomegranate", "hour", 1505260800000L, "metASum", 7113L
  );
  ResultRow expectedRow2 = GroupByQueryRunnerTestHelper.createExpectedRow(
      query, "2017-07-14T02:40:00.000Z", "dimA", "mango", "hour", 1505264400000L, "metASum", 10L
  );
  ResultRow expectedRow3 = GroupByQueryRunnerTestHelper.createExpectedRow(
      query, "2017-07-14T02:40:00.000Z", "dimA", "pomegranate", "hour", 1505264400000L, "metASum", 7726L
  );

  Assert.assertEquals(4, results.size());
  Assert.assertEquals(expectedRow0, results.get(0));
  Assert.assertEquals(expectedRow1, results.get(1));
  Assert.assertEquals(expectedRow2, results.get(2));
  Assert.assertEquals(expectedRow3, results.get(3));
}
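The "hour" keys come from bucketing __time with a one-hour PeriodGranularity, again with asMillis = true, so the group keys are epoch-millis bucket starts; and since the query granularity is ALL, every expected row is stamped with the start of the queried interval (1500000000000L, i.e. 2017-07-14T02:40:00Z). Decoding the two bucket constants with plain java.time as a sanity check:

import java.time.Duration;
import java.time.Instant;

public class HourBucketCheck
{
  public static void main(String[] args)
  {
    Instant bucket0 = Instant.ofEpochMilli(1505260800000L);
    Instant bucket1 = Instant.ofEpochMilli(1505264400000L);
    System.out.println(bucket0); // 2017-09-13T00:00:00Z
    System.out.println(bucket1); // 2017-09-13T01:00:00Z
    // Two consecutive one-hour buckets, consistent with PeriodGranularity(PT1H, UTC).
    System.out.println(Duration.between(bucket0, bucket1)); // PT1H
  }
}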
Use of org.apache.druid.query.extraction.TimeFormatExtractionFn in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testGroupByTimeExtractionWithNulls:
@Test
public void testGroupByTimeExtractionWithNulls()
{
  // Cannot vectorize due to extraction dimension specs.
  cannotVectorize();

  final DimExtractionFn nullWednesdays = new DimExtractionFn()
  {
    @Override
    public String apply(String dimValue)
    {
      if ("Wednesday".equals(dimValue)) {
        return null;
      } else {
        return dimValue;
      }
    }

    @Override
    public byte[] getCacheKey()
    {
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean preservesOrdering()
    {
      return false;
    }

    @Override
    public ExtractionType getExtractionType()
    {
      return ExtractionType.MANY_TO_ONE;
    }
  };

  GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setDimensions(
          new DefaultDimensionSpec("market", "market"),
          new ExtractionDimensionSpec(
              ColumnHolder.TIME_COLUMN_NAME,
              "dayOfWeek",
              new CascadeExtractionFn(new ExtractionFn[]{
                  new TimeFormatExtractionFn("EEEE", null, null, null, false),
                  nullWednesdays
              })
          )
      )
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, QueryRunnerTestHelper.INDEX_DOUBLE_SUM)
      .setPostAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT))
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .setDimFilter(new OrDimFilter(Arrays.asList(
          new SelectorDimFilter("market", "spot", null),
          new SelectorDimFilter("market", "upfront", null)
      )))
      .build();

  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "1970-01-01", "dayOfWeek", null, "market", "spot", "index", 14271.368591308594, "rows", 126L, "addRowsIndexConstant", 14398.368591308594),
      makeRow(query, "1970-01-01", "dayOfWeek", "Friday", "market", "spot", "index", 13219.574157714844, "rows", 117L, "addRowsIndexConstant", 13337.574157714844),
      makeRow(query, "1970-01-01", "dayOfWeek", "Monday", "market", "spot", "index", 13557.738830566406, "rows", 117L, "addRowsIndexConstant", 13675.738830566406),
      makeRow(query, "1970-01-01", "dayOfWeek", "Saturday", "market", "spot", "index", 13493.751281738281, "rows", 117L, "addRowsIndexConstant", 13611.751281738281),
      makeRow(query, "1970-01-01", "dayOfWeek", "Sunday", "market", "spot", "index", 13585.541015625, "rows", 117L, "addRowsIndexConstant", 13703.541015625),
      makeRow(query, "1970-01-01", "dayOfWeek", "Thursday", "market", "spot", "index", 14279.127197265625, "rows", 126L, "addRowsIndexConstant", 14406.127197265625),
      makeRow(query, "1970-01-01", "dayOfWeek", "Tuesday", "market", "spot", "index", 13199.471435546875, "rows", 117L, "addRowsIndexConstant", 13317.471435546875),
      makeRow(query, "1970-01-01", "dayOfWeek", null, "market", "upfront", "index", 28985.5751953125, "rows", 28L, "addRowsIndexConstant", 29014.5751953125),
      makeRow(query, "1970-01-01", "dayOfWeek", "Friday", "market", "upfront", "index", 27297.8623046875, "rows", 26L, "addRowsIndexConstant", 27324.8623046875),
      makeRow(query, "1970-01-01", "dayOfWeek", "Monday", "market", "upfront", "index", 27619.58447265625, "rows", 26L, "addRowsIndexConstant", 27646.58447265625),
      makeRow(query, "1970-01-01", "dayOfWeek", "Saturday", "market", "upfront", "index", 27820.83154296875, "rows", 26L, "addRowsIndexConstant", 27847.83154296875),
      makeRow(query, "1970-01-01", "dayOfWeek", "Sunday", "market", "upfront", "index", 24791.223876953125, "rows", 26L, "addRowsIndexConstant", 24818.223876953125),
      makeRow(query, "1970-01-01", "dayOfWeek", "Thursday", "market", "upfront", "index", 28562.748901367188, "rows", 28L, "addRowsIndexConstant", 28591.748901367188),
      makeRow(query, "1970-01-01", "dayOfWeek", "Tuesday", "market", "upfront", "index", 26968.280639648438, "rows", 26L, "addRowsIndexConstant", 26995.280639648438)
  );

  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "time-extraction");
}
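CascadeExtractionFn applies its functions in array order: the timeFormat step renders __time as a full day-of-week name ("EEEE" is the day-name pattern in Joda-Time, which Druid uses for formatting), and nullWednesdays then maps "Wednesday" to null. That is why the null rows above carry exactly the totals that appear under "Wednesday" in the plain testGroupByTimeExtraction variant below. A rough sketch of the same two-stage pipeline using plain java.time, where the pattern letter has the same meaning:

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.Locale;
import java.util.function.UnaryOperator;

public class DayOfWeekCascadeSketch
{
  public static void main(String[] args)
  {
    // Stage 1: format a timestamp as a day-of-week name, like TimeFormatExtractionFn("EEEE", ...).
    DateTimeFormatter eeee = DateTimeFormatter.ofPattern("EEEE", Locale.ENGLISH).withZone(ZoneOffset.UTC);
    // Stage 2: the equivalent of the anonymous nullWednesdays extraction fn above.
    UnaryOperator<String> nullWednesdays = day -> "Wednesday".equals(day) ? null : day;

    System.out.println(nullWednesdays.apply(eeee.format(Instant.ofEpochMilli(0L))));               // Thursday (1970-01-01)
    System.out.println(nullWednesdays.apply(eeee.format(Instant.ofEpochMilli(6L * 86_400_000L)))); // null (1970-01-07 was a Wednesday)
  }
}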
Use of org.apache.druid.query.extraction.TimeFormatExtractionFn in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testGroupByTimeExtractionNamedUnderUnderTime:
@Test
public void testGroupByTimeExtractionNamedUnderUnderTime()
{
  expectedException.expect(IAE.class);
  expectedException.expectMessage(
      "'__time' cannot be used as an output name for dimensions, aggregators, or post-aggregators."
  );

  makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setDimensions(
          new DefaultDimensionSpec("market", "market"),
          new ExtractionDimensionSpec(
              ColumnHolder.TIME_COLUMN_NAME,
              ColumnHolder.TIME_COLUMN_NAME,
              new TimeFormatExtractionFn("EEEE", null, null, null, false)
          )
      )
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, QueryRunnerTestHelper.INDEX_DOUBLE_SUM)
      .setPostAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT))
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .setDimFilter(new OrDimFilter(Arrays.asList(
          new SelectorDimFilter("market", "spot", null),
          new SelectorDimFilter("market", "upfront", null)
      )))
      .setLimitSpec(new DefaultLimitSpec(ImmutableList.of(), 1))
      .build();
}
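The builder fails fast here because "__time" is reserved for the row timestamp: it is fine as the extraction input (as in every other test on this page), just not as an output name. A toy version of that validation rule, not Druid's actual implementation:

import java.util.List;

public class OutputNameValidationSketch
{
  // Hypothetical stand-in for the check the query builder performs on output names.
  static void validateOutputNames(List<String> outputNames)
  {
    for (String name : outputNames) {
      if ("__time".equals(name)) {
        throw new IllegalArgumentException(
            "'__time' cannot be used as an output name for dimensions, aggregators, or post-aggregators."
        );
      }
    }
  }

  public static void main(String[] args)
  {
    validateOutputNames(List.of("market", "dayOfWeek")); // passes
    validateOutputNames(List.of("market", "__time"));    // throws
  }
}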
Use of org.apache.druid.query.extraction.TimeFormatExtractionFn in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testGroupByTimeExtraction:
@Test
public void testGroupByTimeExtraction()
{
  // Cannot vectorize due to extraction dimension spec.
  cannotVectorize();

  GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setDimensions(
          new DefaultDimensionSpec("market", "market"),
          new ExtractionDimensionSpec(
              ColumnHolder.TIME_COLUMN_NAME,
              "dayOfWeek",
              new TimeFormatExtractionFn("EEEE", null, null, null, false)
          )
      )
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, QueryRunnerTestHelper.INDEX_DOUBLE_SUM)
      .setPostAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT))
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .setDimFilter(new OrDimFilter(Arrays.asList(
          new SelectorDimFilter("market", "spot", null),
          new SelectorDimFilter("market", "upfront", null)
      )))
      .build();

  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "1970-01-01", "dayOfWeek", "Friday", "market", "spot", "index", 13219.574157714844, "rows", 117L, "addRowsIndexConstant", 13337.574157714844),
      makeRow(query, "1970-01-01", "dayOfWeek", "Monday", "market", "spot", "index", 13557.738830566406, "rows", 117L, "addRowsIndexConstant", 13675.738830566406),
      makeRow(query, "1970-01-01", "dayOfWeek", "Saturday", "market", "spot", "index", 13493.751281738281, "rows", 117L, "addRowsIndexConstant", 13611.751281738281),
      makeRow(query, "1970-01-01", "dayOfWeek", "Sunday", "market", "spot", "index", 13585.541015625, "rows", 117L, "addRowsIndexConstant", 13703.541015625),
      makeRow(query, "1970-01-01", "dayOfWeek", "Thursday", "market", "spot", "index", 14279.127197265625, "rows", 126L, "addRowsIndexConstant", 14406.127197265625),
      makeRow(query, "1970-01-01", "dayOfWeek", "Tuesday", "market", "spot", "index", 13199.471435546875, "rows", 117L, "addRowsIndexConstant", 13317.471435546875),
      makeRow(query, "1970-01-01", "dayOfWeek", "Wednesday", "market", "spot", "index", 14271.368591308594, "rows", 126L, "addRowsIndexConstant", 14398.368591308594),
      makeRow(query, "1970-01-01", "dayOfWeek", "Friday", "market", "upfront", "index", 27297.8623046875, "rows", 26L, "addRowsIndexConstant", 27324.8623046875),
      makeRow(query, "1970-01-01", "dayOfWeek", "Monday", "market", "upfront", "index", 27619.58447265625, "rows", 26L, "addRowsIndexConstant", 27646.58447265625),
      makeRow(query, "1970-01-01", "dayOfWeek", "Saturday", "market", "upfront", "index", 27820.83154296875, "rows", 26L, "addRowsIndexConstant", 27847.83154296875),
      makeRow(query, "1970-01-01", "dayOfWeek", "Sunday", "market", "upfront", "index", 24791.223876953125, "rows", 26L, "addRowsIndexConstant", 24818.223876953125),
      makeRow(query, "1970-01-01", "dayOfWeek", "Thursday", "market", "upfront", "index", 28562.748901367188, "rows", 28L, "addRowsIndexConstant", 28591.748901367188),
      makeRow(query, "1970-01-01", "dayOfWeek", "Tuesday", "market", "upfront", "index", 26968.280639648438, "rows", 26L, "addRowsIndexConstant", 26995.280639648438),
      makeRow(query, "1970-01-01", "dayOfWeek", "Wednesday", "market", "upfront", "index", 28985.5751953125, "rows", 28L, "addRowsIndexConstant", 29014.5751953125)
  );

  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "time-extraction");
}
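Grouping on an extraction dimension makes the extracted value the group key rather than the raw __time, and with ALL granularity every output row is stamped with the epoch start, hence "1970-01-01" in each expected row. A loose plain-Java analogy of that key derivation plus the rows count, over toy timestamps rather than the test dataset:

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class GroupByDayOfWeekSketch
{
  public static void main(String[] args)
  {
    DateTimeFormatter eeee = DateTimeFormatter.ofPattern("EEEE", Locale.ENGLISH).withZone(ZoneOffset.UTC);
    // Toy __time values: the epoch (a Thursday), one day later, and one week later.
    List<Long> timestamps = List.of(0L, 86_400_000L, 7L * 86_400_000L);
    Map<String, Long> rowsPerDay = timestamps.stream().collect(Collectors.groupingBy(
        t -> eeee.format(Instant.ofEpochMilli(t)), // the extraction step
        TreeMap::new,
        Collectors.counting()                      // the "rows" count aggregator
    ));
    System.out.println(rowsPerDay); // {Friday=1, Thursday=2}
  }
}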