Use of org.apache.druid.java.util.common.granularity.PeriodGranularity in the druid-io/druid project: class CalciteQueryTest, method testMinMaxAvgDailyCountWithLimit.
// Verifies that a SQL query taking max/min/avg/TIME_EXTRACT/count over a daily-bucketed
// subquery, wrapped in an outer SELECT * ... LIMIT 1, plans to a GroupByQuery whose data
// source is an inner day-granularity TimeseriesQuery.
@Test
public void testMinMaxAvgDailyCountWithLimit() throws Exception {
// Cannot vectorize due to virtual columns.
cannotVectorize();
// Expected native plan: the inner timeseries counts rows per UTC day (P1D granularity,
// count "a0", floor-time column "d0"); the outer ALL-granularity group-by computes
// max("a0"), min("a0"), avg via sum("_a2:sum")/count("_a2:count") combined by an
// ArithmeticPostAggregator "quotient", max day "_a3", and row count "_a4". In
// SQL-compatible (non-default) null handling, avg's count leg is wrapped in a
// FilteredAggregatorFactory excluding null "a0". A post-aggregator applies
// timestamp_extract(...,'EPOCH','UTC') to the max day. Expected result row:
// {1, 1, 1, 978480000, 6}.
testQuery("SELECT * FROM (" + " SELECT max(cnt), min(cnt), avg(cnt), TIME_EXTRACT(max(t), 'EPOCH') last_time, count(1) num_days FROM (\n" + " SELECT TIME_FLOOR(__time, 'P1D') AS t, count(1) cnt\n" + " FROM \"foo\"\n" + " GROUP BY 1\n" + " )" + ") LIMIT 1\n", ImmutableList.of(GroupByQuery.builder().setDataSource(new QueryDataSource(Druids.newTimeseriesQueryBuilder().dataSource(CalciteTests.DATASOURCE1).granularity(new PeriodGranularity(Period.days(1), null, DateTimeZone.UTC)).intervals(querySegmentSpec(Filtration.eternity())).aggregators(new CountAggregatorFactory("a0")).context(getTimeseriesContextWithFloorTime(TIMESERIES_CONTEXT_BY_GRAN, "d0")).build())).setInterval(querySegmentSpec(Filtration.eternity())).setGranularity(Granularities.ALL).setAggregatorSpecs(useDefault ? aggregators(new LongMaxAggregatorFactory("_a0", "a0"), new LongMinAggregatorFactory("_a1", "a0"), new LongSumAggregatorFactory("_a2:sum", "a0"), new CountAggregatorFactory("_a2:count"), new LongMaxAggregatorFactory("_a3", "d0"), new CountAggregatorFactory("_a4")) : aggregators(new LongMaxAggregatorFactory("_a0", "a0"), new LongMinAggregatorFactory("_a1", "a0"), new LongSumAggregatorFactory("_a2:sum", "a0"), new FilteredAggregatorFactory(new CountAggregatorFactory("_a2:count"), not(selector("a0", null, null))), new LongMaxAggregatorFactory("_a3", "d0"), new CountAggregatorFactory("_a4"))).setPostAggregatorSpecs(ImmutableList.of(new ArithmeticPostAggregator("_a2", "quotient", ImmutableList.of(new FieldAccessPostAggregator(null, "_a2:sum"), new FieldAccessPostAggregator(null, "_a2:count"))), expressionPostAgg("p0", "timestamp_extract(\"_a3\",'EPOCH','UTC')"))).setContext(QUERY_CONTEXT_DEFAULT).build()), ImmutableList.of(new Object[] { 1L, 1L, 1L, 978480000L, 6L }));
}
Use of org.apache.druid.java.util.common.granularity.PeriodGranularity in the druid-io/druid project: class NewestSegmentFirstPolicyTest, method testIteratorReturnsSegmentsAsSegmentsWasCompactedAndHaveDifferentTimezone.
@Test
public void testIteratorReturnsSegmentsAsSegmentsWasCompactedAndHaveDifferentTimezone() {
// A previously compacted segment should be offered for recompaction when the configured
// segmentGranularity keeps the same duration (P1D) but switches to a different timezone.
// indexSpec matching what the auto compaction config will produce
final Map<String, Object> indexSpec = mapper.convertValue(new IndexSpec(), new TypeReference<Map<String, Object>>() {
});
// partitionsSpec matching what the auto compaction config will produce
final PartitionsSpec partitionsSpec = NewestSegmentFirstIterator.findPartitionsSpecFromConfig(ClientCompactionTaskQueryTuningConfig.from(null, null));
// One day of data that was already compacted (CompactionState != null) with segmentGranularity=DAY
final CompactionState lastCompactionState = new CompactionState(partitionsSpec, null, null, null, indexSpec, null);
final VersionedIntervalTimeline<String, DataSegment> timeline = createTimeline(
    new SegmentGenerateSpec(
        Intervals.of("2017-10-02T00:00:00/2017-10-03T00:00:00"),
        new Period("P1D"),
        null,
        lastCompactionState
    )
);
// Same P1D duration as before, but the timezone changes from UTC to Asia/Bangkok
final PeriodGranularity bangkokDayGranularity = new PeriodGranularity(
    new Period("P1D"),
    null,
    DateTimeZone.forTimeZone(TimeZone.getTimeZone("Asia/Bangkok"))
);
final CompactionSegmentIterator iterator = policy.reset(
    ImmutableMap.of(
        DATA_SOURCE,
        createCompactionConfig(130000, new Period("P0D"), new UserCompactionTaskGranularityConfig(bangkokDayGranularity, null, null))
    ),
    ImmutableMap.of(DATA_SOURCE, timeline),
    Collections.emptyMap()
);
// Skip offset is P0D, so every segment in the timeline should come back.
Assert.assertTrue(iterator.hasNext());
final List<DataSegment> expectedSegmentsToCompact = new ArrayList<>(
    timeline.findNonOvershadowedObjectsInInterval(Intervals.of("2017-10-01T00:00:00/2017-10-03T00:00:00"), Partitions.ONLY_COMPLETE)
);
Assert.assertEquals(ImmutableSet.copyOf(expectedSegmentsToCompact), ImmutableSet.copyOf(iterator.next()));
// Nothing further should be offered.
Assert.assertFalse(iterator.hasNext());
}
Use of org.apache.druid.java.util.common.granularity.PeriodGranularity in the druid-io/druid project: class TimeFloorOperatorConversion, method applyTimestampFloor.
/**
 * Floors a DruidExpression to a particular granularity by generating a "timestamp_floor" call.
 * Not actually used by TimeFloorOperatorConversion itself, but it lives in this file since the
 * class is the one responsible for producing "timestamp_floor" expressions.
 */
public static DruidExpression applyTimestampFloor(final DruidExpression input, final PeriodGranularity granularity, final ExprMacroTable macroTable) {
Preconditions.checkNotNull(input, "input");
Preconditions.checkNotNull(granularity, "granularity");
// Collapse floor chains where possible. Useful for constructs like
// CAST(FLOOR(__time TO QUARTER) AS DATE): flooring a value that is already floored to a
// day-multiple granularity (same zone and origin) down to P1D is a no-op.
if (granularity.getPeriod().equals(Period.days(1))) {
    final TimestampFloorExprMacro.TimestampFloorExpr priorFloor = Expressions.asTimestampFloorExpr(input, macroTable);
    if (priorFloor != null) {
        final PeriodGranularity priorGranularity = priorFloor.getGranularity();
        final boolean sameZone = Objects.equals(priorGranularity.getTimeZone(), granularity.getTimeZone());
        final boolean sameOrigin = Objects.equals(priorGranularity.getOrigin(), granularity.getOrigin());
        if (sameZone && sameOrigin && periodIsDayMultiple(priorGranularity.getPeriod())) {
            return input;
        }
    }
}
// Origin is passed as a LONG millis literal; null when the granularity has no origin.
final DruidExpression originLiteral = DruidExpression.ofLiteral(
    ColumnType.LONG,
    DruidExpression.numberLiteral(granularity.getOrigin() == null ? null : granularity.getOrigin().getMillis())
);
return DruidExpression.ofFunctionCall(
    input.getDruidType(),
    "timestamp_floor",
    ImmutableList.of(
        input,
        DruidExpression.ofStringLiteral(granularity.getPeriod().toString()),
        originLiteral,
        DruidExpression.ofStringLiteral(granularity.getTimeZone().toString())
    )
);
}
Use of org.apache.druid.java.util.common.granularity.PeriodGranularity in the druid-io/druid project: class UniformGranularityTest, method testPeriodSegmentGranularity.
@Test
public void testPeriodSegmentGranularity() {
// P1D granularity in America/Los_Angeles: bucket boundaries follow the Pacific wall
// clock (note the input intervals use -08:00 offsets in January and -07:00 in September).
final GranularitySpec spec = new UniformGranularitySpec(
    new PeriodGranularity(new Period("P1D"), null, DateTimes.inferTzFromString("America/Los_Angeles")),
    null,
    Lists.newArrayList(
        Intervals.of("2012-01-08T00-08:00/2012-01-11T00-08:00"),
        Intervals.of("2012-01-07T00-08:00/2012-01-08T00-08:00"),
        Intervals.of("2012-01-03T00-08:00/2012-01-04T00-08:00"),
        Intervals.of("2012-01-01T00-08:00/2012-01-03T00-08:00"),
        Intervals.of("2012-09-01T00-07:00/2012-09-03T00-07:00")
    )
);
Assert.assertTrue(spec.sortedBucketIntervals().iterator().hasNext());
final Iterable<Interval> buckets = spec.sortedBucketIntervals();
// Record the duration of every day bucket produced by the spec.
final ArrayList<Long> actualIntervals = new ArrayList<>();
for (Interval bucket : buckets) {
    actualIntervals.add(bucket.toDurationMillis());
}
// Expected durations come from the same Pacific-time chronology, one per calendar day.
final ISOChronology chrono = ISOChronology.getInstance(DateTimes.inferTzFromString("America/Los_Angeles"));
final ArrayList<Long> expectedIntervals = Lists.newArrayList(
    new Interval("2012-01-01/2012-01-02", chrono).toDurationMillis(),
    new Interval("2012-01-02/2012-01-03", chrono).toDurationMillis(),
    new Interval("2012-01-03/2012-01-04", chrono).toDurationMillis(),
    new Interval("2012-01-07/2012-01-08", chrono).toDurationMillis(),
    new Interval("2012-01-08/2012-01-09", chrono).toDurationMillis(),
    new Interval("2012-01-09/2012-01-10", chrono).toDurationMillis(),
    new Interval("2012-01-10/2012-01-11", chrono).toDurationMillis(),
    new Interval("2012-09-01/2012-09-02", chrono).toDurationMillis(),
    new Interval("2012-09-02/2012-09-03", chrono).toDurationMillis()
);
Assert.assertEquals(expectedIntervals, actualIntervals);
}
Use of org.apache.druid.java.util.common.granularity.PeriodGranularity in the druid-io/druid project: class CastOperatorConversion, method toDruidExpression.
@Override
public DruidExpression toDruidExpression(final PlannerContext plannerContext, final RowSignature rowSignature, final RexNode rexNode) {
// CAST has exactly one operand; translate it first.
final RexNode sourceNode = ((RexCall) rexNode).getOperands().get(0);
final DruidExpression sourceExpression = Expressions.toDruidExpression(plannerContext, rowSignature, sourceNode);
if (sourceExpression == null) {
    // The operand itself has no native-expression translation.
    return null;
}
final SqlTypeName fromType = sourceNode.getType().getSqlTypeName();
final SqlTypeName toType = rexNode.getType().getSqlTypeName();
// String <-> datetime casts take dedicated parsing/formatting paths.
if (SqlTypeName.CHAR_TYPES.contains(fromType) && SqlTypeName.DATETIME_TYPES.contains(toType)) {
    return castCharToDateTime(plannerContext, sourceExpression, toType, Calcites.getColumnTypeForRelDataType(rexNode.getType()));
}
if (SqlTypeName.DATETIME_TYPES.contains(fromType) && SqlTypeName.CHAR_TYPES.contains(toType)) {
    return castDateTimeToChar(plannerContext, sourceExpression, fromType, Calcites.getColumnTypeForRelDataType(rexNode.getType()));
}
// Handle other casts by mapping the SQL types to native expression types.
final ExprType fromExprType = EXPRESSION_TYPES.get(fromType);
final ExprType toExprType = EXPRESSION_TYPES.get(toType);
if (fromExprType == null || toExprType == null) {
    // We have no runtime type for one of these SQL types.
    return null;
}
final DruidExpression typeCastExpression;
if (fromExprType == toExprType) {
    // Same native type on both sides; no CAST call needed.
    typeCastExpression = sourceExpression;
} else {
    // Ignore casts for simple extractions (use Function.identity) since it is ok in many cases.
    typeCastExpression = sourceExpression.map(Function.identity(), expression -> StringUtils.format("CAST(%s, '%s')", expression, toExprType.toString()));
}
if (toType == SqlTypeName.DATE) {
    // Casting to DATE additionally floors to day granularity in the session timezone.
    return TimeFloorOperatorConversion.applyTimestampFloor(typeCastExpression, new PeriodGranularity(Period.days(1), null, plannerContext.getTimeZone()), plannerContext.getExprMacroTable());
}
return typeCastExpression;
}
Aggregations