use of org.apache.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
the class GroupByQueryRunnerTest method testDimFilterHavingSpec.
@Test
public void testDimFilterHavingSpec() {
final DimFilterHavingSpec havingSpec = new DimFilterHavingSpec(
    new AndDimFilter(
        ImmutableList.of(
            new OrDimFilter(
                ImmutableList.of(
                    new BoundDimFilter("rows", "2", null, true, false, null, null, StringComparators.NUMERIC),
                    new SelectorDimFilter("idx", "217", null)
                )
            ),
            new SelectorDimFilter("__time", String.valueOf(DateTimes.of("2011-04-01").getMillis()), null)
        )
    ),
    null
);
GroupByQuery.Builder builder = makeQueryBuilder()
    .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .setInterval("2011-04-02/2011-04-04")
    .setDimensions(new DefaultDimensionSpec("quality", "alias"))
    .setAggregatorSpecs(
        QueryRunnerTestHelper.ROWS_COUNT,
        new LongSumAggregatorFactory("idx", "index"),
        QueryRunnerTestHelper.INDEX_LONG_MIN,
        QueryRunnerTestHelper.INDEX_LONG_MAX,
        QueryRunnerTestHelper.INDEX_DOUBLE_MIN,
        QueryRunnerTestHelper.INDEX_DOUBLE_MAX,
        QueryRunnerTestHelper.INDEX_FLOAT_MIN,
        QueryRunnerTestHelper.INDEX_FLOAT_MAX
    )
    .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
    .setHavingSpec(havingSpec);
final GroupByQuery fullQuery = builder.build();
List<ResultRow> expectedResults = Arrays.asList(
    makeRow(
        fullQuery, "2011-04-01",
        "alias", "business", "rows", 2L, "idx", 217L,
        QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 105L,
        QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 112L,
        QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 105.735462D,
        QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 112.987027D,
        QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 105.73546F,
        QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 112.98703F
    ),
    makeRow(
        fullQuery, "2011-04-01",
        "alias", "mezzanine", "rows", 6L, "idx", 4420L,
        QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 107L,
        QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 1193L,
        QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 107.047773D,
        QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 1193.556278D,
        QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 107.047775F,
        QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 1193.5563F
    ),
    makeRow(
        fullQuery, "2011-04-01",
        "alias", "premium", "rows", 6L, "idx", 4416L,
        QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 122L,
        QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 1321L,
        QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 122.141707D,
        QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 1321.375057D,
        QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 122.14171F,
        QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 1321.375F
    )
);
TestHelper.assertExpectedObjects(expectedResults, GroupByQueryRunnerTestHelper.runQuery(factory, runner, fullQuery), "dimfilter-havingspec");
}
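The P1M granularity in this query is why every expected row above carries the "2011-04-01" timestamp even though the interval only covers April 2 through 4: the whole interval falls inside one monthly bucket. A minimal sketch of that bucketing, assuming Granularity#bucketStart(DateTime) keeps its current signature (the class name is hypothetical):
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.Period;

public class MonthlyBucketSketch {
  public static void main(String[] args) {
    // A null origin and null time zone align buckets to calendar months in UTC.
    PeriodGranularity monthly = new PeriodGranularity(new Period("P1M"), null, null);
    // Both days of the query interval map to the start of April:
    System.out.println(monthly.bucketStart(DateTimes.of("2011-04-02")));  // 2011-04-01T00:00:00.000Z
    System.out.println(monthly.bucketStart(DateTimes.of("2011-04-03")));  // 2011-04-01T00:00:00.000Z
  }
}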
use of org.apache.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
the class GroupByQueryRunnerTest method testBySegmentResults.
@Test
public void testBySegmentResults() {
GroupByQuery.Builder builder = makeQueryBuilder()
    .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .setInterval("2011-04-02/2011-04-04")
    .setDimensions(new DefaultDimensionSpec("quality", "alias"))
    .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
    .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
    .setDimFilter(new SelectorDimFilter("quality", "mezzanine", null))
    .setContext(ImmutableMap.of(QueryContexts.BY_SEGMENT_KEY, true));
final GroupByQuery fullQuery = builder.build();
int segmentCount = 32;
Result<BySegmentResultValue> singleSegmentResult = new Result<>(
    DateTimes.of("2011-01-12T00:00:00.000Z"),
    new BySegmentResultValueClass<>(
        Collections.singletonList(makeRow(fullQuery, "2011-04-01", "alias", "mezzanine", "rows", 6L, "idx", 4420L)),
        QueryRunnerTestHelper.SEGMENT_ID.toString(),
        Intervals.of("2011-04-02T00:00:00.000Z/2011-04-04T00:00:00.000Z")
    )
);
List<Result> bySegmentResults = new ArrayList<>();
for (int i = 0; i < segmentCount; i++) {
bySegmentResults.add(singleSegmentResult);
}
QueryToolChest toolChest = factory.getToolchest();
List<QueryRunner<ResultRow>> singleSegmentRunners = new ArrayList<>();
for (int i = 0; i < segmentCount; i++) {
singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
}
ExecutorService exec = Executors.newCachedThreadPool();
QueryRunner theRunner = toolChest.postMergeQueryDecoration(
    new FinalizeResultsQueryRunner<>(
        // Use the tracked executor so exec.shutdownNow() below stops the pool
        // that is actually used for merging.
        toolChest.mergeResults(factory.mergeRunners(exec, singleSegmentRunners)),
        toolChest
    )
);
TestHelper.assertExpectedObjects(bySegmentResults, theRunner.run(QueryPlus.wrap(fullQuery)), "bySegment");
exec.shutdownNow();
}
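The distinctive piece of this test is the bySegment context flag: it tells the toolchest decorations to skip merging and hand back each segment's rows wrapped in a BySegmentResultValueClass, which is why the expected output is 32 identical wrapped results rather than one merged row. A minimal sketch of setting the flag on an already-built query, assuming Query#withOverriddenContext behaves as in current Druid:
// Equivalent to the setContext(...) call in the builder above:
final Query<ResultRow> bySegmentQuery = fullQuery.withOverriddenContext(
    ImmutableMap.of(QueryContexts.BY_SEGMENT_KEY, true)
);
// Each segment-level runner now reports its own rows, segment id, and interval.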
use of org.apache.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
the class DruidSqlParserUtils method convertSqlNodeToGranularity.
/**
* This method extracts the granularity from a SqlNode representing one of the following function calls:
* 1. FLOOR(__time TO TimeUnit)
* 2. TIME_FLOOR(__time, 'PT1H')
*
* Validation of the sqlNode is subject to the following conditions:
* 1. sqlNode is an instance of SqlCall
* 2. The operator is either TIME_FLOOR or FLOOR
* 3. The call has exactly two operands
* 4. The first operand is a simple SqlIdentifier representing __time
* 5. If the operator is TIME_FLOOR, the second operand is a literal that can be converted to a Granularity
* 6. If the operator is FLOOR, the second operand is a TimeUnit that can be mapped using {@link TimeUnits}
*
* Since this method is used primarily while parsing a SqlNode, it is wrapped in {@code convertSqlNodeToGranularityThrowingParseExceptions}.
*
* @param sqlNode SqlNode representing a call to a function
* @return Granularity as intended by the function call
* @throws ParseException if the SqlNode cannot be converted to a granularity
*/
public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws ParseException {
final String genericParseFailedMessageFormatString =
    "Encountered %s after PARTITIONED BY. "
    + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or %s function";
if (!(sqlNode instanceof SqlCall)) {
  throw new ParseException(
      StringUtils.format(genericParseFailedMessageFormatString, sqlNode.toString(), TimeFloorOperatorConversion.SQL_FUNCTION_NAME)
  );
}
SqlCall sqlCall = (SqlCall) sqlNode;
String operatorName = sqlCall.getOperator().getName();
Preconditions.checkArgument("FLOOR".equalsIgnoreCase(operatorName) || TimeFloorOperatorConversion.SQL_FUNCTION_NAME.equalsIgnoreCase(operatorName), StringUtils.format("PARTITIONED BY clause only supports FLOOR(__time TO <unit> and %s(__time, period) functions", TimeFloorOperatorConversion.SQL_FUNCTION_NAME));
List<SqlNode> operandList = sqlCall.getOperandList();
Preconditions.checkArgument(
    operandList.size() == 2,
    StringUtils.format("%s in PARTITIONED BY clause must have two arguments", operatorName)
);
// Check that the first argument passed to the floor function is __time
SqlNode timeOperandSqlNode = operandList.get(0);
Preconditions.checkArgument(
    timeOperandSqlNode.getKind().equals(SqlKind.IDENTIFIER),
    StringUtils.format("First argument to %s in PARTITIONED BY clause can only be __time", operatorName)
);
SqlIdentifier timeOperandSqlIdentifier = (SqlIdentifier) timeOperandSqlNode;
Preconditions.checkArgument(
    timeOperandSqlIdentifier.getSimple().equals(ColumnHolder.TIME_COLUMN_NAME),
    StringUtils.format("First argument to %s in PARTITIONED BY clause can only be __time", operatorName)
);
// If the floor function is of form TIME_FLOOR(__time, 'PT1H')
if (operatorName.equalsIgnoreCase(TimeFloorOperatorConversion.SQL_FUNCTION_NAME)) {
SqlNode granularitySqlNode = operandList.get(1);
Preconditions.checkArgument(
    granularitySqlNode.getKind().equals(SqlKind.LITERAL),
    "Second argument to TIME_FLOOR in PARTITIONED BY clause must be a period like 'PT1H'"
);
String granularityString = SqlLiteral.unchain(granularitySqlNode).toValue();
Period period;
try {
period = new Period(granularityString);
} catch (IllegalArgumentException e) {
throw new ParseException(StringUtils.format("%s is an invalid period string", granularitySqlNode.toString()));
}
return new PeriodGranularity(period, null, null);
} else if ("FLOOR".equalsIgnoreCase(operatorName)) {
// If the floor function is of form FLOOR(__time TO DAY)
SqlNode granularitySqlNode = operandList.get(1);
// In future versions of Calcite, this can be checked via
// granularitySqlNode.getKind().equals(SqlKind.INTERVAL_QUALIFIER)
Preconditions.checkArgument(
    granularitySqlNode instanceof SqlIntervalQualifier,
    "Second argument to the FLOOR function in PARTITIONED BY clause is not a valid granularity. "
    + "Please refer to the documentation of FLOOR function"
);
SqlIntervalQualifier granularityIntervalQualifier = (SqlIntervalQualifier) granularitySqlNode;
Period period = TimeUnits.toPeriod(granularityIntervalQualifier.timeUnitRange);
Preconditions.checkNotNull(
    period,
    StringUtils.format("%s is not a valid granularity for ingestion", granularityIntervalQualifier.timeUnitRange.toString())
);
return new PeriodGranularity(period, null, null);
}
// Shouldn't reach here
throw new ParseException(
    StringUtils.format(genericParseFailedMessageFormatString, sqlNode.toString(), TimeFloorOperatorConversion.SQL_FUNCTION_NAME)
);
}
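Both branches ultimately reduce the accepted SQL shapes to a plain PeriodGranularity with no origin and no time zone. A minimal sketch of the two mappings, reusing the TimeUnits#toPeriod call from the FLOOR branch above (the helper method names are illustrative; imports assumed: org.apache.calcite.avatica.util.TimeUnitRange, org.joda.time.Period, and the Druid granularity classes already used here):
// FLOOR(__time TO DAY): the TimeUnit is resolved through the TimeUnits mapping.
static Granularity floorDayGranularity() {
  Period dayPeriod = TimeUnits.toPeriod(TimeUnitRange.DAY);  // P1D
  return new PeriodGranularity(dayPeriod, null, null);
}

// TIME_FLOOR(__time, 'PT1H'): the literal parses directly as an ISO-8601 period.
static Granularity timeFloorHourGranularity() {
  return new PeriodGranularity(new Period("PT1H"), null, null);
}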
use of org.apache.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
the class HadoopIngestionSpecTest method testPeriodSegmentGranularitySpec.
@Test
public void testPeriodSegmentGranularitySpec() {
final HadoopIngestionSpec schema;
try {
schema = jsonReadWriteRead(
    "{\n"
    + " \"dataSchema\": {\n"
    + " \"dataSource\": \"foo\",\n"
    + " \"metricsSpec\": [],\n"
    + " \"granularitySpec\": {\n"
    + " \"type\": \"uniform\",\n"
    + " \"segmentGranularity\": {\"type\": \"period\", \"period\":\"PT1H\", \"timeZone\":\"America/Los_Angeles\"},\n"
    + " \"intervals\": [\"2012-01-01/P1D\"]\n"
    + " }\n"
    + " }\n"
    + "}",
    HadoopIngestionSpec.class
);
} catch (Exception e) {
throw new RuntimeException(e);
}
final UniformGranularitySpec granularitySpec = (UniformGranularitySpec) schema.getDataSchema().getGranularitySpec();
Assert.assertEquals(
    "getSegmentGranularity",
    new PeriodGranularity(new Period("PT1H"), null, DateTimes.inferTzFromString("America/Los_Angeles")),
    granularitySpec.getSegmentGranularity()
);
}
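The same {"type": "period", ...} JSON shape can also be deserialized to a Granularity directly. A minimal serde sketch, assuming Druid's DefaultObjectMapper registers the granularity subtypes as it does in current releases (the class name is hypothetical):
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.java.util.common.granularity.Granularity;

public class PeriodGranularitySerdeSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new DefaultObjectMapper();
    Granularity fromJson = mapper.readValue(
        "{\"type\": \"period\", \"period\": \"PT1H\", \"timeZone\": \"America/Los_Angeles\"}",
        Granularity.class
    );
    // Should equal the PeriodGranularity the assertion above builds by hand.
    System.out.println(fromJson);
  }
}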
use of org.apache.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
the class SegmentAllocateActionTest method testSameIntervalWithSegmentGranularity.
@Test
public void testSameIntervalWithSegmentGranularity() {
final Task task = NoopTask.create();
taskActionTestKit.getTaskLockbox().add(task);
Granularity segmentGranularity = new PeriodGranularity(Period.hours(1), null, DateTimes.inferTzFromString("Asia/Shanghai"));
final SegmentIdWithShardSpec id1 = allocate(task, PARTY_TIME, Granularities.MINUTE, segmentGranularity, "s1", null);
final SegmentIdWithShardSpec id2 = allocate(task, PARTY_TIME, Granularities.MINUTE, segmentGranularity, "s2", null);
Assert.assertNotNull(id1);
Assert.assertNotNull(id2);
}
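The Asia/Shanghai zone passed to PeriodGranularity aligns bucket boundaries to local clock time rather than UTC, which shapes the segment intervals these allocations receive. A minimal sketch of the time-zone effect, assuming Granularity#bucketStart(DateTime); a daily period is used here because an hourly one happens to coincide with UTC buckets at a whole-hour offset:
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.Period;

public class ZonedBucketSketch {
  public static void main(String[] args) {
    Granularity dailyShanghai = new PeriodGranularity(
        Period.days(1),
        null,
        DateTimes.inferTzFromString("Asia/Shanghai")
    );
    // Local midnight in Shanghai (UTC+8) is 16:00 UTC of the previous day:
    System.out.println(dailyShanghai.bucketStart(DateTimes.of("2011-04-01T05:30:00Z")));
    // Prints 2011-04-01T00:00:00.000+08:00, i.e. 2011-03-31T16:00:00Z
  }
}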