Use of org.apache.druid.sql.calcite.rel.DruidQuery in project druid by druid-io.
The class NativeQueryMaker, method runQuery:
@Override
public Sequence<Object[]> runQuery(final DruidQuery druidQuery)
{
  final Query<?> query = druidQuery.getQuery();

  if (plannerContext.getPlannerConfig().isRequireTimeCondition()
      && !(druidQuery.getDataSource() instanceof InlineDataSource)) {
    if (Intervals.ONLY_ETERNITY.equals(findBaseDataSourceIntervals(query))) {
      throw new CannotBuildQueryException(
          "requireTimeCondition is enabled, all queries must include a filter condition on the __time column"
      );
    }
  }

  int numFilters = plannerContext.getPlannerConfig().getMaxNumericInFilters();

  // Instead of IN(v1,v2,v3), the user should specify IN('v1','v2','v3').
  if (numFilters != PlannerConfig.NUM_FILTER_NOT_USED) {
    if (query.getFilter() instanceof OrDimFilter) {
      OrDimFilter orDimFilter = (OrDimFilter) query.getFilter();
      int numBoundFilters = 0;
      for (DimFilter filter : orDimFilter.getFields()) {
        numBoundFilters += filter instanceof BoundDimFilter ? 1 : 0;
      }
      if (numBoundFilters > numFilters) {
        String dimension = ((BoundDimFilter) (orDimFilter.getFields().get(0))).getDimension();
        throw new UOE(StringUtils.format(
            "The number of values in the IN clause for [%s] in query exceeds configured maxNumericFilter limit of [%s] for INs. Cast [%s] values of IN clause to String",
            dimension,
            numFilters,
            orDimFilter.getFields().size()
        ));
      }
    }
  }

  final List<String> rowOrder;

  if (query instanceof TimeseriesQuery && !druidQuery.getGrouping().getDimensions().isEmpty()) {
    // Hack for timeseries queries: when generating them, DruidQuery.toTimeseriesQuery translates a dimension
    // based on a timestamp_floor expression into a 'granularity'. This is not reflected in the druidQuery's
    // output row signature, so we have to account for it here.
    // TODO: We can remove this once https://github.com/apache/druid/issues/9974 is done.
    final String timeDimension = Iterables.getOnlyElement(druidQuery.getGrouping().getDimensions()).getOutputName();
    rowOrder = druidQuery.getOutputRowSignature()
                         .getColumnNames()
                         .stream()
                         .map(f -> timeDimension.equals(f) ? ColumnHolder.TIME_COLUMN_NAME : f)
                         .collect(Collectors.toList());
  } else {
    rowOrder = druidQuery.getOutputRowSignature().getColumnNames();
  }

  final List<SqlTypeName> columnTypes = druidQuery.getOutputRowType()
                                                  .getFieldList()
                                                  .stream()
                                                  .map(f -> f.getType().getSqlTypeName())
                                                  .collect(Collectors.toList());

  return execute(query, mapColumnList(rowOrder, fieldMapping), mapColumnList(columnTypes, fieldMapping));
}
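
The eternity check above works because a SQL query with no filter on __time plans with the single interval ETERNITY as its base interval. A minimal, self-contained sketch of that comparison (Intervals.ONLY_ETERNITY and Intervals.of are real Druid APIs; the class and helper names here are illustrative, not part of NativeQueryMaker):

import org.apache.druid.java.util.common.Intervals;
import org.joda.time.Interval;
import java.util.Collections;
import java.util.List;

class RequireTimeConditionSketch
{
  // Mirrors the guard in runQuery: base intervals equal to [ETERNITY]
  // mean the query carries no condition on the __time column.
  static boolean hasTimeCondition(List<Interval> baseIntervals)
  {
    return !Intervals.ONLY_ETERNITY.equals(baseIntervals);
  }

  public static void main(String[] args)
  {
    System.out.println(hasTimeCondition(Intervals.ONLY_ETERNITY)); // false -> query rejected
    System.out.println(hasTimeCondition(
        Collections.singletonList(Intervals.of("2023-01-01/2023-02-01")))); // true -> query accepted
  }
}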
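
The maxNumericInFilters guard counts how many of the OR'ed filters are bound (range) filters, which is what a numeric IN list becomes after planning; a string IN list produces selector filters instead and is not counted. A self-contained sketch of that counting logic (the filter types below are hypothetical stand-ins for Druid's DimFilter hierarchy, so the example runs on its own):

import java.util.Arrays;
import java.util.List;

interface DimFilter {}

// Stand-in for org.apache.druid.query.filter.BoundDimFilter (numeric range).
class BoundDimFilter implements DimFilter {}

// Stand-in for org.apache.druid.query.filter.SelectorDimFilter (string equality).
class SelectorDimFilter implements DimFilter {}

class MaxNumericInFiltersSketch
{
  static void check(List<DimFilter> orFields, int maxNumericInFilters)
  {
    long numBoundFilters = orFields.stream().filter(f -> f instanceof BoundDimFilter).count();
    if (numBoundFilters > maxNumericInFilters) {
      throw new UnsupportedOperationException(
          "IN clause exceeds maxNumericInFilters; cast the IN values to String");
    }
  }

  public static void main(String[] args)
  {
    // x IN (1, 2, 3) plans as an OR of three bound filters: throws when the limit is 2.
    check(Arrays.asList(new BoundDimFilter(), new BoundDimFilter(), new BoundDimFilter()), 2);
  }
}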
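
Note that mapColumnList and fieldMapping are not shown in this excerpt. Judging from the call sites, fieldMapping pairs each output position with a field, and mapColumnList projects a column list through it; a plausible sketch under that assumption (the class name and the Map.Entry representation are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class ColumnMappingSketch
{
  // Illustrative: pick the entries of `l` selected by the mapping's indexes,
  // yielding the columns in the order the SQL consumer expects.
  static <T> List<T> mapColumnList(List<T> l, List<Map.Entry<Integer, String>> fieldMapping)
  {
    List<T> retVal = new ArrayList<>(fieldMapping.size());
    for (Map.Entry<Integer, String> entry : fieldMapping) {
      retVal.add(l.get(entry.getKey()));
    }
    return retVal;
  }
}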
Use of org.apache.druid.sql.calcite.rel.DruidQuery in project druid by druid-io.
The class DruidPlanner, method explainSqlPlanAsNativeQueries:
/**
 * This method doesn't use Calcite's internal {@link RelOptUtil#dumpPlan}, since that output tends to be verbose
 * and is not indicative of the native Druid queries that will actually be executed.
 * It assumes that the planner has already converted the RelNodes to DruidRels, so the implicit cast is safe.
 *
 * @param rel the root {@link DruidRel}, produced by running the planner transformations
 * @return a string containing a JSON array of the native queries that correspond to the given SQL query
 * @throws JsonProcessingException if a native query or its signature cannot be serialized to JSON
 */
private String explainSqlPlanAsNativeQueries(DruidRel<?> rel) throws JsonProcessingException
{
  ObjectMapper jsonMapper = plannerContext.getJsonMapper();
  List<DruidQuery> druidQueryList = flattenOutermostRel(rel)
      .stream()
      .map(druidRel -> druidRel.toDruidQuery(false))
      .collect(Collectors.toList());

  // Putting the queries as object nodes in an ArrayNode, since directly returning a list causes issues when
  // serializing the "queryType". Another approach would be to create a POJO containing the query and signature,
  // and then serialize it using the normal list method.
  ArrayNode nativeQueriesArrayNode = jsonMapper.createArrayNode();

  for (DruidQuery druidQuery : druidQueryList) {
    Query<?> nativeQuery = druidQuery.getQuery();
    ObjectNode objectNode = jsonMapper.createObjectNode();
    objectNode.put("query", jsonMapper.convertValue(nativeQuery, ObjectNode.class));
    objectNode.put("signature", jsonMapper.convertValue(druidQuery.getOutputRowSignature(), ArrayNode.class));
    nativeQueriesArrayNode.add(objectNode);
  }

  return jsonMapper.writeValueAsString(nativeQueriesArrayNode);
}
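
The returned string is therefore a JSON array with one object per native query, each carrying the query itself plus its output signature. Illustratively (the field values are made up; the shape follows from the loop above):

[
  {
    "query": { "queryType": "timeseries", ... },
    "signature": [ {"name": "__time", "type": "LONG"}, {"name": "a0", "type": "LONG"} ]
  }
]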