Search in sources:

Example 1 with DimensionExpression

use of org.apache.druid.sql.calcite.aggregation.DimensionExpression in project druid by druid-io.

In the class DruidQuery, the method computeGrouping:

@Nonnull
private static Grouping computeGrouping(final PartialDruidQuery partialQuery, final PlannerContext plannerContext, final RowSignature rowSignature, final VirtualColumnRegistry virtualColumnRegistry, final RexBuilder rexBuilder, final boolean finalizeAggregations) {
    // Assemble the pieces of the grouping (dimensions, subtotals, aggregations) from the partial query.
    final Aggregate aggregateRel = Preconditions.checkNotNull(partialQuery.getAggregate(), "aggregate");
    final Project postAggregateProject = partialQuery.getAggregateProject();
    final List<DimensionExpression> dims = computeDimensions(partialQuery, plannerContext, rowSignature, virtualColumnRegistry);
    final Subtotals subtotalSpec = computeSubtotals(partialQuery, rowSignature);
    final List<Aggregation> aggs = computeAggregations(partialQuery, plannerContext, rowSignature, virtualColumnRegistry, rexBuilder, finalizeAggregations);
    // The aggregate row signature is the dimension output names followed by the aggregation output
    // names, typed according to the Aggregate rel's row type.
    final List<String> outputNames = new ArrayList<>();
    for (final DimensionExpression dim : dims) {
        outputNames.add(dim.getOutputName());
    }
    for (final Aggregation agg : aggs) {
        outputNames.add(agg.getOutputName());
    }
    final RowSignature aggregateRowSignature = RowSignatures.fromRelDataType(ImmutableList.copyOf(outputNames), aggregateRel.getRowType());
    // HAVING is resolved against the post-aggregation signature, since it can reference aggregates.
    final DimFilter havingFilter = computeHavingFilter(partialQuery, plannerContext, aggregateRowSignature);
    final Grouping baseGrouping = Grouping.create(dims, subtotalSpec, aggs, havingFilter, aggregateRowSignature);
    // A projection on top of the aggregate (if present) is folded into the grouping.
    return postAggregateProject == null ? baseGrouping : baseGrouping.applyProject(plannerContext, postAggregateProject);
}
Also used : Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) Project(org.apache.calcite.rel.core.Project) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) Aggregate(org.apache.calcite.rel.core.Aggregate) RowSignature(org.apache.druid.segment.column.RowSignature) DimFilter(org.apache.druid.query.filter.DimFilter) Nonnull(javax.annotation.Nonnull)

Example 2 with DimensionExpression

Use of org.apache.druid.sql.calcite.aggregation.DimensionExpression in project druid by druid-io.

In the class DruidQuery, the method toGroupByQuery:

/**
 * Return this query as a GroupBy query, or null if this query is not compatible with GroupBy.
 *
 * @return query or null
 */
@Nullable
private GroupByQuery toGroupByQuery(final QueryFeatureInspector queryFeatureInspector) {
    // No grouping means this query cannot be expressed as a GroupBy at all.
    if (grouping == null) {
        return null;
    }
    if (sorting != null && sorting.getOffsetLimit().hasLimit() && sorting.getOffsetLimit().getLimit() <= 0) {
        // Cannot handle zero or negative limits.
        return null;
    }
    // Split the WHERE filter into a datasource-level part and a query-level Filtration.
    final Pair<DataSource, Filtration> dataSourceFiltrationPair = getFiltration(dataSource, filter, virtualColumnRegistry);
    final DataSource newDataSource = dataSourceFiltrationPair.lhs;
    final Filtration filtration = dataSourceFiltrationPair.rhs;
    final DimFilterHavingSpec havingSpec;
    if (grouping.getHavingFilter() != null) {
        // HAVING is optimized against the grouping's output signature (filter-only, no interval extraction).
        havingSpec = new DimFilterHavingSpec(Filtration.create(grouping.getHavingFilter()).optimizeFilterOnly(grouping.getOutputRowSignature()).getDimFilter(), true);
    } else {
        havingSpec = null;
    }
    // Post-aggregators come from the grouping itself plus any projection attached to the sort.
    final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
    if (sorting != null && sorting.getProjection() != null) {
        postAggregators.addAll(sorting.getProjection().getPostAggregators());
    }
    GroupByQuery query = new GroupByQuery(newDataSource, filtration.getQuerySegmentSpec(), getVirtualColumns(true), filtration.getDimFilter(), Granularities.ALL, grouping.getDimensionSpecs(), grouping.getAggregatorFactories(), postAggregators, havingSpec, Optional.ofNullable(sorting).orElse(Sorting.none()).limitSpec(), grouping.getSubtotals().toSubtotalsSpec(grouping.getDimensionSpecs()), ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
    // We don't apply timestamp computation optimization yet when limit is pushed down. Maybe someday.
    if (query.getLimitSpec() instanceof DefaultLimitSpec && query.isApplyLimitPushDown()) {
        return query;
    }
    Map<String, Object> theContext = new HashMap<>();
    Granularity queryGranularity = null;
    // now, part of the query plan logic is handled in GroupByStrategyV2.
    // Scan the dimensions for a single timestamp_floor(__time, ...) expression; if exactly one is
    // found, its granularity and position are recorded in the query context so downstream nodes can
    // compute the timestamp result field directly.
    if (!grouping.getDimensions().isEmpty()) {
        for (DimensionExpression dimensionExpression : grouping.getDimensions()) {
            Granularity granularity = Expressions.toQueryGranularity(dimensionExpression.getDruidExpression(), plannerContext.getExprMacroTable());
            if (granularity == null) {
                continue;
            }
            if (queryGranularity != null) {
                // group by more than one timestamp_floor
                // eg: group by timestamp_floor(__time to DAY),timestamp_floor(__time, to HOUR)
                queryGranularity = null;
                break;
            }
            queryGranularity = granularity;
            int timestampDimensionIndexInDimensions = grouping.getDimensions().indexOf(dimensionExpression);
            // these settings will only affect the most inner query sent to the down streaming compute nodes
            theContext.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD, dimensionExpression.getOutputName());
            theContext.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_INDEX, timestampDimensionIndexInDimensions);
            theContext.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_GRANULARITY, queryGranularity);
        }
    }
    // queryGranularity is null either when no timestamp_floor dimension was found or when more than
    // one was found (ambiguous) — in both cases the optimization is skipped.
    if (queryGranularity == null) {
        return query;
    }
    return query.withOverriddenContext(theContext);
}
Also used : DimFilterHavingSpec(org.apache.druid.query.groupby.having.DimFilterHavingSpec) Filtration(org.apache.druid.sql.calcite.filtration.Filtration) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) HashMap(java.util.HashMap) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) ArrayList(java.util.ArrayList) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DataSource(org.apache.druid.query.DataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Nullable(javax.annotation.Nullable)

Example 3 with DimensionExpression

use of org.apache.druid.sql.calcite.aggregation.DimensionExpression in project druid by druid-io.

In the class DruidQuery, the method computeDimensions:

/**
 * Returns dimensions corresponding to {@code aggregate.getGroupSet()}, in the same order.
 *
 * @param partialQuery          partial query
 * @param plannerContext        planner context
 * @param rowSignature          source row signature
 * @param virtualColumnRegistry re-usable virtual column references
 *
 * @return dimensions
 *
 * @throws CannotBuildQueryException if dimensions cannot be computed
 */
private static List<DimensionExpression> computeDimensions(final PartialDruidQuery partialQuery, final PlannerContext plannerContext, final RowSignature rowSignature, final VirtualColumnRegistry virtualColumnRegistry) {
    final Aggregate aggregate = Preconditions.checkNotNull(partialQuery.getAggregate());
    // Pick a dimension-name prefix that cannot collide with existing columns, since translated
    // dimensions may need to create virtual columns.
    final String dimNamePrefix = Calcites.findUnusedPrefixForDigits("d", rowSignature.getColumnNames());
    final List<DimensionExpression> result = new ArrayList<>();
    int nameCounter = 0;
    for (int fieldIndex : aggregate.getGroupSet()) {
        final RexNode rexNode = Expressions.fromFieldAccess(rowSignature, partialQuery.getSelectProject(), fieldIndex);
        final DruidExpression druidExpression = Expressions.toDruidExpression(plannerContext, rowSignature, rexNode);
        if (druidExpression == null) {
            // Grouping expression is not translatable to native Druid.
            throw new CannotBuildQueryException(aggregate, rexNode);
        }
        final RelDataType dataType = rexNode.getType();
        final ColumnType outputType = Calcites.getColumnTypeForRelDataType(dataType);
        if (Types.isNullOr(outputType, ValueType.COMPLEX)) {
            // Can't group on unknown or COMPLEX types.
            plannerContext.setPlanningError("SQL requires a group-by on a column of type %s that is unsupported.", outputType);
            throw new CannotBuildQueryException(aggregate, rexNode);
        }
        final String dimOutputName = dimNamePrefix + nameCounter++;
        if (druidExpression.isSimpleExtraction()) {
            // Direct column reference (or simple extraction): no virtual column required.
            result.add(DimensionExpression.ofSimpleColumn(dimOutputName, druidExpression, outputType));
        } else {
            // Non-trivial expression: materialize it through a registered virtual column.
            final String virtualColumnName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(druidExpression, dataType);
            result.add(DimensionExpression.ofVirtualColumn(virtualColumnName, dimOutputName, druidExpression, outputType));
        }
    }
    return result;
}
Also used : ColumnType(org.apache.druid.segment.column.ColumnType) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) ArrayList(java.util.ArrayList) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) RelDataType(org.apache.calcite.rel.type.RelDataType) Aggregate(org.apache.calcite.rel.core.Aggregate) RexNode(org.apache.calcite.rex.RexNode)

Example 4 with DimensionExpression

use of org.apache.druid.sql.calcite.aggregation.DimensionExpression in project druid by druid-io.

In the class Grouping, the method applyProject:

/**
 * Applies a post-grouping projection.
 *
 * @see DruidQuery#computeGrouping which uses this
 */
public Grouping applyProject(final PlannerContext plannerContext, final Project project) {
    final List<DimensionExpression> newDimensions = new ArrayList<>();
    final List<Aggregation> newAggregations = new ArrayList<>(aggregations);
    final Subtotals newSubtotals;
    // Translate the Calcite projection into Druid post-aggregators on top of this grouping's output.
    final Projection postAggregationProjection = Projection.postAggregation(project, plannerContext, outputRowSignature, "p");
    postAggregationProjection.getPostAggregators().forEach(postAggregator -> newAggregations.add(Aggregation.create(postAggregator)));
    // Remove literal dimensions that did not appear in the projection. This is useful for queries
    // like "SELECT COUNT(*) FROM tbl GROUP BY 'dummy'" which some tools can generate, and for which we don't
    // actually want to include a dimension 'dummy'.
    final ImmutableBitSet aggregateProjectBits = RelOptUtil.InputFinder.bits(project.getChildExps(), null);
    // newDimIndexes[i] maps old dimension index i to its new index, or -1 if the dimension is dropped.
    final int[] newDimIndexes = new int[dimensions.size()];
    boolean droppedDimensions = false;
    for (int i = 0; i < dimensions.size(); i++) {
        final DimensionExpression dimension = dimensions.get(i);
        // A dimension is droppable only if it is a literal AND the projection never references it.
        if (Parser.parse(dimension.getDruidExpression().getExpression(), plannerContext.getExprMacroTable()).isLiteral() && !aggregateProjectBits.get(i)) {
            droppedDimensions = true;
            newDimIndexes[i] = -1;
        } else {
            newDimIndexes[i] = newDimensions.size();
            newDimensions.add(dimension);
        }
    }
    // Renumber subtotals, if needed, to account for removed dummy dimensions.
    if (newDimensions.size() != dimensions.size()) {
        final List<IntList> newSubtotalsList = new ArrayList<>();
        for (IntList subtotal : subtotals.getSubtotals()) {
            final IntList newSubtotal = new IntArrayList();
            for (int dimIndex : subtotal) {
                final int newDimIndex = newDimIndexes[dimIndex];
                // Dropped dimensions (mapped to -1) are simply omitted from the subtotal.
                if (newDimIndex >= 0) {
                    newSubtotal.add(newDimIndex);
                }
            }
            newSubtotalsList.add(newSubtotal);
        }
        newSubtotals = new Subtotals(newSubtotalsList);
    } else {
        newSubtotals = subtotals;
    }
    return Grouping.create(newDimensions, newSubtotals, newAggregations, havingFilter, postAggregationProjection.getOutputRowSignature(), droppedDimensions);
}
Also used : ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) ArrayList(java.util.ArrayList) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) IntList(it.unimi.dsi.fastutil.ints.IntList) Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList)

Example 5 with DimensionExpression

use of org.apache.druid.sql.calcite.aggregation.DimensionExpression in project druid by druid-io.

In the class DruidQuery, the method toTimeseriesQuery:

/**
 * Return this query as a Timeseries query, or null if this query is not compatible with Timeseries.
 *
 * @return query
 */
@Nullable
private TimeseriesQuery toTimeseriesQuery(final QueryFeatureInspector queryFeatureInspector) {
    // Timeseries requires: the feature enabled, a grouping present, no effective subtotals, no HAVING.
    if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TIMESERIES) || grouping == null || grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs()) || grouping.getHavingFilter() != null) {
        return null;
    }
    if (sorting != null && sorting.getOffsetLimit().hasOffset()) {
        // Timeseries cannot handle offsets.
        return null;
    }
    final Granularity queryGranularity;
    final boolean descending;
    int timeseriesLimit = 0;
    final Map<String, Object> theContext = new HashMap<>();
    if (grouping.getDimensions().isEmpty()) {
        // No dimensions: a single all-time bucket, ascending by convention.
        queryGranularity = Granularities.ALL;
        descending = false;
    } else if (grouping.getDimensions().size() == 1) {
        final DimensionExpression dimensionExpression = Iterables.getOnlyElement(grouping.getDimensions());
        queryGranularity = Expressions.toQueryGranularity(dimensionExpression.getDruidExpression(), plannerContext.getExprMacroTable());
        if (queryGranularity == null) {
            // Timeseries only applies if the single dimension is granular __time.
            return null;
        }
        // Record the dimension's output name so result rows can expose the timestamp under it.
        theContext.put(TimeseriesQuery.CTX_TIMESTAMP_RESULT_FIELD, Iterables.getOnlyElement(grouping.getDimensions()).toDimensionSpec().getOutputName());
        if (sorting != null) {
            if (sorting.getOffsetLimit().hasLimit()) {
                final long limit = sorting.getOffsetLimit().getLimit();
                if (limit == 0) {
                    // Can't handle zero limit (the Timeseries query engine would treat it as unlimited).
                    return null;
                }
                timeseriesLimit = Ints.checkedCast(limit);
            }
            // Only sorts on the time dimension itself (or no sort) are representable.
            switch(sorting.getTimeSortKind(dimensionExpression.getOutputName())) {
                case UNORDERED:
                case TIME_ASCENDING:
                    descending = false;
                    break;
                case TIME_DESCENDING:
                    descending = true;
                    break;
                default:
                    // Sorting on a metric, maybe. Timeseries cannot handle.
                    return null;
            }
        } else {
            // No limitSpec.
            descending = false;
        }
    } else {
        // More than one dimension, timeseries cannot handle.
        return null;
    }
    // was originally a groupBy query, but with the grouping dimensions removed away in Grouping#applyProject
    if (!Granularities.ALL.equals(queryGranularity) || grouping.hasGroupingDimensionsDropped()) {
        theContext.put(TimeseriesQuery.SKIP_EMPTY_BUCKETS, true);
    }
    // User-supplied query context entries override / extend the computed ones.
    theContext.putAll(plannerContext.getQueryContext());
    final Pair<DataSource, Filtration> dataSourceFiltrationPair = getFiltration(dataSource, filter, virtualColumnRegistry);
    final DataSource newDataSource = dataSourceFiltrationPair.lhs;
    final Filtration filtration = dataSourceFiltrationPair.rhs;
    // Post-aggregators come from the grouping plus any projection attached to the sort.
    final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
    if (sorting != null && sorting.getProjection() != null) {
        postAggregators.addAll(sorting.getProjection().getPostAggregators());
    }
    return new TimeseriesQuery(newDataSource, filtration.getQuerySegmentSpec(), descending, getVirtualColumns(false), filtration.getDimFilter(), queryGranularity, grouping.getAggregatorFactories(), postAggregators, timeseriesLimit, ImmutableSortedMap.copyOf(theContext));
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) HashMap(java.util.HashMap) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) ArrayList(java.util.ArrayList) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DataSource(org.apache.druid.query.DataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Nullable(javax.annotation.Nullable)

Aggregations

DimensionExpression (org.apache.druid.sql.calcite.aggregation.DimensionExpression)6 IntArrayList (it.unimi.dsi.fastutil.ints.IntArrayList)5 ArrayList (java.util.ArrayList)5 JoinDataSource (org.apache.druid.query.JoinDataSource)3 Aggregation (org.apache.druid.sql.calcite.aggregation.Aggregation)3 HashMap (java.util.HashMap)2 Nullable (javax.annotation.Nullable)2 Aggregate (org.apache.calcite.rel.core.Aggregate)2 Granularity (org.apache.druid.java.util.common.granularity.Granularity)2 DataSource (org.apache.druid.query.DataSource)2 QueryDataSource (org.apache.druid.query.QueryDataSource)2 PostAggregator (org.apache.druid.query.aggregation.PostAggregator)2 Filtration (org.apache.druid.sql.calcite.filtration.Filtration)2 IntList (it.unimi.dsi.fastutil.ints.IntList)1 HashSet (java.util.HashSet)1 Nonnull (javax.annotation.Nonnull)1 Project (org.apache.calcite.rel.core.Project)1 RelDataType (org.apache.calcite.rel.type.RelDataType)1 RexNode (org.apache.calcite.rex.RexNode)1 ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet)1