Search in sources :

Example 6 with Filtration

use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.

In the class DruidQueryTest, the method test_filtration_joinDataSource_intervalInBaseTableFilter_left:

/**
 * Left join whose base-table filter carries an interval: after filtration, the interval
 * should move into the query's interval (100..200 UTC) while only the selector filter
 * remains on the joined base table.
 */
@Test
public void test_filtration_joinDataSource_intervalInBaseTableFilter_left() {
    final DataSource joinWithIntervalFilter = join(JoinType.LEFT, filterWithInterval);
    final DataSource joinWithSelectorOnly = join(JoinType.LEFT, selectorFilter);
    final VirtualColumnRegistry registry = VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE);
    final Pair<DataSource, Filtration> result = DruidQuery.getFiltration(joinWithIntervalFilter, otherFilter, registry);
    verify(result, joinWithSelectorOnly, otherFilter, Intervals.utc(100, 200));
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) DataSource(org.apache.druid.query.DataSource) TableDataSource(org.apache.druid.query.TableDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Test(org.junit.Test)

Example 7 with Filtration

use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.

In the class DruidQuery, the method toScanQuery:

/**
 * Return this query as a Scan query, or null if this query is not compatible with Scan.
 *
 * @param queryFeatureInspector reports which features the target engine supports; used here to
 *                              decide whether ordering Scan results on non-time columns is allowed
 * @return equivalent Scan query, or null if this query cannot be expressed as a Scan
 */
@Nullable
private ScanQuery toScanQuery(final QueryFeatureInspector queryFeatureInspector) {
    if (grouping != null) {
        // Scan cannot GROUP BY.
        return null;
    }
    if (outputRowSignature.size() == 0) {
        // Should never do a scan query without any columns that we're interested in. This is probably a planner bug.
        throw new ISE("Cannot convert to Scan query without any columns.");
    }
    // Split the query filter: part of it may be absorbed into the data source (see getFiltration);
    // the remainder becomes the Scan query's dim filter and interval.
    final Pair<DataSource, Filtration> dataSourceFiltrationPair = getFiltration(dataSource, filter, virtualColumnRegistry);
    final DataSource newDataSource = dataSourceFiltrationPair.lhs;
    final Filtration filtration = dataSourceFiltrationPair.rhs;
    final List<ScanQuery.OrderBy> orderByColumns;
    // Zero means "unbounded" to the Scan engine, so these defaults apply when no offset/limit exists.
    long scanOffset = 0L;
    long scanLimit = 0L;
    if (sorting != null) {
        scanOffset = sorting.getOffsetLimit().getOffset();
        if (sorting.getOffsetLimit().hasLimit()) {
            final long limit = sorting.getOffsetLimit().getLimit();
            if (limit == 0) {
                // Can't handle zero limit (the Scan query engine would treat it as unlimited).
                return null;
            }
            scanLimit = limit;
        }
        // Translate the planner's ORDER BY directions into Scan's own Order enum.
        orderByColumns = sorting.getOrderBys().stream().map(orderBy -> new ScanQuery.OrderBy(orderBy.getDimension(), orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING ? ScanQuery.Order.DESCENDING : ScanQuery.Order.ASCENDING)).collect(Collectors.toList());
    } else {
        orderByColumns = Collections.emptyList();
    }
    // Without SCAN_CAN_ORDER_BY_NON_TIME, the only acceptable ordering is a single __time column.
    if (!queryFeatureInspector.feature(QueryFeature.SCAN_CAN_ORDER_BY_NON_TIME) && (orderByColumns.size() > 1 || orderByColumns.stream().anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME)))) {
        // Cannot handle this ordering.
        // Scan cannot ORDER BY non-time columns.
        plannerContext.setPlanningError("SQL query requires order by non-time column %s that is not supported.", orderByColumns);
        return null;
    }
    // Compute the list of columns to select, sorted and deduped.
    final SortedSet<String> scanColumns = new TreeSet<>(outputRowSignature.getColumnNames());
    orderByColumns.forEach(column -> scanColumns.add(column.getColumnName()));
    return new ScanQuery(newDataSource, filtration.getQuerySegmentSpec(), getVirtualColumns(true), ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST, 0, scanOffset, scanLimit, null, orderByColumns, filtration.getDimFilter(), ImmutableList.copyOf(scanColumns), false, ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}
Also used : GroupByRules(org.apache.druid.sql.calcite.rule.GroupByRules) SortedSet(java.util.SortedSet) RowSignatures(org.apache.druid.sql.calcite.table.RowSignatures) OffsetLimit(org.apache.druid.sql.calcite.planner.OffsetLimit) Pair(org.apache.druid.java.util.common.Pair) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) OrderByColumnSpec(org.apache.druid.query.groupby.orderby.OrderByColumnSpec) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) RexNode(org.apache.calcite.rex.RexNode) Map(java.util.Map) DimFilterHavingSpec(org.apache.druid.query.groupby.having.DimFilterHavingSpec) InvertedTopNMetricSpec(org.apache.druid.query.topn.InvertedTopNMetricSpec) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) QueryFeatureInspector(org.apache.druid.sql.calcite.run.QueryFeatureInspector) SqlKind(org.apache.calcite.sql.SqlKind) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) DataSource(org.apache.druid.query.DataSource) TopNMetricSpec(org.apache.druid.query.topn.TopNMetricSpec) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) NumericTopNMetricSpec(org.apache.druid.query.topn.NumericTopNMetricSpec) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) ValueType(org.apache.druid.segment.column.ValueType) RelFieldCollation(org.apache.calcite.rel.RelFieldCollation) Collectors(java.util.stream.Collectors) RexInputRef(org.apache.calcite.rex.RexInputRef) QueryDataSource(org.apache.druid.query.QueryDataSource) List(java.util.List) DimFilter(org.apache.druid.query.filter.DimFilter) Sort(org.apache.calcite.rel.core.Sort) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) Optional(java.util.Optional) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) Expressions(org.apache.druid.sql.calcite.expression.Expressions) Project(org.apache.calcite.rel.core.Project) Iterables(com.google.common.collect.Iterables) 
Granularity(org.apache.druid.java.util.common.granularity.Granularity) HashMap(java.util.HashMap) DimensionTopNMetricSpec(org.apache.druid.query.topn.DimensionTopNMetricSpec) Filter(org.apache.calcite.rel.core.Filter) QueryFeature(org.apache.druid.sql.calcite.run.QueryFeature) Iterators(com.google.common.collect.Iterators) ScanQuery(org.apache.druid.query.scan.ScanQuery) TopNQuery(org.apache.druid.query.topn.TopNQuery) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) HashSet(java.util.HashSet) ColumnHolder(org.apache.druid.segment.column.ColumnHolder) ImmutableList(com.google.common.collect.ImmutableList) Query(org.apache.druid.query.Query) StringComparators(org.apache.druid.query.ordering.StringComparators) PlannerContext(org.apache.druid.sql.calcite.planner.PlannerContext) JoinDataSource(org.apache.druid.query.JoinDataSource) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Nonnull(javax.annotation.Nonnull) ImmutableSortedMap(com.google.common.collect.ImmutableSortedMap) Nullable(javax.annotation.Nullable) Filtration(org.apache.druid.sql.calcite.filtration.Filtration) RelDataType(org.apache.calcite.rel.type.RelDataType) VirtualColumns(org.apache.druid.segment.VirtualColumns) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) StringComparator(org.apache.druid.query.ordering.StringComparator) RexBuilder(org.apache.calcite.rex.RexBuilder) VirtualColumn(org.apache.druid.segment.VirtualColumn) Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) Ints(com.google.common.primitives.Ints) Aggregate(org.apache.calcite.rel.core.Aggregate) Granularities(org.apache.druid.java.util.common.granularity.Granularities) IntList(it.unimi.dsi.fastutil.ints.IntList) Types(org.apache.druid.segment.column.Types) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Preconditions(com.google.common.base.Preconditions) 
AggregateCall(org.apache.calcite.rel.core.AggregateCall) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Calcites(org.apache.druid.sql.calcite.planner.Calcites) Collections(java.util.Collections) Filtration(org.apache.druid.sql.calcite.filtration.Filtration) ScanQuery(org.apache.druid.query.scan.ScanQuery) DataSource(org.apache.druid.query.DataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) TreeSet(java.util.TreeSet) ISE(org.apache.druid.java.util.common.ISE) Nullable(javax.annotation.Nullable)

Example 8 with Filtration

use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.

In the class DruidQuery, the method toTimeseriesQuery:

/**
 * Return this query as a Timeseries query, or null if this query is not compatible with Timeseries.
 *
 * @param queryFeatureInspector reports which features the target engine supports; Timeseries is
 *                              only attempted when {@link QueryFeature#CAN_RUN_TIMESERIES} is set
 * @return equivalent Timeseries query, or null if this query cannot be expressed as a Timeseries
 */
@Nullable
private TimeseriesQuery toTimeseriesQuery(final QueryFeatureInspector queryFeatureInspector) {
    // Requires engine support, an actual grouping, no result-changing subtotals, and no HAVING.
    if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TIMESERIES) || grouping == null || grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs()) || grouping.getHavingFilter() != null) {
        return null;
    }
    if (sorting != null && sorting.getOffsetLimit().hasOffset()) {
        // Timeseries cannot handle offsets.
        return null;
    }
    final Granularity queryGranularity;
    final boolean descending;
    // 0 means "no limit" to the Timeseries engine; overwritten below when a real limit exists.
    int timeseriesLimit = 0;
    final Map<String, Object> theContext = new HashMap<>();
    if (grouping.getDimensions().isEmpty()) {
        // No grouping dimensions: aggregate everything into a single ALL-granularity bucket.
        queryGranularity = Granularities.ALL;
        descending = false;
    } else if (grouping.getDimensions().size() == 1) {
        final DimensionExpression dimensionExpression = Iterables.getOnlyElement(grouping.getDimensions());
        queryGranularity = Expressions.toQueryGranularity(dimensionExpression.getDruidExpression(), plannerContext.getExprMacroTable());
        if (queryGranularity == null) {
            // Timeseries only applies if the single dimension is granular __time.
            return null;
        }
        // Tell the engine which output field carries the bucketed timestamp.
        theContext.put(TimeseriesQuery.CTX_TIMESTAMP_RESULT_FIELD, Iterables.getOnlyElement(grouping.getDimensions()).toDimensionSpec().getOutputName());
        if (sorting != null) {
            if (sorting.getOffsetLimit().hasLimit()) {
                final long limit = sorting.getOffsetLimit().getLimit();
                if (limit == 0) {
                    // Can't handle zero limit (the Timeseries query engine would treat it as unlimited).
                    return null;
                }
                // Ints.checkedCast throws if the limit does not fit in an int.
                timeseriesLimit = Ints.checkedCast(limit);
            }
            // Only orderings on the time dimension (or no ordering) are representable.
            switch(sorting.getTimeSortKind(dimensionExpression.getOutputName())) {
                case UNORDERED:
                case TIME_ASCENDING:
                    descending = false;
                    break;
                case TIME_DESCENDING:
                    descending = true;
                    break;
                default:
                    // Sorting on a metric, maybe. Timeseries cannot handle.
                    return null;
            }
        } else {
            // No limitSpec.
            descending = false;
        }
    } else {
        // More than one dimension, timeseries cannot handle.
        return null;
    }
    // was originally a groupBy query, but with the grouping dimensions removed away in Grouping#applyProject
    if (!Granularities.ALL.equals(queryGranularity) || grouping.hasGroupingDimensionsDropped()) {
        theContext.put(TimeseriesQuery.SKIP_EMPTY_BUCKETS, true);
    }
    // User-supplied context entries win over the ones computed above (later puts overwrite).
    theContext.putAll(plannerContext.getQueryContext());
    // Split the query filter between the data source and the query's interval/dim filter.
    final Pair<DataSource, Filtration> dataSourceFiltrationPair = getFiltration(dataSource, filter, virtualColumnRegistry);
    final DataSource newDataSource = dataSourceFiltrationPair.lhs;
    final Filtration filtration = dataSourceFiltrationPair.rhs;
    final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
    if (sorting != null && sorting.getProjection() != null) {
        postAggregators.addAll(sorting.getProjection().getPostAggregators());
    }
    return new TimeseriesQuery(newDataSource, filtration.getQuerySegmentSpec(), descending, getVirtualColumns(false), filtration.getDimFilter(), queryGranularity, grouping.getAggregatorFactories(), postAggregators, timeseriesLimit, ImmutableSortedMap.copyOf(theContext));
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) HashMap(java.util.HashMap) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) ArrayList(java.util.ArrayList) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DataSource(org.apache.druid.query.DataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Nullable(javax.annotation.Nullable)

Example 9 with Filtration

use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.

In the class DruidQueryTest, the method test_filtration_intervalInQueryFilter:

/**
 * Plain table with an interval-bearing filter: the interval should be lifted into the
 * query interval (100..200 UTC), leaving only the selector filter, with the data source
 * unchanged.
 */
@Test
public void test_filtration_intervalInQueryFilter() {
    final DataSource table = new TableDataSource("test");
    final VirtualColumnRegistry registry = VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE);
    final Pair<DataSource, Filtration> result = DruidQuery.getFiltration(table, filterWithInterval, registry);
    verify(result, table, selectorFilter, Intervals.utc(100, 200));
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) TableDataSource(org.apache.druid.query.TableDataSource) DataSource(org.apache.druid.query.DataSource) TableDataSource(org.apache.druid.query.TableDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Test(org.junit.Test)

Example 10 with Filtration

use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.

In the class DruidQueryTest, the method test_filtration_noJoinAndInterval:

/**
 * Plain table with a filter that carries no interval: filtration should leave the data
 * source and filter untouched and fall back to the eternity interval.
 */
@Test
public void test_filtration_noJoinAndInterval() {
    final DataSource table = new TableDataSource("test");
    final VirtualColumnRegistry registry = VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE);
    final Pair<DataSource, Filtration> result = DruidQuery.getFiltration(table, selectorFilter, registry);
    verify(result, table, selectorFilter, Intervals.ETERNITY);
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) TableDataSource(org.apache.druid.query.TableDataSource) DataSource(org.apache.druid.query.DataSource) TableDataSource(org.apache.druid.query.TableDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Test(org.junit.Test)

Aggregations

JoinDataSource (org.apache.druid.query.JoinDataSource)13 Filtration (org.apache.druid.sql.calcite.filtration.Filtration)13 DataSource (org.apache.druid.query.DataSource)12 TableDataSource (org.apache.druid.query.TableDataSource)8 Test (org.junit.Test)8 IntArrayList (it.unimi.dsi.fastutil.ints.IntArrayList)4 ArrayList (java.util.ArrayList)4 Nullable (javax.annotation.Nullable)4 QueryDataSource (org.apache.druid.query.QueryDataSource)4 PostAggregator (org.apache.druid.query.aggregation.PostAggregator)4 HashMap (java.util.HashMap)3 Granularity (org.apache.druid.java.util.common.granularity.Granularity)3 DimensionExpression (org.apache.druid.sql.calcite.aggregation.DimensionExpression)3 VisibleForTesting (com.google.common.annotations.VisibleForTesting)2 TimeseriesQuery (org.apache.druid.query.timeseries.TimeseriesQuery)2 Preconditions (com.google.common.base.Preconditions)1 ImmutableList (com.google.common.collect.ImmutableList)1 ImmutableSortedMap (com.google.common.collect.ImmutableSortedMap)1 Iterables (com.google.common.collect.Iterables)1 Iterators (com.google.common.collect.Iterators)1