
Example 26 with DataSource

use of org.apache.druid.query.DataSource in project druid by druid-io.

the class SegmentManagerBroadcastJoinIndexedTableTest method testLoadIndexedTable.

@Test
public void testLoadIndexedTable() throws IOException, SegmentLoadingException {
    final DataSource dataSource = new GlobalTableDataSource(TABLE_NAME);
    Assert.assertFalse(joinableFactory.isDirectlyJoinable(dataSource));
    final String version = DateTimes.nowUtc().toString();
    IncrementalIndex data = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv");
    final String interval = "2011-01-12T00:00:00.000Z/2011-05-01T00:00:00.000Z";
    DataSegment segment = createSegment(data, interval, version);
    Assert.assertTrue(segmentManager.loadSegment(segment, false, SegmentLazyLoadFailCallback.NOOP));
    Assert.assertTrue(joinableFactory.isDirectlyJoinable(dataSource));
    Optional<Joinable> maybeJoinable = makeJoinable(dataSource);
    Assert.assertTrue(maybeJoinable.isPresent());
    Joinable joinable = maybeJoinable.get();
    // cardinality is currently tied to the number of rows,
    Assert.assertEquals(1210, joinable.getCardinality("market"));
    Assert.assertEquals(1210, joinable.getCardinality("placement"));
    Assert.assertEquals(Optional.of(ImmutableSet.of("preferred")), joinable.getCorrelatedColumnValues("market", "spot", "placement", Long.MAX_VALUE, false));
    Optional<byte[]> bytes = joinableFactory.computeJoinCacheKey(dataSource, JOIN_CONDITION_ANALYSIS);
    Assert.assertTrue(bytes.isPresent());
    assertSegmentIdEquals(segment.getId(), bytes.get());
    // dropping the segment should make the table no longer available
    segmentManager.dropSegment(segment);
    maybeJoinable = makeJoinable(dataSource);
    Assert.assertFalse(maybeJoinable.isPresent());
    bytes = joinableFactory.computeJoinCacheKey(dataSource, JOIN_CONDITION_ANALYSIS);
    Assert.assertFalse(bytes.isPresent());
}
Also used : IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) Joinable(org.apache.druid.segment.join.Joinable) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) DataSegment(org.apache.druid.timeline.DataSegment) DataSource(org.apache.druid.query.DataSource) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
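
The makeJoinable helper used above is defined elsewhere in the test class. A minimal sketch of what it plausibly looks like, assuming a broadcast-join JoinableFactory and a hypothetical "j." column prefix (the real test builds its own condition expression):

private Optional<Joinable> makeJoinable(final DataSource dataSource) {
    // Hypothetical equi-join condition on the broadcast table's "market" column.
    final JoinConditionAnalysis condition = JoinConditionAnalysis.forExpression(
        "market == \"j.market\"",  // Druid expression; quoted identifier is the right side
        "j.",                      // prefix applied to right-hand (broadcast) columns
        ExprMacroTable.nil());
    return joinableFactory.build(dataSource, condition);
}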

Example 27 with DataSource

use of org.apache.druid.query.DataSource in project druid by druid-io.

the class DruidJoinQueryRel method toDruidQuery.

@Override
public DruidQuery toDruidQuery(final boolean finalizeAggregations) {
    final DruidRel<?> leftDruidRel = (DruidRel<?>) left;
    final DruidQuery leftQuery = Preconditions.checkNotNull(leftDruidRel.toDruidQuery(false), "leftQuery");
    final RowSignature leftSignature = leftQuery.getOutputRowSignature();
    final DataSource leftDataSource;
    final DruidRel<?> rightDruidRel = (DruidRel<?>) right;
    final DruidQuery rightQuery = Preconditions.checkNotNull(rightDruidRel.toDruidQuery(false), "rightQuery");
    final RowSignature rightSignature = rightQuery.getOutputRowSignature();
    final DataSource rightDataSource;
    if (computeLeftRequiresSubquery(leftDruidRel)) {
        leftDataSource = new QueryDataSource(leftQuery.getQuery());
        if (leftFilter != null) {
            throw new ISE("Filter on left table is supposed to be null if left child is a query source");
        }
    } else {
        leftDataSource = leftQuery.getDataSource();
    }
    if (computeRightRequiresSubquery(rightDruidRel)) {
        rightDataSource = new QueryDataSource(rightQuery.getQuery());
    } else {
        rightDataSource = rightQuery.getDataSource();
    }
    final Pair<String, RowSignature> prefixSignaturePair = computeJoinRowSignature(leftSignature, rightSignature);
    VirtualColumnRegistry virtualColumnRegistry = VirtualColumnRegistry.create(prefixSignaturePair.rhs, getPlannerContext().getExprMacroTable());
    getPlannerContext().setJoinExpressionVirtualColumnRegistry(virtualColumnRegistry);
    // Generate the condition for this join as a Druid expression.
    final DruidExpression condition = Expressions.toDruidExpression(getPlannerContext(), prefixSignaturePair.rhs, joinRel.getCondition());
    // Unset the registry to avoid any virtual-column registry leaks in case there are multiple Druid queries for the SQL.
    // This should be fixed soon with interface changes to SqlOperatorConversion and the Expressions bridge class.
    getPlannerContext().setJoinExpressionVirtualColumnRegistry(null);
    // The null check below also quiets static code analysis.
    if (condition == null) {
        throw new CannotBuildQueryException(joinRel, joinRel.getCondition());
    }
    return partialQuery.build(JoinDataSource.create(leftDataSource, rightDataSource, prefixSignaturePair.lhs, condition.getExpression(), toDruidJoinType(joinRel.getJoinType()), getDimFilter(getPlannerContext(), leftSignature, leftFilter), getPlannerContext().getExprMacroTable()), prefixSignaturePair.rhs, getPlannerContext(), getCluster().getRexBuilder(), finalizeAggregations, virtualColumnRegistry);
}
Also used : QueryDataSource(org.apache.druid.query.QueryDataSource) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) ISE(org.apache.druid.java.util.common.ISE) RowSignature(org.apache.druid.segment.column.RowSignature) JoinDataSource(org.apache.druid.query.JoinDataSource) DataSource(org.apache.druid.query.DataSource) TableDataSource(org.apache.druid.query.TableDataSource)
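
Outside the planner, the same seven-argument factory method can be called directly. A minimal sketch with hypothetical table and column names (the condition is a Druid expression in which quoted identifiers reference columns, and "j0." prefixes the right side):

// Standalone sketch of the JoinDataSource.create call made in toDruidQuery above.
final DataSource joined = JoinDataSource.create(
    new TableDataSource("fact"),        // left: regular table
    new GlobalTableDataSource("dim"),   // right: broadcast (global) table
    "j0.",                              // prefix for right-hand columns
    "\"k\" == \"j0.k\"",                // join condition expression
    JoinType.INNER,
    null,                               // no left filter
    ExprMacroTable.nil());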

Example 28 with DataSource

use of org.apache.druid.query.DataSource in project druid by druid-io.

the class DruidQuery method toScanQuery.

/**
 * Return this query as a Scan query, or null if this query is not compatible with Scan.
 *
 * @return query or null
 */
@Nullable
private ScanQuery toScanQuery(final QueryFeatureInspector queryFeatureInspector) {
    if (grouping != null) {
        // Scan cannot GROUP BY.
        return null;
    }
    if (outputRowSignature.size() == 0) {
        // Should never do a scan query without any columns that we're interested in. This is probably a planner bug.
        throw new ISE("Cannot convert to Scan query without any columns.");
    }
    final Pair<DataSource, Filtration> dataSourceFiltrationPair = getFiltration(dataSource, filter, virtualColumnRegistry);
    final DataSource newDataSource = dataSourceFiltrationPair.lhs;
    final Filtration filtration = dataSourceFiltrationPair.rhs;
    final List<ScanQuery.OrderBy> orderByColumns;
    long scanOffset = 0L;
    long scanLimit = 0L;
    if (sorting != null) {
        scanOffset = sorting.getOffsetLimit().getOffset();
        if (sorting.getOffsetLimit().hasLimit()) {
            final long limit = sorting.getOffsetLimit().getLimit();
            if (limit == 0) {
                // Can't handle zero limit (the Scan query engine would treat it as unlimited).
                return null;
            }
            scanLimit = limit;
        }
        orderByColumns = sorting.getOrderBys().stream().map(orderBy -> new ScanQuery.OrderBy(orderBy.getDimension(), orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING ? ScanQuery.Order.DESCENDING : ScanQuery.Order.ASCENDING)).collect(Collectors.toList());
    } else {
        orderByColumns = Collections.emptyList();
    }
    if (!queryFeatureInspector.feature(QueryFeature.SCAN_CAN_ORDER_BY_NON_TIME) && (orderByColumns.size() > 1 || orderByColumns.stream().anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME)))) {
        // Cannot handle this ordering.
        // Scan cannot ORDER BY non-time columns.
        plannerContext.setPlanningError("SQL query requires order by non-time column %s that is not supported.", orderByColumns);
        return null;
    }
    // Compute the list of columns to select, sorted and deduped.
    final SortedSet<String> scanColumns = new TreeSet<>(outputRowSignature.getColumnNames());
    orderByColumns.forEach(column -> scanColumns.add(column.getColumnName()));
    return new ScanQuery(newDataSource, filtration.getQuerySegmentSpec(), getVirtualColumns(true), ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST, 0, scanOffset, scanLimit, null, orderByColumns, filtration.getDimFilter(), ImmutableList.copyOf(scanColumns), false, ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}
Also used : GroupByRules(org.apache.druid.sql.calcite.rule.GroupByRules) SortedSet(java.util.SortedSet) RowSignatures(org.apache.druid.sql.calcite.table.RowSignatures) OffsetLimit(org.apache.druid.sql.calcite.planner.OffsetLimit) Pair(org.apache.druid.java.util.common.Pair) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) OrderByColumnSpec(org.apache.druid.query.groupby.orderby.OrderByColumnSpec) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) RexNode(org.apache.calcite.rex.RexNode) Map(java.util.Map) DimFilterHavingSpec(org.apache.druid.query.groupby.having.DimFilterHavingSpec) InvertedTopNMetricSpec(org.apache.druid.query.topn.InvertedTopNMetricSpec) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) QueryFeatureInspector(org.apache.druid.sql.calcite.run.QueryFeatureInspector) SqlKind(org.apache.calcite.sql.SqlKind) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) DataSource(org.apache.druid.query.DataSource) TopNMetricSpec(org.apache.druid.query.topn.TopNMetricSpec) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) NumericTopNMetricSpec(org.apache.druid.query.topn.NumericTopNMetricSpec) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) ValueType(org.apache.druid.segment.column.ValueType) RelFieldCollation(org.apache.calcite.rel.RelFieldCollation) Collectors(java.util.stream.Collectors) RexInputRef(org.apache.calcite.rex.RexInputRef) QueryDataSource(org.apache.druid.query.QueryDataSource) List(java.util.List) DimFilter(org.apache.druid.query.filter.DimFilter) Sort(org.apache.calcite.rel.core.Sort) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) Optional(java.util.Optional) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) Expressions(org.apache.druid.sql.calcite.expression.Expressions) Project(org.apache.calcite.rel.core.Project) Iterables(com.google.common.collect.Iterables) Granularity(org.apache.druid.java.util.common.granularity.Granularity) HashMap(java.util.HashMap) DimensionTopNMetricSpec(org.apache.druid.query.topn.DimensionTopNMetricSpec) Filter(org.apache.calcite.rel.core.Filter) QueryFeature(org.apache.druid.sql.calcite.run.QueryFeature) Iterators(com.google.common.collect.Iterators) ScanQuery(org.apache.druid.query.scan.ScanQuery) TopNQuery(org.apache.druid.query.topn.TopNQuery) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) HashSet(java.util.HashSet) ColumnHolder(org.apache.druid.segment.column.ColumnHolder) ImmutableList(com.google.common.collect.ImmutableList) Query(org.apache.druid.query.Query) StringComparators(org.apache.druid.query.ordering.StringComparators) PlannerContext(org.apache.druid.sql.calcite.planner.PlannerContext) JoinDataSource(org.apache.druid.query.JoinDataSource) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) Nonnull(javax.annotation.Nonnull) ImmutableSortedMap(com.google.common.collect.ImmutableSortedMap) Nullable(javax.annotation.Nullable) Filtration(org.apache.druid.sql.calcite.filtration.Filtration) RelDataType(org.apache.calcite.rel.type.RelDataType) VirtualColumns(org.apache.druid.segment.VirtualColumns) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) StringComparator(org.apache.druid.query.ordering.StringComparator) RexBuilder(org.apache.calcite.rex.RexBuilder) VirtualColumn(org.apache.druid.segment.VirtualColumn) Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) Ints(com.google.common.primitives.Ints) Aggregate(org.apache.calcite.rel.core.Aggregate) Granularities(org.apache.druid.java.util.common.granularity.Granularities) IntList(it.unimi.dsi.fastutil.ints.IntList) Types(org.apache.druid.segment.column.Types) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Preconditions(com.google.common.base.Preconditions) AggregateCall(org.apache.calcite.rel.core.AggregateCall) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Calcites(org.apache.druid.sql.calcite.planner.Calcites) Collections(java.util.Collections)
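
To make the offset and limit handling concrete: assuming a hypothetical SQL query SELECT dim1, __time FROM t ORDER BY __time LIMIT 10 OFFSET 5, the sorting state yields scanOffset = 5 and scanLimit = 10 (a LIMIT 0 would abort the conversion, since the Scan engine treats zero as unlimited), and the final constructor call would look like this sketch:

// Sketch of the resulting constructor call with the hypothetical values filled in.
return new ScanQuery(
    newDataSource,
    filtration.getQuerySegmentSpec(),
    getVirtualColumns(true),
    ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST,
    0,                                    // batch size: 0 selects the default
    5L,                                   // scanOffset from OFFSET 5
    10L,                                  // scanLimit from LIMIT 10
    null,                                 // overall time order: carried by orderByColumns instead
    orderByColumns,                       // [__time ASC]
    filtration.getDimFilter(),
    ImmutableList.of("__time", "dim1"),   // scanColumns, sorted and deduped
    false,                                // legacy mode off
    ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));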

Example 29 with DataSource

use of org.apache.druid.query.DataSource in project druid by druid-io.

the class DruidQuery method toTimeseriesQuery.

/**
 * Return this query as a Timeseries query, or null if this query is not compatible with Timeseries.
 *
 * @return query or null
 */
@Nullable
private TimeseriesQuery toTimeseriesQuery(final QueryFeatureInspector queryFeatureInspector) {
    if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TIMESERIES) || grouping == null || grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs()) || grouping.getHavingFilter() != null) {
        return null;
    }
    if (sorting != null && sorting.getOffsetLimit().hasOffset()) {
        // Timeseries cannot handle offsets.
        return null;
    }
    final Granularity queryGranularity;
    final boolean descending;
    int timeseriesLimit = 0;
    final Map<String, Object> theContext = new HashMap<>();
    if (grouping.getDimensions().isEmpty()) {
        queryGranularity = Granularities.ALL;
        descending = false;
    } else if (grouping.getDimensions().size() == 1) {
        final DimensionExpression dimensionExpression = Iterables.getOnlyElement(grouping.getDimensions());
        queryGranularity = Expressions.toQueryGranularity(dimensionExpression.getDruidExpression(), plannerContext.getExprMacroTable());
        if (queryGranularity == null) {
            // Timeseries only applies if the single dimension is granular __time.
            return null;
        }
        theContext.put(TimeseriesQuery.CTX_TIMESTAMP_RESULT_FIELD, Iterables.getOnlyElement(grouping.getDimensions()).toDimensionSpec().getOutputName());
        if (sorting != null) {
            if (sorting.getOffsetLimit().hasLimit()) {
                final long limit = sorting.getOffsetLimit().getLimit();
                if (limit == 0) {
                    // Can't handle zero limit (the Timeseries query engine would treat it as unlimited).
                    return null;
                }
                timeseriesLimit = Ints.checkedCast(limit);
            }
            switch(sorting.getTimeSortKind(dimensionExpression.getOutputName())) {
                case UNORDERED:
                case TIME_ASCENDING:
                    descending = false;
                    break;
                case TIME_DESCENDING:
                    descending = true;
                    break;
                default:
                    // Sorting on a metric, maybe. Timeseries cannot handle.
                    return null;
            }
        } else {
            // No limitSpec.
            descending = false;
        }
    } else {
        // More than one dimension, timeseries cannot handle.
        return null;
    }
    // Skip empty buckets when the granularity is not ALL, or when the query was originally
    // a groupBy whose grouping dimensions were removed in Grouping#applyProject.
    if (!Granularities.ALL.equals(queryGranularity) || grouping.hasGroupingDimensionsDropped()) {
        theContext.put(TimeseriesQuery.SKIP_EMPTY_BUCKETS, true);
    }
    theContext.putAll(plannerContext.getQueryContext());
    final Pair<DataSource, Filtration> dataSourceFiltrationPair = getFiltration(dataSource, filter, virtualColumnRegistry);
    final DataSource newDataSource = dataSourceFiltrationPair.lhs;
    final Filtration filtration = dataSourceFiltrationPair.rhs;
    final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
    if (sorting != null && sorting.getProjection() != null) {
        postAggregators.addAll(sorting.getProjection().getPostAggregators());
    }
    return new TimeseriesQuery(newDataSource, filtration.getQuerySegmentSpec(), descending, getVirtualColumns(false), filtration.getDimFilter(), queryGranularity, grouping.getAggregatorFactories(), postAggregators, timeseriesLimit, ImmutableSortedMap.copyOf(theContext));
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) HashMap(java.util.HashMap) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) ArrayList(java.util.ArrayList) DimensionExpression(org.apache.druid.sql.calcite.aggregation.DimensionExpression) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DataSource(org.apache.druid.query.DataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Nullable(javax.annotation.Nullable)
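
To see when the single-dimension branch fires: a SQL GROUP BY FLOOR(__time TO DAY) plans its one dimension as a granular __time expression, which Expressions.toQueryGranularity recognizes; grouping on any other column makes it return null, and the query falls through to groupBy. A minimal sketch of that check, with a hypothetical day-floor expression (the DruidExpression.fromExpression factory and the timestamp_floor syntax are assumptions here):

// timestamp_floor("__time", 'P1D', ...) is the granular-__time shape that
// toQueryGranularity recognizes; any other expression yields null.
final DruidExpression dayFloor = DruidExpression.fromExpression(
    "timestamp_floor(\"__time\",'P1D',null,'UTC')");
final Granularity granularity =
    Expressions.toQueryGranularity(dayFloor, plannerContext.getExprMacroTable());
// granularity is a day-period granularity here; null would mean "not granular __time".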

Example 30 with DataSource

use of org.apache.druid.query.DataSource in project druid by druid-io.

the class DruidQueryTest method test_filtration_intervalInQueryFilter.

@Test
public void test_filtration_intervalInQueryFilter() {
    DataSource dataSource = new TableDataSource("test");
    Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(dataSource, filterWithInterval, VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE));
    verify(pair, dataSource, selectorFilter, Intervals.utc(100, 200));
}
Also used : Filtration(org.apache.druid.sql.calcite.filtration.Filtration) TableDataSource(org.apache.druid.query.TableDataSource) DataSource(org.apache.druid.query.DataSource) JoinDataSource(org.apache.druid.query.JoinDataSource) Test(org.junit.Test)
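
The filterWithInterval and selectorFilter fixtures are defined elsewhere in DruidQueryTest. A plausible reconstruction, assuming an AND of an interval filter on __time and a selector, which getFiltration is expected to split into the segment-spec interval plus the remaining selector:

// Hypothetical fixtures consistent with the assertion above.
DimFilter selectorFilter = new SelectorDimFilter("column", "value", null);
DimFilter filterWithInterval = new AndDimFilter(ImmutableList.of(
    new IntervalDimFilter(
        ColumnHolder.TIME_COLUMN_NAME,
        ImmutableList.of(Intervals.utc(100, 200)),
        null),
    selectorFilter));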

Aggregations

DataSource (org.apache.druid.query.DataSource) 36
TableDataSource (org.apache.druid.query.TableDataSource) 23
Test (org.junit.Test) 18
JoinDataSource (org.apache.druid.query.JoinDataSource) 17
QueryDataSource (org.apache.druid.query.QueryDataSource) 16
GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource) 14
Filtration (org.apache.druid.sql.calcite.filtration.Filtration) 12
ArrayList (java.util.ArrayList) 10
InlineDataSource (org.apache.druid.query.InlineDataSource) 7
HashMap (java.util.HashMap) 6
Optional (java.util.Optional) 6
LookupDataSource (org.apache.druid.query.LookupDataSource) 6
UnionDataSource (org.apache.druid.query.UnionDataSource) 6
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery) 6
List (java.util.List) 5
Nullable (javax.annotation.Nullable) 5
DimFilter (org.apache.druid.query.filter.DimFilter) 5
ImmutableMap (com.google.common.collect.ImmutableMap) 4
IntArrayList (it.unimi.dsi.fastutil.ints.IntArrayList) 4
ISE (org.apache.druid.java.util.common.ISE) 4