
Example 16 with RowSignature

use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.

From the class TimeseriesQueryQueryToolChest, the method getNullTimeseriesResultValue:

private Result<TimeseriesResultValue> getNullTimeseriesResultValue(TimeseriesQuery query) {
    List<AggregatorFactory> aggregatorSpecs = query.getAggregatorSpecs();
    Aggregator[] aggregators = new Aggregator[aggregatorSpecs.size()];
    String[] aggregatorNames = new String[aggregatorSpecs.size()];
    RowSignature aggregatorsSignature = RowSignature.builder().addAggregators(aggregatorSpecs, RowSignature.Finalization.UNKNOWN).build();
    for (int i = 0; i < aggregatorSpecs.size(); i++) {
        aggregators[i] = aggregatorSpecs.get(i).factorize(RowBasedColumnSelectorFactory.create(RowAdapters.standardRow(), () -> new MapBasedRow(null, null), aggregatorsSignature, false));
        aggregatorNames[i] = aggregatorSpecs.get(i).getName();
    }
    final DateTime start = query.getIntervals().isEmpty() ? DateTimes.EPOCH : query.getIntervals().get(0).getStart();
    TimeseriesResultBuilder bob = new TimeseriesResultBuilder(start);
    for (int i = 0; i < aggregatorSpecs.size(); i++) {
        bob.addMetric(aggregatorNames[i], aggregators[i].get());
        aggregators[i].close();
    }
    return bob.build();
}
Also used : MapBasedRow(org.apache.druid.data.input.MapBasedRow) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) Aggregator(org.apache.druid.query.aggregation.Aggregator) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) RowSignature(org.apache.druid.segment.column.RowSignature) DateTime(org.joda.time.DateTime)
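
As a rough sketch of the same builder pattern (hypothetical aggregator names, not taken from the method above), a RowSignature built from aggregator factories can be inspected by name or position:

import com.google.common.collect.ImmutableList;
import java.util.List;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
import org.apache.druid.segment.column.RowSignature;

public class RowSignatureFromAggregatorsSketch {
    public static void main(String[] args) {
        // Hypothetical aggregator specs, standing in for query.getAggregatorSpecs().
        List<AggregatorFactory> aggregatorSpecs = ImmutableList.of(
                new CountAggregatorFactory("rows"),
                new LongSumAggregatorFactory("added", "added"));

        // Same construction as above: intermediate (non-finalized) aggregator types,
        // since finalization is UNKNOWN.
        RowSignature signature = RowSignature.builder()
                .addAggregators(aggregatorSpecs, RowSignature.Finalization.UNKNOWN)
                .build();

        System.out.println(signature.getColumnNames());       // [rows, added]
        System.out.println(signature.getColumnType("rows"));  // Optional[LONG] (count's intermediate type)
        System.out.println(signature.indexOf("added"));       // 1
    }
}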

Example 17 with RowSignature

use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.

From the class ScanQueryQueryToolChest, the method resultArraySignature:

@Override
public RowSignature resultArraySignature(final ScanQuery query) {
    if (query.getColumns() == null || query.getColumns().isEmpty()) {
        // No specific columns were requested, so we can't predict which columns will come back;
        // the array-based signature will include none of them.
        return RowSignature.empty();
    } else {
        final RowSignature.Builder builder = RowSignature.builder();
        if (query.withNonNullLegacy(scanQueryConfig).isLegacy()) {
            builder.add(ScanQueryEngine.LEGACY_TIMESTAMP_KEY, null);
        }
        for (String columnName : query.getColumns()) {
            // With the Scan query we only know the columnType for virtual columns. Let's report those, at least.
            final ColumnType columnType;
            final VirtualColumn virtualColumn = query.getVirtualColumns().getVirtualColumn(columnName);
            if (virtualColumn != null) {
                columnType = virtualColumn.capabilities(columnName).toColumnType();
            } else {
                // Unknown type. In the future, it would be nice to have a way to fill these in.
                columnType = null;
            }
            builder.add(columnName, columnType);
        }
        return builder.build();
    }
}
Also used : ColumnType(org.apache.druid.segment.column.ColumnType) VirtualColumn(org.apache.druid.segment.VirtualColumn) RowSignature(org.apache.druid.segment.column.RowSignature)
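
A small sketch of the nullable-type behavior used above (hypothetical column names): passing a null type to the builder simply marks the column's type as unknown:

import java.util.Optional;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;

public class UnknownColumnTypeSketch {
    public static void main(String[] args) {
        // "page" mirrors the non-virtual-column branch (type unknown);
        // "v0" mirrors the virtual-column branch (type reported).
        RowSignature signature = RowSignature.builder()
                .add("page", null)
                .add("v0", ColumnType.STRING)
                .build();

        Optional<ColumnType> pageType = signature.getColumnType("page");
        System.out.println(pageType.isPresent());                       // false: type is unknown
        System.out.println(signature.getColumnType("v0").orElse(null)); // STRING
    }
}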

Example 18 with RowSignature

use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.

From the class ClientQuerySegmentWalker, the method toInlineDataSource:

/**
 * Convert the results of a particular query into a materialized (List-based) InlineDataSource.
 *
 * @param query            the query
 * @param results          query results
 * @param toolChest        toolchest for the query
 * @param limitAccumulator an accumulator for tracking the number of accumulated rows in all subqueries for a
 *                         particular master query
 * @param limit            user-configured limit. If negative, will be treated as {@link Integer#MAX_VALUE}.
 *                         If zero, this method will throw an error immediately.
 * @throws ResourceLimitExceededException if the limit is exceeded
 */
private static <T, QueryType extends Query<T>> InlineDataSource toInlineDataSource(final QueryType query, final Sequence<T> results, final QueryToolChest<T, QueryType> toolChest, final AtomicInteger limitAccumulator, final int limit) {
    final int limitToUse = limit < 0 ? Integer.MAX_VALUE : limit;
    if (limitAccumulator.get() >= limitToUse) {
        throw ResourceLimitExceededException.withMessage("Cannot issue subquery, maximum[%d] reached", limitToUse);
    }
    final RowSignature signature = toolChest.resultArraySignature(query);
    final List<Object[]> resultList = new ArrayList<>();
    toolChest.resultsAsArrays(query, results).accumulate(resultList, (acc, in) -> {
        if (limitAccumulator.getAndIncrement() >= limitToUse) {
            throw ResourceLimitExceededException.withMessage("Subquery generated results beyond maximum[%d]", limitToUse);
        }
        acc.add(in);
        return acc;
    });
    return InlineDataSource.fromIterable(resultList, signature);
}
Also used : ArrayList(java.util.ArrayList) RowSignature(org.apache.druid.segment.column.RowSignature)
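
A minimal sketch of the final step above (hypothetical columns and rows): pairing Object[] rows with the RowSignature produced by the toolchest to form an InlineDataSource:

import java.util.Arrays;
import java.util.List;
import org.apache.druid.query.InlineDataSource;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;

public class InlineDataSourceSketch {
    public static void main(String[] args) {
        // Signature describing each position of the Object[] rows.
        RowSignature signature = RowSignature.builder()
                .add("country", ColumnType.STRING)
                .add("edits", ColumnType.LONG)
                .build();

        // Rows in the same positional order as the signature.
        List<Object[]> rows = Arrays.asList(
                new Object[]{"US", 10L},
                new Object[]{"FR", 3L});

        InlineDataSource dataSource = InlineDataSource.fromIterable(rows, signature);
        System.out.println(dataSource.getRowSignature().getColumnNames()); // [country, edits]
    }
}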

Example 19 with RowSignature

use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.

From the class ArrayConcatSqlAggregator, the method toDruidAggregation:

@Nullable
@Override
public Aggregation toDruidAggregation(PlannerContext plannerContext, RowSignature rowSignature, VirtualColumnRegistry virtualColumnRegistry, RexBuilder rexBuilder, String name, AggregateCall aggregateCall, Project project, List<Aggregation> existingAggregations, boolean finalizeAggregations) {
    final List<RexNode> arguments = aggregateCall.getArgList().stream().map(i -> Expressions.fromFieldAccess(rowSignature, project, i)).collect(Collectors.toList());
    Integer maxSizeBytes = null;
    if (arguments.size() > 1) {
        RexNode maxBytes = arguments.get(1);
        if (!maxBytes.isA(SqlKind.LITERAL)) {
            // maxBytes must be a literal
            return null;
        }
        maxSizeBytes = ((Number) RexLiteral.value(maxBytes)).intValue();
    }
    final DruidExpression arg = Expressions.toDruidExpression(plannerContext, rowSignature, arguments.get(0));
    final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
    final String fieldName;
    final ColumnType druidType = Calcites.getValueTypeForRelDataTypeFull(aggregateCall.getType());
    if (druidType == null || !druidType.isArray()) {
        // must be an array
        return null;
    }
    final String initialvalue = ExpressionType.fromColumnTypeStrict(druidType).asTypeString() + "[]";
    if (arg.isDirectColumnAccess()) {
        fieldName = arg.getDirectColumn();
    } else {
        VirtualColumn vc = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(plannerContext, arg, druidType);
        fieldName = vc.getOutputName();
    }
    if (aggregateCall.isDistinct()) {
        return Aggregation.create(new ExpressionLambdaAggregatorFactory(name, ImmutableSet.of(fieldName), null, initialvalue, null, true, false, false, StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", fieldName), StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", name), null, null, maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null, macroTable));
    } else {
        return Aggregation.create(new ExpressionLambdaAggregatorFactory(name, ImmutableSet.of(fieldName), null, initialvalue, null, true, false, false, StringUtils.format("array_concat(\"__acc\", \"%s\")", fieldName), StringUtils.format("array_concat(\"__acc\", \"%s\")", name), null, null, maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null, macroTable));
    }
}
Also used : Project(org.apache.calcite.rel.core.Project) SqlAggregator(org.apache.druid.sql.calcite.aggregation.SqlAggregator) ReturnTypes(org.apache.calcite.sql.type.ReturnTypes) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) HumanReadableBytes(org.apache.druid.java.util.common.HumanReadableBytes) Optionality(org.apache.calcite.util.Optionality) RexNode(org.apache.calcite.rex.RexNode) ExpressionType(org.apache.druid.math.expr.ExpressionType) VirtualColumnRegistry(org.apache.druid.sql.calcite.rel.VirtualColumnRegistry) PlannerContext(org.apache.druid.sql.calcite.planner.PlannerContext) Nullable(javax.annotation.Nullable) ImmutableSet(com.google.common.collect.ImmutableSet) SqlKind(org.apache.calcite.sql.SqlKind) ExpressionLambdaAggregatorFactory(org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory) InferTypes(org.apache.calcite.sql.type.InferTypes) RexBuilder(org.apache.calcite.rex.RexBuilder) RexLiteral(org.apache.calcite.rex.RexLiteral) VirtualColumn(org.apache.druid.segment.VirtualColumn) SqlFunctionCategory(org.apache.calcite.sql.SqlFunctionCategory) StringUtils(org.apache.druid.java.util.common.StringUtils) Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) Collectors(java.util.stream.Collectors) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) List(java.util.List) RowSignature(org.apache.druid.segment.column.RowSignature) OperandTypes(org.apache.calcite.sql.type.OperandTypes) ColumnType(org.apache.druid.segment.column.ColumnType) AggregateCall(org.apache.calcite.rel.core.AggregateCall) SqlAggFunction(org.apache.calcite.sql.SqlAggFunction) Calcites(org.apache.druid.sql.calcite.planner.Calcites) Expressions(org.apache.druid.sql.calcite.expression.Expressions)
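
As a rough illustration of how the fold's initial accumulator value is built above (an assumption based on the asTypeString() call, not checked against a live planner run), an ARRAY<LONG> result type produces an empty typed-array expression literal:

import org.apache.druid.math.expr.ExpressionType;
import org.apache.druid.segment.column.ColumnType;

public class ArrayConcatInitialValueSketch {
    public static void main(String[] args) {
        ColumnType druidType = ColumnType.LONG_ARRAY;
        // Same construction as toDruidAggregation: a typed empty-array expression literal
        // (expected to look like "ARRAY<LONG>[]") used as the fold's starting value.
        String initialValue = ExpressionType.fromColumnTypeStrict(druidType).asTypeString() + "[]";
        System.out.println(initialValue);
    }
}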

Example 20 with RowSignature

use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.

From the class TimeArithmeticOperatorConversion, the method toDruidExpression:

@Override
public DruidExpression toDruidExpression(final PlannerContext plannerContext, final RowSignature rowSignature, final RexNode rexNode) {
    final RexCall call = (RexCall) rexNode;
    final List<RexNode> operands = call.getOperands();
    if (operands.size() != 2) {
        throw new IAE("Expected 2 args, got %s", operands.size());
    }
    final RexNode leftRexNode = operands.get(0);
    final RexNode rightRexNode = operands.get(1);
    final DruidExpression leftExpr = Expressions.toDruidExpression(plannerContext, rowSignature, leftRexNode);
    final DruidExpression rightExpr = Expressions.toDruidExpression(plannerContext, rowSignature, rightRexNode);
    if (leftExpr == null || rightExpr == null) {
        return null;
    }
    final ColumnType outputType = Calcites.getColumnTypeForRelDataType(rexNode.getType());
    if (rightRexNode.getType().getFamily() == SqlTypeFamily.INTERVAL_YEAR_MONTH) {
        // Period is a value in months.
        return DruidExpression.ofExpression(outputType, DruidExpression.functionCall("timestamp_shift"), ImmutableList.of(leftExpr, rightExpr.map(simpleExtraction -> null, expression -> rightRexNode.isA(SqlKind.LITERAL) ? StringUtils.format("'P%sM'", RexLiteral.value(rightRexNode)) : StringUtils.format("concat('P', %s, 'M')", expression)), DruidExpression.ofLiteral(ColumnType.LONG, DruidExpression.numberLiteral(direction > 0 ? 1 : -1)), DruidExpression.ofStringLiteral(plannerContext.getTimeZone().getID())));
    } else if (rightRexNode.getType().getFamily() == SqlTypeFamily.INTERVAL_DAY_TIME) {
        // Period is a value in milliseconds. Ignore time zone.
        return DruidExpression.ofExpression(outputType, (args) -> StringUtils.format("(%s %s %s)", args.get(0).getExpression(), direction > 0 ? "+" : "-", args.get(1).getExpression()), ImmutableList.of(leftExpr, rightExpr));
    } else if ((leftRexNode.getType().getFamily() == SqlTypeFamily.TIMESTAMP || leftRexNode.getType().getFamily() == SqlTypeFamily.DATE) && (rightRexNode.getType().getFamily() == SqlTypeFamily.TIMESTAMP || rightRexNode.getType().getFamily() == SqlTypeFamily.DATE)) {
        // Calcite represents both TIMESTAMP - INTERVAL and TIMESTAMPDIFF (TIMESTAMP - TIMESTAMP)
        // with a MINUS_DATE operator, so we must tell which case we're in by checking the type of
        // the second argument.
        Preconditions.checkState(direction < 0, "Time arithmetic require direction < 0");
        if (call.getType().getFamily() == SqlTypeFamily.INTERVAL_YEAR_MONTH) {
            return DruidExpression.ofExpression(outputType, DruidExpression.functionCall("subtract_months"), ImmutableList.of(leftExpr, rightExpr, DruidExpression.ofStringLiteral(plannerContext.getTimeZone().getID())));
        } else {
            return DruidExpression.ofExpression(outputType, (args) -> StringUtils.format("(%s %s %s)", args.get(0).getExpression(), "-", args.get(1).getExpression()), ImmutableList.of(leftExpr, rightExpr));
        }
    } else {
        // Shouldn't happen if subclasses are behaving.
        throw new ISE("Got unexpected type period type family[%s]", rightRexNode.getType().getFamily());
    }
}
Also used : RexCall(org.apache.calcite.rex.RexCall) SqlKind(org.apache.calcite.sql.SqlKind) SqlTypeFamily(org.apache.calcite.sql.type.SqlTypeFamily) SqlOperatorConversion(org.apache.druid.sql.calcite.expression.SqlOperatorConversion) RexLiteral(org.apache.calcite.rex.RexLiteral) StringUtils(org.apache.druid.java.util.common.StringUtils) ISE(org.apache.druid.java.util.common.ISE) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) SqlStdOperatorTable(org.apache.calcite.sql.fun.SqlStdOperatorTable) RexNode(org.apache.calcite.rex.RexNode) RowSignature(org.apache.druid.segment.column.RowSignature) PlannerContext(org.apache.druid.sql.calcite.planner.PlannerContext) ColumnType(org.apache.druid.segment.column.ColumnType) Preconditions(com.google.common.base.Preconditions) SqlOperator(org.apache.calcite.sql.SqlOperator) IAE(org.apache.druid.java.util.common.IAE) Calcites(org.apache.druid.sql.calcite.planner.Calcites) Expressions(org.apache.druid.sql.calcite.expression.Expressions)
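
As a rough illustration of the two interval branches above (assumed from reading the code, not taken from actual planner output): a SQL expression like __time + INTERVAL '3' MONTH goes through the year-month branch and becomes a native expression along the lines of

timestamp_shift("__time", 'P3M', 1, 'UTC')

while __time + INTERVAL '1' DAY takes the day-time branch and is emitted as plain millisecond arithmetic, roughly ("__time" + 86400000).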

Aggregations

RowSignature (org.apache.druid.segment.column.RowSignature) 46
ColumnType (org.apache.druid.segment.column.ColumnType) 17
List (java.util.List) 14
Test (org.junit.Test) 13
Collectors (java.util.stream.Collectors) 12
Nullable (javax.annotation.Nullable) 11
PlannerContext (org.apache.druid.sql.calcite.planner.PlannerContext) 11
DruidExpression (org.apache.druid.sql.calcite.expression.DruidExpression) 10
Expressions (org.apache.druid.sql.calcite.expression.Expressions) 10
Project (org.apache.calcite.rel.core.Project) 9
RexLiteral (org.apache.calcite.rex.RexLiteral) 9
RexNode (org.apache.calcite.rex.RexNode) 9
SqlKind (org.apache.calcite.sql.SqlKind) 9
ISE (org.apache.druid.java.util.common.ISE) 9
Aggregation (org.apache.druid.sql.calcite.aggregation.Aggregation) 9
ArrayList (java.util.ArrayList) 8
StringUtils (org.apache.druid.java.util.common.StringUtils) 8
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory) 8
ImmutableList (com.google.common.collect.ImmutableList) 6
ImmutableSet (com.google.common.collect.ImmutableSet) 6