
Example 1 with ResolvedFunction

Use of io.trino.metadata.ResolvedFunction in project trino by trinodb.

From the class LiteralEncoder, method toExpression:

public Expression toExpression(Session session, Object object, Type type) {
    requireNonNull(type, "type is null");
    if (object instanceof Expression) {
        return (Expression) object;
    }
    if (object == null) {
        if (type.equals(UNKNOWN)) {
            return new NullLiteral();
        }
        return new Cast(new NullLiteral(), toSqlType(type), false, true);
    }
    checkArgument(Primitives.wrap(type.getJavaType()).isInstance(object), "object.getClass (%s) and type.getJavaType (%s) do not agree", object.getClass(), type.getJavaType());
    if (type.equals(TINYINT)) {
        return new GenericLiteral("TINYINT", object.toString());
    }
    if (type.equals(SMALLINT)) {
        return new GenericLiteral("SMALLINT", object.toString());
    }
    if (type.equals(INTEGER)) {
        return new LongLiteral(object.toString());
    }
    if (type.equals(BIGINT)) {
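        // A value in the 32-bit integer range would otherwise be analyzed as INTEGER, so an explicit BIGINT generic literal preserves the intended type.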
        LongLiteral expression = new LongLiteral(object.toString());
        if (expression.getValue() >= Integer.MIN_VALUE && expression.getValue() <= Integer.MAX_VALUE) {
            return new GenericLiteral("BIGINT", object.toString());
        }
        return new LongLiteral(object.toString());
    }
    if (type.equals(DOUBLE)) {
        Double value = (Double) object;
        if (value.isNaN()) {
            return FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(QualifiedName.of("nan")).build();
        }
        if (value.equals(Double.NEGATIVE_INFINITY)) {
            return ArithmeticUnaryExpression.negative(FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(QualifiedName.of("infinity")).build());
        }
        if (value.equals(Double.POSITIVE_INFINITY)) {
            return FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(QualifiedName.of("infinity")).build();
        }
        return new DoubleLiteral(object.toString());
    }
    if (type.equals(REAL)) {
        Float value = intBitsToFloat(((Long) object).intValue());
        if (value.isNaN()) {
            return new Cast(FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(QualifiedName.of("nan")).build(), toSqlType(REAL));
        }
        if (value.equals(Float.NEGATIVE_INFINITY)) {
            return ArithmeticUnaryExpression.negative(new Cast(FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(QualifiedName.of("infinity")).build(), toSqlType(REAL)));
        }
        if (value.equals(Float.POSITIVE_INFINITY)) {
            return new Cast(FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(QualifiedName.of("infinity")).build(), toSqlType(REAL));
        }
        return new GenericLiteral("REAL", value.toString());
    }
    if (type instanceof DecimalType) {
        String string;
        if (isShortDecimal(type)) {
            string = Decimals.toString((long) object, ((DecimalType) type).getScale());
        } else {
            string = Decimals.toString((Int128) object, ((DecimalType) type).getScale());
        }
        return new Cast(new DecimalLiteral(string), toSqlType(type));
    }
    if (type instanceof VarcharType) {
        VarcharType varcharType = (VarcharType) type;
        Slice value = (Slice) object;
        if (varcharType.isUnbounded()) {
            return new GenericLiteral("VARCHAR", value.toStringUtf8());
        }
        StringLiteral stringLiteral = new StringLiteral(value.toStringUtf8());
        int boundedLength = varcharType.getBoundedLength();
        int valueLength = SliceUtf8.countCodePoints(value);
        if (boundedLength == valueLength) {
            return stringLiteral;
        }
        if (boundedLength > valueLength) {
            return new Cast(stringLiteral, toSqlType(type), false, true);
        }
        throw new IllegalArgumentException(format("Value [%s] does not fit in type %s", value.toStringUtf8(), varcharType));
    }
    if (type instanceof CharType) {
        StringLiteral stringLiteral = new StringLiteral(((Slice) object).toStringUtf8());
        return new Cast(stringLiteral, toSqlType(type), false, true);
    }
    if (type.equals(BOOLEAN)) {
        return new BooleanLiteral(object.toString());
    }
    if (type.equals(DATE)) {
        return new GenericLiteral("DATE", new SqlDate(toIntExact((Long) object)).toString());
    }
    if (type instanceof TimestampType) {
        TimestampType timestampType = (TimestampType) type;
        String representation;
        if (timestampType.isShort()) {
            representation = TimestampToVarcharCast.cast(timestampType.getPrecision(), (Long) object).toStringUtf8();
        } else {
            representation = TimestampToVarcharCast.cast(timestampType.getPrecision(), (LongTimestamp) object).toStringUtf8();
        }
        return new TimestampLiteral(representation);
    }
    if (type instanceof TimestampWithTimeZoneType) {
        TimestampWithTimeZoneType timestampWithTimeZoneType = (TimestampWithTimeZoneType) type;
        String representation;
        if (timestampWithTimeZoneType.isShort()) {
            representation = TimestampWithTimeZoneToVarcharCast.cast(timestampWithTimeZoneType.getPrecision(), (long) object).toStringUtf8();
        } else {
            representation = TimestampWithTimeZoneToVarcharCast.cast(timestampWithTimeZoneType.getPrecision(), (LongTimestampWithTimeZone) object).toStringUtf8();
        }
        if (!object.equals(parseTimestampWithTimeZone(timestampWithTimeZoneType.getPrecision(), representation))) {
            // Certain (point in time, time zone) pairs cannot be represented as a TIMESTAMP literal, as the literal uses local date/time in given time zone.
            // Thus, during DST backwards change by e.g. 1 hour, the local time is "repeated" twice and thus one local date/time logically corresponds to two
            // points in time, leaving one of them non-referencable.
            // TODO (https://github.com/trinodb/trino/issues/5781) consider treating such values as illegal
        } else {
            return new TimestampLiteral(representation);
        }
    }
    // If the stack value is not a simple type, encode the stack value in a block
    if (!type.getJavaType().isPrimitive() && type.getJavaType() != Slice.class && type.getJavaType() != Block.class) {
        object = nativeValueToBlock(type, object);
    }
    if (object instanceof Block) {
        SliceOutput output = new DynamicSliceOutput(toIntExact(((Block) object).getSizeInBytes()));
        BlockSerdeUtil.writeBlock(plannerContext.getBlockEncodingSerde(), output, (Block) object);
        object = output.slice();
    // This if condition will evaluate to true: object instanceof Slice && !type.equals(VARCHAR)
    }
    Type argumentType = typeForMagicLiteral(type);
    Expression argument;
    if (object instanceof Slice) {
        // HACK: we need to serialize VARBINARY in a format that can be embedded in an expression to be
        // able to encode it in the plan that gets sent to workers.
        // We do this by transforming the in-memory varbinary into a call to from_base64(<base64-encoded value>)
        Slice encoded = VarbinaryFunctions.toBase64((Slice) object);
        argument = FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(QualifiedName.of("from_base64")).addArgument(VARCHAR, new StringLiteral(encoded.toStringUtf8())).build();
    } else {
        argument = toExpression(session, object, argumentType);
    }
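    // LITERAL_FUNCTION_NAME is the internal "magic literal" function; the coercion resolved here turns the encoded argument back into a value of the target type at runtime.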
    ResolvedFunction resolvedFunction = plannerContext.getMetadata().getCoercion(session, QualifiedName.of(LITERAL_FUNCTION_NAME), argumentType, type);
    return FunctionCallBuilder.resolve(session, plannerContext.getMetadata()).setName(resolvedFunction.toQualifiedName()).addArgument(argumentType, argument).build();
}
Also used : TimestampWithTimeZoneToVarcharCast(io.trino.operator.scalar.timestamptz.TimestampWithTimeZoneToVarcharCast) TimestampToVarcharCast(io.trino.operator.scalar.timestamp.TimestampToVarcharCast) Cast(io.trino.sql.tree.Cast) SliceOutput(io.airlift.slice.SliceOutput) DynamicSliceOutput(io.airlift.slice.DynamicSliceOutput) VarcharType(io.trino.spi.type.VarcharType) BooleanLiteral(io.trino.sql.tree.BooleanLiteral) GenericLiteral(io.trino.sql.tree.GenericLiteral) DecimalLiteral(io.trino.sql.tree.DecimalLiteral) TimestampWithTimeZoneType(io.trino.spi.type.TimestampWithTimeZoneType) TimestampType(io.trino.spi.type.TimestampType) DynamicSliceOutput(io.airlift.slice.DynamicSliceOutput) Int128(io.trino.spi.type.Int128) TimestampLiteral(io.trino.sql.tree.TimestampLiteral) LongLiteral(io.trino.sql.tree.LongLiteral) ResolvedFunction(io.trino.metadata.ResolvedFunction) Float.intBitsToFloat(java.lang.Float.intBitsToFloat) TimestampWithTimeZoneType(io.trino.spi.type.TimestampWithTimeZoneType) TypeSignatureTranslator.toSqlType(io.trino.sql.analyzer.TypeSignatureTranslator.toSqlType) DecimalType(io.trino.spi.type.DecimalType) Type(io.trino.spi.type.Type) TimestampType(io.trino.spi.type.TimestampType) VarcharType(io.trino.spi.type.VarcharType) CharType(io.trino.spi.type.CharType) StringLiteral(io.trino.sql.tree.StringLiteral) ArithmeticUnaryExpression(io.trino.sql.tree.ArithmeticUnaryExpression) Expression(io.trino.sql.tree.Expression) Slice(io.airlift.slice.Slice) SqlDate(io.trino.spi.type.SqlDate) DecimalType(io.trino.spi.type.DecimalType) Utils.nativeValueToBlock(io.trino.spi.predicate.Utils.nativeValueToBlock) Block(io.trino.spi.block.Block) DoubleLiteral(io.trino.sql.tree.DoubleLiteral) CharType(io.trino.spi.type.CharType) NullLiteral(io.trino.sql.tree.NullLiteral)
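
For illustration, a minimal usage sketch of the method above. It assumes the encoder is constructed from a PlannerContext and that a Session is available; the variable names plannerContext and session are placeholders, not part of the snippet.

// Hedged usage sketch (not part of the Trino source above).
LiteralEncoder literalEncoder = new LiteralEncoder(plannerContext); // assumed constructor
Expression bigintLiteral = literalEncoder.toExpression(session, 42L, BIGINT);      // GenericLiteral("BIGINT", "42")
Expression nanLiteral = literalEncoder.toExpression(session, Double.NaN, DOUBLE);  // becomes a call to nan()
Expression nullVarchar = literalEncoder.toExpression(session, null, VARCHAR);      // CAST(NULL AS varchar)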

Example 2 with ResolvedFunction

Use of io.trino.metadata.ResolvedFunction in project trino by trinodb.

From the class PushPartialAggregationThroughExchange, method split:

private PlanNode split(AggregationNode node, Context context) {
    // otherwise, add a partial and final with an exchange in between
    Map<Symbol, AggregationNode.Aggregation> intermediateAggregation = new HashMap<>();
    Map<Symbol, AggregationNode.Aggregation> finalAggregation = new HashMap<>();
    for (Map.Entry<Symbol, AggregationNode.Aggregation> entry : node.getAggregations().entrySet()) {
        AggregationNode.Aggregation originalAggregation = entry.getValue();
        ResolvedFunction resolvedFunction = originalAggregation.getResolvedFunction();
        AggregationFunctionMetadata functionMetadata = plannerContext.getMetadata().getAggregationFunctionMetadata(resolvedFunction);
        List<Type> intermediateTypes = functionMetadata.getIntermediateTypes().stream().map(plannerContext.getTypeManager()::getType).collect(toImmutableList());
        Type intermediateType = intermediateTypes.size() == 1 ? intermediateTypes.get(0) : RowType.anonymous(intermediateTypes);
        Symbol intermediateSymbol = context.getSymbolAllocator().newSymbol(resolvedFunction.getSignature().getName(), intermediateType);
        checkState(originalAggregation.getOrderingScheme().isEmpty(), "Aggregate with ORDER BY does not support partial aggregation");
        intermediateAggregation.put(intermediateSymbol, new AggregationNode.Aggregation(resolvedFunction, originalAggregation.getArguments(), originalAggregation.isDistinct(), originalAggregation.getFilter(), originalAggregation.getOrderingScheme(), originalAggregation.getMask()));
        // rewrite final aggregation in terms of intermediate function
        finalAggregation.put(entry.getKey(), new AggregationNode.Aggregation(resolvedFunction, ImmutableList.<Expression>builder().add(intermediateSymbol.toSymbolReference()).addAll(originalAggregation.getArguments().stream().filter(LambdaExpression.class::isInstance).collect(toImmutableList())).build(), false, Optional.empty(), Optional.empty(), Optional.empty()));
    }
    PlanNode partial = new AggregationNode(context.getIdAllocator().getNextId(), node.getSource(), intermediateAggregation, node.getGroupingSets(),
            // through the exchange may or may not preserve these properties. Hence, it is safest to drop preGroupedSymbols here.
            ImmutableList.of(), PARTIAL, node.getHashSymbol(), node.getGroupIdSymbol());
    return new AggregationNode(node.getId(), partial, finalAggregation, node.getGroupingSets(),
            // through the exchange may or may not preserve these properties. Hence, it is safest to drop preGroupedSymbols here.
            ImmutableList.of(), FINAL, node.getHashSymbol(), node.getGroupIdSymbol());
}
Also used : HashMap(java.util.HashMap) Symbol(io.trino.sql.planner.Symbol) ResolvedFunction(io.trino.metadata.ResolvedFunction) AggregationNode(io.trino.sql.planner.plan.AggregationNode) AggregationFunctionMetadata(io.trino.metadata.AggregationFunctionMetadata) SystemSessionProperties.preferPartialAggregation(io.trino.SystemSessionProperties.preferPartialAggregation) Type(io.trino.spi.type.Type) RowType(io.trino.spi.type.RowType) PlanNode(io.trino.sql.planner.plan.PlanNode) HashMap(java.util.HashMap) Map(java.util.Map)
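
The split above relies on Metadata#getAggregationFunctionMetadata to discover the intermediate (partial) state type of each aggregation. A hedged sketch of that lookup in isolation, assuming metadata, typeManager, and session are available from the planner context:

// Hedged sketch (not part of the rule above): inspect the intermediate state type of avg(bigint).
ResolvedFunction avg = metadata.resolveFunction(session, QualifiedName.of("avg"), fromTypes(BIGINT));
AggregationFunctionMetadata avgMetadata = metadata.getAggregationFunctionMetadata(avg);
List<Type> intermediateTypes = avgMetadata.getIntermediateTypes().stream()
        .map(typeManager::getType)
        .collect(toImmutableList());
// A single intermediate type is used as-is; multiple types are wrapped in an anonymous row,
// mirroring the logic in split() above.
Type intermediateType = intermediateTypes.size() == 1 ? intermediateTypes.get(0) : RowType.anonymous(intermediateTypes);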

Example 3 with ResolvedFunction

Use of io.trino.metadata.ResolvedFunction in project trino by trinodb.

From the class PruneCountAggregationOverScalar, method apply:

@Override
public Result apply(AggregationNode parent, Captures captures, Context context) {
    if (!parent.hasDefaultOutput() || parent.getOutputSymbols().size() != 1) {
        return Result.empty();
    }
    FunctionId countFunctionId = metadata.resolveFunction(context.getSession(), QualifiedName.of("count"), ImmutableList.of()).getFunctionId();
    Map<Symbol, AggregationNode.Aggregation> assignments = parent.getAggregations();
    for (Map.Entry<Symbol, AggregationNode.Aggregation> entry : assignments.entrySet()) {
        AggregationNode.Aggregation aggregation = entry.getValue();
        requireNonNull(aggregation, "aggregation is null");
        ResolvedFunction resolvedFunction = aggregation.getResolvedFunction();
        if (!countFunctionId.equals(resolvedFunction.getFunctionId())) {
            return Result.empty();
        }
    }
    if (!assignments.isEmpty() && isScalar(parent.getSource(), context.getLookup())) {
        return Result.ofPlanNode(new ValuesNode(parent.getId(), parent.getOutputSymbols(), ImmutableList.of(new Row(ImmutableList.of(new GenericLiteral("BIGINT", "1"))))));
    }
    return Result.empty();
}
Also used : FunctionId(io.trino.metadata.FunctionId) ValuesNode(io.trino.sql.planner.plan.ValuesNode) Symbol(io.trino.sql.planner.Symbol) ResolvedFunction(io.trino.metadata.ResolvedFunction) AggregationNode(io.trino.sql.planner.plan.AggregationNode) Row(io.trino.sql.tree.Row) Map(java.util.Map) GenericLiteral(io.trino.sql.tree.GenericLiteral)
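
Note that the rule above matches count() by FunctionId, i.e. by the exact resolved implementation rather than by name. The same check extracted into a small helper (hypothetical helper name; assumes a Metadata and Session are available):

// Hedged sketch: true when the aggregation is the zero-argument count() function.
private static boolean isCountStar(Metadata metadata, Session session, AggregationNode.Aggregation aggregation)
{
    FunctionId countFunctionId = metadata.resolveFunction(session, QualifiedName.of("count"), ImmutableList.of()).getFunctionId();
    return aggregation.getResolvedFunction().getFunctionId().equals(countFunctionId);
}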

Example 4 with ResolvedFunction

Use of io.trino.metadata.ResolvedFunction in project trino by trinodb.

From the class RewriteSpatialPartitioningAggregation, method apply:

@Override
public Result apply(AggregationNode node, Captures captures, Context context) {
    ResolvedFunction spatialPartitioningFunction = plannerContext.getMetadata().resolveFunction(context.getSession(), QualifiedName.of(NAME), fromTypeSignatures(GEOMETRY_TYPE_SIGNATURE, INTEGER.getTypeSignature()));
    ResolvedFunction stEnvelopeFunction = plannerContext.getMetadata().resolveFunction(context.getSession(), QualifiedName.of("ST_Envelope"), fromTypeSignatures(GEOMETRY_TYPE_SIGNATURE));
    ImmutableMap.Builder<Symbol, Aggregation> aggregations = ImmutableMap.builder();
    Symbol partitionCountSymbol = context.getSymbolAllocator().newSymbol("partition_count", INTEGER);
    ImmutableMap.Builder<Symbol, Expression> envelopeAssignments = ImmutableMap.builder();
    for (Map.Entry<Symbol, Aggregation> entry : node.getAggregations().entrySet()) {
        Aggregation aggregation = entry.getValue();
        String name = aggregation.getResolvedFunction().getSignature().getName();
        if (name.equals(NAME) && aggregation.getArguments().size() == 1) {
            Expression geometry = getOnlyElement(aggregation.getArguments());
            Symbol envelopeSymbol = context.getSymbolAllocator().newSymbol("envelope", plannerContext.getTypeManager().getType(GEOMETRY_TYPE_SIGNATURE));
            if (isStEnvelopeFunctionCall(geometry, stEnvelopeFunction)) {
                envelopeAssignments.put(envelopeSymbol, geometry);
            } else {
                envelopeAssignments.put(envelopeSymbol, FunctionCallBuilder.resolve(context.getSession(), plannerContext.getMetadata()).setName(QualifiedName.of("ST_Envelope")).addArgument(GEOMETRY_TYPE_SIGNATURE, geometry).build());
            }
            aggregations.put(entry.getKey(), new Aggregation(spatialPartitioningFunction, ImmutableList.of(envelopeSymbol.toSymbolReference(), partitionCountSymbol.toSymbolReference()), false, Optional.empty(), Optional.empty(), aggregation.getMask()));
        } else {
            aggregations.put(entry);
        }
    }
    return Result.ofPlanNode(new AggregationNode(node.getId(), new ProjectNode(context.getIdAllocator().getNextId(), node.getSource(), Assignments.builder().putIdentities(node.getSource().getOutputSymbols()).put(partitionCountSymbol, new LongLiteral(Integer.toString(getHashPartitionCount(context.getSession())))).putAll(envelopeAssignments.buildOrThrow()).build()), aggregations.buildOrThrow(), node.getGroupingSets(), node.getPreGroupedSymbols(), node.getStep(), node.getHashSymbol(), node.getGroupIdSymbol()));
}
Also used : LongLiteral(io.trino.sql.tree.LongLiteral) ResolvedFunction(io.trino.metadata.ResolvedFunction) Symbol(io.trino.sql.planner.Symbol) AggregationNode(io.trino.sql.planner.plan.AggregationNode) ImmutableMap(com.google.common.collect.ImmutableMap) Aggregation(io.trino.sql.planner.plan.AggregationNode.Aggregation) Expression(io.trino.sql.tree.Expression) ProjectNode(io.trino.sql.planner.plan.ProjectNode) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)
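
The helper isStEnvelopeFunctionCall is referenced above but not shown. A plausible hedged sketch (not the actual Trino source) compares the argument expression against the resolved ST_Envelope function by qualified name:

// Hypothetical sketch of the helper referenced above; the real implementation may differ.
private static boolean isStEnvelopeFunctionCall(Expression expression, ResolvedFunction stEnvelopeFunction)
{
    return expression instanceof FunctionCall
            && ((FunctionCall) expression).getName().equals(stEnvelopeFunction.toQualifiedName());
}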

Example 5 with ResolvedFunction

Use of io.trino.metadata.ResolvedFunction in project trino by trinodb.

From the class SimplifyCountOverConstant, method apply:

@Override
public Result apply(AggregationNode parent, Captures captures, Context context) {
    ProjectNode child = captures.get(CHILD);
    boolean changed = false;
    Map<Symbol, AggregationNode.Aggregation> aggregations = new LinkedHashMap<>(parent.getAggregations());
    ResolvedFunction countFunction = plannerContext.getMetadata().resolveFunction(context.getSession(), QualifiedName.of("count"), ImmutableList.of());
    for (Entry<Symbol, AggregationNode.Aggregation> entry : parent.getAggregations().entrySet()) {
        Symbol symbol = entry.getKey();
        AggregationNode.Aggregation aggregation = entry.getValue();
        if (isCountOverConstant(context.getSession(), aggregation, child.getAssignments())) {
            changed = true;
            aggregations.put(symbol, new AggregationNode.Aggregation(countFunction, ImmutableList.of(), false, Optional.empty(), Optional.empty(), aggregation.getMask()));
        }
    }
    if (!changed) {
        return Result.empty();
    }
    return Result.ofPlanNode(new AggregationNode(parent.getId(), child, aggregations, parent.getGroupingSets(), ImmutableList.of(), parent.getStep(), parent.getHashSymbol(), parent.getGroupIdSymbol()));
}
Also used : Symbol(io.trino.sql.planner.Symbol) ResolvedFunction(io.trino.metadata.ResolvedFunction) ProjectNode(io.trino.sql.planner.plan.ProjectNode) AggregationNode(io.trino.sql.planner.plan.AggregationNode) LinkedHashMap(java.util.LinkedHashMap)
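
The check isCountOverConstant is referenced above but not shown. A simplified hedged sketch follows (not the actual Trino source, which also receives the Session and can fold more complex constant expressions):

// Hypothetical, simplified sketch of the check referenced above.
private static boolean isCountOverConstant(AggregationNode.Aggregation aggregation, Assignments inputs)
{
    BoundSignature signature = aggregation.getResolvedFunction().getSignature();
    if (!signature.getName().equals("count") || signature.getArgumentTypes().size() != 1) {
        return false;
    }
    Expression argument = aggregation.getArguments().get(0);
    if (argument instanceof SymbolReference) {
        argument = inputs.get(Symbol.from(argument));
    }
    return argument instanceof Literal && !(argument instanceof NullLiteral);
}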

Aggregations

ResolvedFunction (io.trino.metadata.ResolvedFunction): 51
Test (org.testng.annotations.Test): 31
WindowNode (io.trino.sql.planner.plan.WindowNode): 21
ImmutableList (com.google.common.collect.ImmutableList): 20
ImmutableMap (com.google.common.collect.ImmutableMap): 20
Symbol (io.trino.sql.planner.Symbol): 20
TypeSignatureProvider.fromTypes (io.trino.sql.analyzer.TypeSignatureProvider.fromTypes): 18
DEFAULT_FRAME (io.trino.sql.planner.plan.WindowNode.Frame.DEFAULT_FRAME): 18
QualifiedName (io.trino.sql.tree.QualifiedName): 18
Optional (java.util.Optional): 18
OrderingScheme (io.trino.sql.planner.OrderingScheme): 17
PlanMatchPattern.values (io.trino.sql.planner.assertions.PlanMatchPattern.values): 17
BaseRuleTest (io.trino.sql.planner.iterative.rule.test.BaseRuleTest): 17
ComparisonExpression (io.trino.sql.tree.ComparisonExpression): 15
FunctionCall (io.trino.sql.tree.FunctionCall): 14
SortOrder (io.trino.spi.connector.SortOrder): 12
Type (io.trino.spi.type.Type): 12
Expression (io.trino.sql.tree.Expression): 11
BIGINT (io.trino.spi.type.BigintType.BIGINT): 10
PlanMatchPattern.topNRanking (io.trino.sql.planner.assertions.PlanMatchPattern.topNRanking): 9