
Example 1 with TableWriterNode

Use of io.prestosql.sql.planner.plan.TableWriterNode in project hetu-core by openlookeng.

From the class TestExternalFunctionPushDownChecker, method testTableWriterNodeWithExternalCall:

@Test(expectedExceptions = ExternalFunctionPushDownChecker.IllegalExternalFunctionUsageException.class,
        expectedExceptionsMessageRegExp = "The external function jdbc.v1.foo does not support to push down to data source for this query.")
public void testTableWriterNodeWithExternalCall() {
    TableWriterNode.DeleteTarget deleteTarget = new TableWriterNode.DeleteTarget(tableHandle, new SchemaTableName("sch", "tab"));
    PlanNode node = new TableWriterNode(
            idAllocator.getNextId(), builder.values(), deleteTarget,
            columnA, columnB,
            ImmutableList.of(columnA, columnB), ImmutableList.of("a", "b"),
            Optional.empty(),
            Optional.of(new StatisticAggregations(
                    ImmutableMap.of(columnA, new AggregationNode.Aggregation(externalFooCall1, externalFooCall1.getArguments(), false, Optional.empty(), Optional.empty(), Optional.empty())),
                    ImmutableList.of(columnA))),
            Optional.of(new StatisticAggregationsDescriptor<>(ImmutableMap.of(), ImmutableMap.of(), ImmutableMap.of())));
    validatePlan(node);
}
Also used : PlanNode(io.prestosql.spi.plan.PlanNode) StatisticAggregationsDescriptor(io.prestosql.sql.planner.plan.StatisticAggregationsDescriptor) TableWriterNode(io.prestosql.sql.planner.plan.TableWriterNode) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) StatisticAggregations(io.prestosql.sql.planner.plan.StatisticAggregations) Test(org.testng.annotations.Test) BasePlanTest(io.prestosql.sql.planner.assertions.BasePlanTest)
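
The failure here is asserted through TestNG's expectedExceptions and expectedExceptionsMessageRegExp attributes. Below is a minimal, self-contained sketch of the same assertion pattern; the validate method is a hypothetical stand-in for the checker's failure path, not the hetu-core ExternalFunctionPushDownChecker itself.

import org.testng.annotations.Test;

public class ExpectedExceptionSketch {
    // Hypothetical validator: the real checker walks the plan and rejects
    // external function calls that cannot be pushed down to the data source.
    static void validate(boolean containsExternalCall) {
        if (containsExternalCall) {
            throw new IllegalStateException("The external function jdbc.v1.foo does not support to push down to data source for this query.");
        }
    }

    @Test(expectedExceptions = IllegalStateException.class,
            expectedExceptionsMessageRegExp = ".*does not support to push down.*")
    public void testFailsOnExternalCall() {
        validate(true);
    }
}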

Example 2 with TableWriterNode

Use of io.prestosql.sql.planner.plan.TableWriterNode in project hetu-core by openlookeng.

From the class TestExternalFunctionPushDownChecker, method testTableWriterNode. Unlike Example 1, the statistics aggregation uses the built-in sum call rather than an external function, so the plan is expected to validate without error:

@Test
public void testTableWriterNode() {
    TableWriterNode.DeleteTarget deleteTarget = new TableWriterNode.DeleteTarget(tableHandle, new SchemaTableName("sch", "tab"));
    PlanNode node = new TableWriterNode(
            idAllocator.getNextId(), builder.values(), deleteTarget,
            columnA, columnB,
            ImmutableList.of(columnA, columnB), ImmutableList.of("a", "b"),
            Optional.empty(),
            Optional.of(new StatisticAggregations(
                    ImmutableMap.of(columnA, new AggregationNode.Aggregation(sumCall, sumCall.getArguments(), false, Optional.empty(), Optional.empty(), Optional.empty())),
                    ImmutableList.of(columnA))),
            Optional.of(new StatisticAggregationsDescriptor<>(ImmutableMap.of(), ImmutableMap.of(), ImmutableMap.of())));
    validatePlan(node);
}
Also used : PlanNode(io.prestosql.spi.plan.PlanNode) StatisticAggregationsDescriptor(io.prestosql.sql.planner.plan.StatisticAggregationsDescriptor) TableWriterNode(io.prestosql.sql.planner.plan.TableWriterNode) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) StatisticAggregations(io.prestosql.sql.planner.plan.StatisticAggregations) Test(org.testng.annotations.Test) BasePlanTest(io.prestosql.sql.planner.assertions.BasePlanTest)

Example 3 with TableWriterNode

Use of io.prestosql.sql.planner.plan.TableWriterNode in project hetu-core by openlookeng.

From the class LogicalPlanner, method createTableWriterPlan:

private RelationPlan createTableWriterPlan(Analysis analysis, RelationPlan plan, WriterTarget target, List<String> columnNames, Optional<NewTableLayout> writeTableLayout, TableStatisticsMetadata statisticsMetadata) {
    PlanNode source = plan.getRoot();
    if (!analysis.isCreateTableAsSelectWithData()) {
        source = new LimitNode(idAllocator.getNextId(), source, 0L, false);
    }
    // todo this should be checked in analysis
    writeTableLayout.ifPresent(layout -> {
        if (!ImmutableSet.copyOf(columnNames).containsAll(layout.getPartitionColumns())) {
            throw new PrestoException(NOT_SUPPORTED, "INSERT must write all distribution columns: " + layout.getPartitionColumns());
        }
    });
    List<Symbol> symbols = plan.getFieldMappings();
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream()
                .mapToInt(columnNames::indexOf)
                .mapToObj(symbols::get)
                .forEach(partitionFunctionArguments::add);
        List<Symbol> outputLayout = new ArrayList<>(symbols);
        PartitioningHandle partitioningHandle = writeTableLayout.get().getPartitioning().orElse(FIXED_HASH_DISTRIBUTION);
        partitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(partitioningHandle, partitionFunctionArguments), outputLayout));
    }
    if (!statisticsMetadata.isEmpty()) {
        verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
        Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
                .collect(toImmutableMap(Entry::getKey, Entry::getValue));
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(planSymbolAllocator, metadata);
        // partial aggregation is run within the TableWriterOperator to calculate the statistics for
        // the data consumed by the TableWriterOperator
        // final aggregation is run within the TableFinishOperator to summarize the statistics
        // collected by the partial aggregations from all of the writer nodes
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();
        PlanNode writerNode = new TableWriterNode(
                idAllocator.getNextId(), source, target,
                planSymbolAllocator.newSymbol("partialrows", BIGINT),
                planSymbolAllocator.newSymbol("fragment", VARBINARY),
                symbols, columnNames, partitioningScheme,
                Optional.of(partialAggregation),
                Optional.of(result.getDescriptor().map(aggregations.getMappings()::get)));
        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(), writerNode, target,
                planSymbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));
        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
    }
    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(
                    idAllocator.getNextId(), source, target,
                    planSymbolAllocator.newSymbol("partialrows", BIGINT),
                    planSymbolAllocator.newSymbol("fragment", VARBINARY),
                    symbols, columnNames, partitioningScheme,
                    Optional.empty(), Optional.empty()),
            target,
            planSymbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(), Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
}
Also used : TableStatisticAggregation(io.prestosql.sql.planner.StatisticsAggregationPlanner.TableStatisticAggregation) Symbol(io.prestosql.spi.plan.Symbol) ArrayList(java.util.ArrayList) PrestoException(io.prestosql.spi.PrestoException) TableFinishNode(io.prestosql.sql.planner.plan.TableFinishNode) StatisticAggregations(io.prestosql.sql.planner.plan.StatisticAggregations) PlanNode(io.prestosql.spi.plan.PlanNode) LimitNode(io.prestosql.spi.plan.LimitNode) TableWriterNode(io.prestosql.sql.planner.plan.TableWriterNode)
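
The columnToSymbolMap built above pairs each output column name with the plan symbol that produces it, positionally, before the statistics aggregation is planned. Below is a minimal sketch of that pairing, assuming the zip in the source is the statically imported Guava Streams.zip, and using plain Strings in place of Symbol:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Streams;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

public class ColumnSymbolZipSketch {
    public static void main(String[] args) {
        // Column names and field symbols are positionally aligned, as enforced by
        // the columnNames.size() == symbols.size() verify in createTableWriterPlan.
        List<String> columnNames = List.of("orderkey", "totalprice");
        List<String> symbols = List.of("orderkey_0", "totalprice_1");

        Map<String, String> columnToSymbol = Streams.zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
                .collect(ImmutableMap.toImmutableMap(Entry::getKey, Entry::getValue));

        System.out.println(columnToSymbol); // {orderkey=orderkey_0, totalprice=totalprice_1}
    }
}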

Example 4 with TableWriterNode

Use of io.prestosql.sql.planner.plan.TableWriterNode in project hetu-core by openlookeng.

From the class PushDeleteAsInsertIntoConnector, method apply:

@Override
public Result apply(TableFinishNode node, Captures captures, Context context) {
    if (!withFilter) {
        TableScanNode tableScan = captures.get(TABLE_SCAN);
        return metadata.applyDelete(context.getSession(), tableScan.getTable())
                .map(newHandle -> new TableDeleteNode(context.getIdAllocator().getNextId(), newHandle, getOnlyElement(node.getOutputSymbols())))
                .map(Result::ofPlanNode)
                .orElseGet(Result::empty);
    }
    TableWriterNode writerNode = captures.get(WRITER_NODE);
    TableWriterNode.DeleteAsInsertReference deleteTargetRef = (TableWriterNode.DeleteAsInsertReference) writerNode.getTarget();
    if (!deleteTargetRef.getConstraint().isPresent()) {
        // Not expected to reach here.
        return Result.empty();
    }
    Expression predicate = deleteTargetRef.getConstraint().get();
    Expression filtered = ExpressionUtils.filterDeterministicConjuncts(predicate);
    if (!predicate.equals(filtered)) {
        // There were some non-deterministic filters, so the delete cannot be applied directly
        return Result.empty();
    }
    Set<Symbol> allPredicateSymbols = SymbolsExtractor.extractUnique(predicate);
    Map<Symbol, ColumnHandle> columnAssignments = deleteTargetRef.getColumnAssignments();
    Set<Symbol> allColumns = columnAssignments.keySet();
    List<Symbol> predicateColumnSymbols = allPredicateSymbols.stream()
            .filter(allColumns::contains)
            .distinct()
            .collect(Collectors.toList());
    // If all predicate symbols are partitionColumns, then only partition can be deleted directly.
    if (predicateColumnSymbols.isEmpty() || !predicateColumnSymbols.stream().allMatch(symbol -> {
        ColumnHandle columnHandle = columnAssignments.get(symbol);
        return columnHandle != null && columnHandle.isPartitionKey();
    })) {
        return Result.empty();
    }
    FilterNode filterNode = captures.get(FILTER);
    List<Symbol> nonTableSymbols = allPredicateSymbols.stream()
            .filter(symbol -> !allColumns.contains(symbol))
            .collect(Collectors.toList());
    PredicateContext predicateContext = new PredicateContext();
    PlanNode rewrittenSource = SimplePlanRewriter.rewriteWith(
            new ReWriter(columnAssignments.keySet(), nonTableSymbols, context.getLookup(), logicalRowExpressions),
            filterNode,
            predicateContext);
    /**
     * Create the TableDeleteNode with source to evaluate the predicate subqueries
     */
    TableDeleteNode tableDeleteNode = new TableDeleteNode(
            context.getIdAllocator().getNextId(),
            rewrittenSource,
            Optional.of(predicateContext.tablePredicate),
            deleteTargetRef.getHandle(),
            deleteTargetRef.getColumnAssignments(),
            getOnlyElement(node.getOutputSymbols()));
    return Result.ofPlanNode(tableDeleteNode);
}
Also used : TableDeleteNode(io.prestosql.sql.planner.plan.TableDeleteNode) Pattern.typeOf(io.prestosql.matching.Pattern.typeOf) Lookup(io.prestosql.sql.planner.iterative.Lookup) LogicalRowExpressions(io.prestosql.expressions.LogicalRowExpressions) TableFinishNode(io.prestosql.sql.planner.plan.TableFinishNode) Capture.newCapture(io.prestosql.matching.Capture.newCapture) FilterNode(io.prestosql.spi.plan.FilterNode) Map(java.util.Map) OriginalExpressionUtils.castToRowExpression(io.prestosql.sql.relational.OriginalExpressionUtils.castToRowExpression) Patterns.tableWriterNode(io.prestosql.sql.planner.plan.Patterns.tableWriterNode) Patterns.tableFinish(io.prestosql.sql.planner.plan.Patterns.tableFinish) RowExpressionDeterminismEvaluator(io.prestosql.sql.relational.RowExpressionDeterminismEvaluator) SymbolsExtractor(io.prestosql.sql.planner.SymbolsExtractor) TableScanNode(io.prestosql.spi.plan.TableScanNode) Set(java.util.Set) PlanNode(io.prestosql.spi.plan.PlanNode) ProjectNode(io.prestosql.spi.plan.ProjectNode) Collectors(java.util.stream.Collectors) Metadata(io.prestosql.metadata.Metadata) Captures(io.prestosql.matching.Captures) SymbolUtils.toSymbolReference(io.prestosql.sql.planner.SymbolUtils.toSymbolReference) List(java.util.List) ExpressionUtils(io.prestosql.sql.ExpressionUtils) FunctionResolution(io.prestosql.sql.relational.FunctionResolution) Capture(io.prestosql.matching.Capture) Optional(java.util.Optional) Patterns.source(io.prestosql.sql.planner.plan.Patterns.source) TableWriter.target(io.prestosql.sql.planner.plan.Patterns.TableWriter.target) DELETE_TRANSACTIONAL_TABLE_DIRECT(io.prestosql.SystemSessionProperties.DELETE_TRANSACTIONAL_TABLE_DIRECT) Pattern(io.prestosql.matching.Pattern) HashSet(java.util.HashSet) ImmutableList(com.google.common.collect.ImmutableList) Objects.requireNonNull(java.util.Objects.requireNonNull) Session(io.prestosql.Session) SimplePlanRewriter(io.prestosql.sql.planner.plan.SimplePlanRewriter) Patterns.project(io.prestosql.sql.planner.plan.Patterns.project) JoinNode(io.prestosql.spi.plan.JoinNode) Symbol(io.prestosql.spi.plan.Symbol) TableWriterNode(io.prestosql.sql.planner.plan.TableWriterNode) Assignments(io.prestosql.spi.plan.Assignments) Rule(io.prestosql.sql.planner.iterative.Rule) GroupReference(io.prestosql.spi.plan.GroupReference) Patterns.filter(io.prestosql.sql.planner.plan.Patterns.filter) ComparisonExpression(io.prestosql.sql.tree.ComparisonExpression) Iterables.getOnlyElement(com.google.common.collect.Iterables.getOnlyElement) ColumnHandle(io.prestosql.spi.connector.ColumnHandle) RowExpression(io.prestosql.spi.relation.RowExpression) Patterns.tableScan(io.prestosql.sql.planner.plan.Patterns.tableScan) Expression(io.prestosql.sql.tree.Expression)
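
The core of the rule above is the guard that allows the rewrite to a direct TableDeleteNode only when every predicate symbol that refers to a table column maps to a partition key. Below is a minimal, self-contained sketch of that check; ColumnInfo and the column names here are hypothetical stand-ins rather than hetu-core types:

import java.util.List;
import java.util.Map;
import java.util.Set;

public class PartitionOnlyPredicateCheck {
    // Stand-in for ColumnHandle.isPartitionKey() used in the rule above.
    record ColumnInfo(String name, boolean partitionKey) {}

    static boolean canDeleteByPartition(Set<String> predicateSymbols, Map<String, ColumnInfo> columnAssignments) {
        // Symbols not present in the assignments come from subqueries or other
        // sources and are ignored here, as in the rule's nonTableSymbols split.
        List<String> predicateColumnSymbols = predicateSymbols.stream()
                .filter(columnAssignments::containsKey)
                .toList();
        // An empty list means the predicate constrains no table column at all,
        // mirroring the predicateColumnSymbols.isEmpty() bail-out in the rule.
        return !predicateColumnSymbols.isEmpty()
                && predicateColumnSymbols.stream().allMatch(symbol -> columnAssignments.get(symbol).partitionKey());
    }

    public static void main(String[] args) {
        Map<String, ColumnInfo> assignments = Map.of(
                "ds", new ColumnInfo("ds", true),
                "orderkey", new ColumnInfo("orderkey", false));

        System.out.println(canDeleteByPartition(Set.of("ds"), assignments));             // true: partition-only predicate
        System.out.println(canDeleteByPartition(Set.of("ds", "orderkey"), assignments)); // false: touches a non-partition column
    }
}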

Example 5 with TableWriterNode

Use of io.prestosql.sql.planner.plan.TableWriterNode in project hetu-core by openlookeng.

From the class PushPredicateIntoUpdateDelete, method apply:

@Override
public Result apply(TableWriterNode writerNode, Captures captures, Context context) {
    TableWriterNode.WriterTarget target = writerNode.getTarget();
    if (target instanceof TableWriterNode.UpdateDeleteReference) {
        TableWriterNode.UpdateDeleteReference updateReference = (TableWriterNode.UpdateDeleteReference) target;
        if (!updateReference.getConstraint().isPresent()) {
            return Result.empty();
        }
        TableHandle tableHandle = pushPredicateToUpdateDelete(
                updateReference.getHandle(),
                updateReference.getColumnAssignments(),
                updateReference.getConstraint().get(),
                context.getSession(),
                context.getSymbolAllocator().getTypes(),
                metadata);
        if (tableHandle != null) {
            updateReference.setHandle(tableHandle);
        }
    }
    // the target was modified directly (in place), so no new plan node is returned
    return Result.empty();
}
Also used : TableHandle(io.prestosql.spi.metadata.TableHandle) TableWriterNode(io.prestosql.sql.planner.plan.TableWriterNode)

Aggregations

TableWriterNode (io.prestosql.sql.planner.plan.TableWriterNode) 6
PlanNode (io.prestosql.spi.plan.PlanNode) 5
StatisticAggregations (io.prestosql.sql.planner.plan.StatisticAggregations) 3
Session (io.prestosql.Session) 2
Metadata (io.prestosql.metadata.Metadata) 2
SchemaTableName (io.prestosql.spi.connector.SchemaTableName) 2
Symbol (io.prestosql.spi.plan.Symbol) 2
BasePlanTest (io.prestosql.sql.planner.assertions.BasePlanTest) 2
StatisticAggregationsDescriptor (io.prestosql.sql.planner.plan.StatisticAggregationsDescriptor) 2
List (java.util.List) 2
Test (org.testng.annotations.Test) 2
MoreObjects.toStringHelper (com.google.common.base.MoreObjects.toStringHelper) 1
Preconditions.checkState (com.google.common.base.Preconditions.checkState) 1
ImmutableList (com.google.common.collect.ImmutableList) 1
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList) 1
Iterables.getOnlyElement (com.google.common.collect.Iterables.getOnlyElement) 1
DELETE_TRANSACTIONAL_TABLE_DIRECT (io.prestosql.SystemSessionProperties.DELETE_TRANSACTIONAL_TABLE_DIRECT) 1
StatsProvider (io.prestosql.cost.StatsProvider) 1
LogicalRowExpressions (io.prestosql.expressions.LogicalRowExpressions) 1
Capture (io.prestosql.matching.Capture) 1