
Example 11 with PartitioningScheme

Use of io.trino.sql.planner.PartitioningScheme in project trino by trinodb.

In class TestFaultTolerantStageScheduler, method createPlanFragment:

private PlanFragment createPlanFragment() {
    Symbol probeColumnSymbol = new Symbol("probe_column");
    Symbol buildColumnSymbol = new Symbol("build_column");
    TableScanNode tableScan = new TableScanNode(TABLE_SCAN_NODE_ID, TEST_TABLE_HANDLE, ImmutableList.of(probeColumnSymbol), ImmutableMap.of(probeColumnSymbol, new TestingColumnHandle("column")), TupleDomain.none(), Optional.empty(), false, Optional.empty());
    RemoteSourceNode remoteSource = new RemoteSourceNode(new PlanNodeId("remote_source_id"), ImmutableList.of(SOURCE_FRAGMENT_ID_1, SOURCE_FRAGMENT_ID_2), ImmutableList.of(buildColumnSymbol), Optional.empty(), REPLICATE, TASK);
    // Replicated inner join between the scanned probe side and the remote build side
    JoinNode join = new JoinNode(new PlanNodeId("join_id"), INNER, tableScan, remoteSource, ImmutableList.of(), tableScan.getOutputSymbols(), remoteSource.getOutputSymbols(), false, Optional.empty(), Optional.empty(), Optional.empty(), Optional.of(REPLICATED), Optional.empty(), ImmutableMap.of(), Optional.empty());
    // Fragment output: single (unpartitioned) distribution over both symbols
    PartitioningScheme outputPartitioningScheme = new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(probeColumnSymbol, buildColumnSymbol));
    return new PlanFragment(FRAGMENT_ID, join, ImmutableMap.of(probeColumnSymbol, VARCHAR, buildColumnSymbol, VARCHAR), SOURCE_DISTRIBUTION, ImmutableList.of(TABLE_SCAN_NODE_ID), outputPartitioningScheme, ungroupedExecution(), StatsAndCosts.empty(), Optional.empty());
}
Also used : PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) TestingColumnHandle(io.trino.testing.TestingMetadata.TestingColumnHandle) RemoteSourceNode(io.trino.sql.planner.plan.RemoteSourceNode) TableScanNode(io.trino.sql.planner.plan.TableScanNode) Symbol(io.trino.sql.planner.Symbol) JoinNode(io.trino.sql.planner.plan.JoinNode) PartitioningScheme(io.trino.sql.planner.PartitioningScheme) PlanFragment(io.trino.sql.planner.PlanFragment)
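
The fragment's output partitioning above reduces to the two-argument PartitioningScheme constructor over an unpartitioned (single-node) distribution. A minimal sketch, reusing the same constants and symbols as the test method:

    // Minimal sketch: a single (unpartitioned) output distribution carrying both symbols.
    // SINGLE_DISTRIBUTION, probeColumnSymbol and buildColumnSymbol are the names from the test above.
    PartitioningScheme singleNodeOutput = new PartitioningScheme(
            Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()),
            ImmutableList.of(probeColumnSymbol, buildColumnSymbol));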

Example 12 with PartitioningScheme

Use of io.trino.sql.planner.PartitioningScheme in project trino by trinodb.

In class TestDynamicFilterService, method createPlan:

private static PlanFragment createPlan(DynamicFilterId consumedDynamicFilterId, DynamicFilterId producedDynamicFilterId, PartitioningHandle stagePartitioning, ExchangeNode.Type exchangeType) {
    Symbol symbol = new Symbol("column");
    Symbol buildSymbol = new Symbol("buildColumn");
    PlanNodeId tableScanNodeId = new PlanNodeId("plan_id");
    TableScanNode tableScan = TableScanNode.newInstance(tableScanNodeId, TEST_TABLE_HANDLE, ImmutableList.of(symbol), ImmutableMap.of(symbol, new TestingMetadata.TestingColumnHandle("column")), false, Optional.empty());
    FilterNode filterNode = new FilterNode(new PlanNodeId("filter_node_id"), tableScan, createDynamicFilterExpression(session, createTestMetadataManager(), consumedDynamicFilterId, VARCHAR, symbol.toSymbolReference()));
    RemoteSourceNode remote = new RemoteSourceNode(new PlanNodeId("remote_id"), new PlanFragmentId("plan_fragment_id"), ImmutableList.of(buildSymbol), Optional.empty(), exchangeType, RetryPolicy.NONE);
    // Join consuming the dynamic filter on the probe side and producing one from the build side
    JoinNode join = new JoinNode(new PlanNodeId("join_id"), INNER, filterNode, remote, ImmutableList.of(), tableScan.getOutputSymbols(), remote.getOutputSymbols(), false, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), ImmutableMap.of(producedDynamicFilterId, buildSymbol), Optional.empty());
    // Fragment output: single (unpartitioned) distribution over the probe symbol
    PartitioningScheme outputPartitioningScheme = new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(symbol));
    return new PlanFragment(new PlanFragmentId("plan_id"), join, ImmutableMap.of(symbol, VARCHAR), stagePartitioning, ImmutableList.of(tableScanNodeId), outputPartitioningScheme, ungroupedExecution(), StatsAndCosts.empty(), Optional.empty());
}
Also used : PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) TestingColumnHandle(io.trino.spi.connector.TestingColumnHandle) RemoteSourceNode(io.trino.sql.planner.plan.RemoteSourceNode) TableScanNode(io.trino.sql.planner.plan.TableScanNode) Symbol(io.trino.sql.planner.Symbol) JoinNode(io.trino.sql.planner.plan.JoinNode) PartitioningScheme(io.trino.sql.planner.PartitioningScheme) FilterNode(io.trino.sql.planner.plan.FilterNode) PlanFragmentId(io.trino.sql.planner.plan.PlanFragmentId) PlanFragment(io.trino.sql.planner.PlanFragment)
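
A hypothetical call site for this helper might look as follows; the DynamicFilterId values are placeholder names, while SOURCE_DISTRIBUTION and ExchangeNode.Type.REPLICATE are the usual Trino constants:

    // Hypothetical invocation of createPlan (placeholder dynamic filter ids).
    PlanFragment fragment = createPlan(
            new DynamicFilterId("df_consumed"),
            new DynamicFilterId("df_produced"),
            SOURCE_DISTRIBUTION,
            ExchangeNode.Type.REPLICATE);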

Example 13 with PartitioningScheme

Use of io.trino.sql.planner.PartitioningScheme in project trino by trinodb.

In class PushProjectionThroughExchange, method apply:

@Override
public Result apply(ProjectNode project, Captures captures, Context context) {
    ExchangeNode exchange = captures.get(CHILD);
    Set<Symbol> partitioningColumns = exchange.getPartitioningScheme().getPartitioning().getColumns();
    ImmutableList.Builder<PlanNode> newSourceBuilder = ImmutableList.builder();
    ImmutableList.Builder<List<Symbol>> inputsBuilder = ImmutableList.builder();
    for (int i = 0; i < exchange.getSources().size(); i++) {
        Map<Symbol, Symbol> outputToInputMap = mapExchangeOutputToInput(exchange, i);
        Assignments.Builder projections = Assignments.builder();
        ImmutableList.Builder<Symbol> inputs = ImmutableList.builder();
        // Need to retain the partition keys for the exchange
        partitioningColumns.stream().map(outputToInputMap::get).forEach(inputSymbol -> {
            projections.put(inputSymbol, inputSymbol.toSymbolReference());
            inputs.add(inputSymbol);
        });
        // Need to retain the hash symbol for the exchange
        exchange.getPartitioningScheme().getHashColumn().map(outputToInputMap::get).ifPresent(inputSymbol -> {
            projections.put(inputSymbol, inputSymbol.toSymbolReference());
            inputs.add(inputSymbol);
        });
        if (exchange.getOrderingScheme().isPresent()) {
            // Need to retain ordering columns for the exchange
            exchange.getOrderingScheme().get().getOrderBy().stream().filter(symbol -> !partitioningColumns.contains(symbol)).map(outputToInputMap::get).forEach(inputSymbol -> {
                projections.put(inputSymbol, inputSymbol.toSymbolReference());
                inputs.add(inputSymbol);
            });
        }
        ImmutableSet.Builder<Symbol> outputBuilder = ImmutableSet.builder();
        partitioningColumns.forEach(outputBuilder::add);
        exchange.getPartitioningScheme().getHashColumn().ifPresent(outputBuilder::add);
        exchange.getOrderingScheme().ifPresent(orderingScheme -> outputBuilder.addAll(orderingScheme.getOrderBy()));
        Set<Symbol> partitioningHashAndOrderingOutputs = outputBuilder.build();
        Map<Symbol, Expression> translationMap = outputToInputMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().toSymbolReference()));
        for (Map.Entry<Symbol, Expression> projection : project.getAssignments().entrySet()) {
            // Skip identity projection if symbol is in outputs already
            if (partitioningHashAndOrderingOutputs.contains(projection.getKey())) {
                continue;
            }
            Expression translatedExpression = inlineSymbols(translationMap, projection.getValue());
            Type type = context.getSymbolAllocator().getTypes().get(projection.getKey());
            Symbol symbol = context.getSymbolAllocator().newSymbol(translatedExpression, type);
            projections.put(symbol, translatedExpression);
            inputs.add(symbol);
        }
        newSourceBuilder.add(new ProjectNode(context.getIdAllocator().getNextId(), exchange.getSources().get(i), projections.build()));
        inputsBuilder.add(inputs.build());
    }
    // Construct the output symbols in the same order as the sources
    ImmutableList.Builder<Symbol> outputBuilder = ImmutableList.builder();
    partitioningColumns.forEach(outputBuilder::add);
    exchange.getPartitioningScheme().getHashColumn().ifPresent(outputBuilder::add);
    if (exchange.getOrderingScheme().isPresent()) {
        exchange.getOrderingScheme().get().getOrderBy().stream().filter(symbol -> !partitioningColumns.contains(symbol)).forEach(outputBuilder::add);
    }
    Set<Symbol> partitioningHashAndOrderingOutputs = ImmutableSet.copyOf(outputBuilder.build());
    for (Map.Entry<Symbol, Expression> projection : project.getAssignments().entrySet()) {
        // Do not add output for identity projection if symbol is in outputs already
        if (partitioningHashAndOrderingOutputs.contains(projection.getKey())) {
            continue;
        }
        outputBuilder.add(projection.getKey());
    }
    // outputBuilder contains all partition and hash symbols so simply swap the output layout
    PartitioningScheme partitioningScheme = new PartitioningScheme(exchange.getPartitioningScheme().getPartitioning(), outputBuilder.build(), exchange.getPartitioningScheme().getHashColumn(), exchange.getPartitioningScheme().isReplicateNullsAndAny(), exchange.getPartitioningScheme().getBucketToPartition());
    PlanNode result = new ExchangeNode(exchange.getId(), exchange.getType(), exchange.getScope(), partitioningScheme, newSourceBuilder.build(), inputsBuilder.build(), exchange.getOrderingScheme());
    // we need to strip unnecessary symbols (hash, partitioning columns).
    return Result.ofPlanNode(restrictOutputs(context.getIdAllocator(), result, ImmutableSet.copyOf(project.getOutputSymbols())).orElse(result));
}
Also used : Type(io.trino.spi.type.Type) PartitioningScheme(io.trino.sql.planner.PartitioningScheme) Capture.newCapture(io.trino.matching.Capture.newCapture) PlanNode(io.trino.sql.planner.plan.PlanNode) ExpressionSymbolInliner.inlineSymbols(io.trino.sql.planner.ExpressionSymbolInliner.inlineSymbols) ImmutableList(com.google.common.collect.ImmutableList) Map(java.util.Map) Rule(io.trino.sql.planner.iterative.Rule) ProjectNode(io.trino.sql.planner.plan.ProjectNode) Symbol(io.trino.sql.planner.Symbol) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Assignments(io.trino.sql.planner.plan.Assignments) Set(java.util.Set) Collectors(java.util.stream.Collectors) Capture(io.trino.matching.Capture) Patterns.exchange(io.trino.sql.planner.plan.Patterns.exchange) List(java.util.List) Pattern(io.trino.matching.Pattern) Patterns.source(io.trino.sql.planner.plan.Patterns.source) SymbolReference(io.trino.sql.tree.SymbolReference) Captures(io.trino.matching.Captures) Util.restrictOutputs(io.trino.sql.planner.iterative.rule.Util.restrictOutputs) ExchangeNode(io.trino.sql.planner.plan.ExchangeNode) Expression(io.trino.sql.tree.Expression) Patterns.project(io.trino.sql.planner.plan.Patterns.project)
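
The heart of the rewrite is that the new PartitioningScheme copies every property of the exchange's existing scheme and swaps only the output layout. A sketch, where newOutputs stands in for the rebuilt list of partitioning, hash, ordering, and projected symbols assembled above:

    // Keep partitioning, hash column, replicate-nulls flag and bucket-to-partition mapping;
    // only the output layout (newOutputs, a placeholder here) changes.
    PartitioningScheme rewritten = new PartitioningScheme(
            exchange.getPartitioningScheme().getPartitioning(),
            newOutputs,
            exchange.getPartitioningScheme().getHashColumn(),
            exchange.getPartitioningScheme().isReplicateNullsAndAny(),
            exchange.getPartitioningScheme().getBucketToPartition());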

Example 14 with PartitioningScheme

Use of io.trino.sql.planner.PartitioningScheme in project trino by trinodb.

In class PushRemoteExchangeThroughAssignUniqueId, method apply:

@Override
public Result apply(ExchangeNode node, Captures captures, Context context) {
    checkArgument(node.getOrderingScheme().isEmpty(), "Merge exchange over AssignUniqueId not supported");
    AssignUniqueId assignUniqueId = captures.get(ASSIGN_UNIQUE_ID);
    PartitioningScheme partitioningScheme = node.getPartitioningScheme();
    if (partitioningScheme.getPartitioning().getColumns().contains(assignUniqueId.getIdColumn())) {
        // The unique id column is used by the exchange's partitioning.
        // Hence, the AssignUniqueId node has to stay below the exchange node.
        return Result.empty();
    }
    // Rebuild the exchange without the unique id column in its output layout and inputs
    PartitioningScheme newPartitioningScheme = new PartitioningScheme(partitioningScheme.getPartitioning(), removeSymbol(partitioningScheme.getOutputLayout(), assignUniqueId.getIdColumn()), partitioningScheme.getHashColumn(), partitioningScheme.isReplicateNullsAndAny(), partitioningScheme.getBucketToPartition());
    ExchangeNode newExchange = new ExchangeNode(node.getId(), node.getType(), node.getScope(), newPartitioningScheme, ImmutableList.of(assignUniqueId.getSource()), ImmutableList.of(removeSymbol(getOnlyElement(node.getInputs()), assignUniqueId.getIdColumn())), Optional.empty());
    // Put AssignUniqueId back on top of the pushed-down exchange
    return Result.ofPlanNode(new AssignUniqueId(assignUniqueId.getId(), newExchange, assignUniqueId.getIdColumn()));
}
Also used : AssignUniqueId(io.trino.sql.planner.plan.AssignUniqueId) ExchangeNode(io.trino.sql.planner.plan.ExchangeNode) PartitioningScheme(io.trino.sql.planner.PartitioningScheme)
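
The removeSymbol helper referenced above is not shown in this excerpt. A plausible shape, assuming it is a private helper of the same rule class that simply filters one symbol out of a layout (toImmutableList is Guava's collector, statically imported):

    // Assumed shape of the helper used above: drop one symbol from a layout, keeping order.
    private static List<Symbol> removeSymbol(List<Symbol> symbols, Symbol symbolToRemove)
    {
        return symbols.stream()
                .filter(symbol -> !symbolToRemove.equals(symbol))
                .collect(toImmutableList());
    }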

Example 15 with PartitioningScheme

Use of io.trino.sql.planner.PartitioningScheme in project trino by trinodb.

In class PushPartialAggregationThroughExchange, method pushPartial:

private PlanNode pushPartial(AggregationNode aggregation, ExchangeNode exchange, Context context) {
    List<PlanNode> partials = new ArrayList<>();
    for (int i = 0; i < exchange.getSources().size(); i++) {
        PlanNode source = exchange.getSources().get(i);
        SymbolMapper.Builder mappingsBuilder = SymbolMapper.builder();
        for (int outputIndex = 0; outputIndex < exchange.getOutputSymbols().size(); outputIndex++) {
            Symbol output = exchange.getOutputSymbols().get(outputIndex);
            Symbol input = exchange.getInputs().get(i).get(outputIndex);
            if (!output.equals(input)) {
                mappingsBuilder.put(output, input);
            }
        }
        SymbolMapper symbolMapper = mappingsBuilder.build();
        AggregationNode mappedPartial = symbolMapper.map(aggregation, source, context.getIdAllocator().getNextId());
        Assignments.Builder assignments = Assignments.builder();
        for (Symbol output : aggregation.getOutputSymbols()) {
            Symbol input = symbolMapper.map(output);
            assignments.put(output, input.toSymbolReference());
        }
        partials.add(new ProjectNode(context.getIdAllocator().getNextId(), mappedPartial, assignments.build()));
    }
    for (PlanNode node : partials) {
        verify(aggregation.getOutputSymbols().equals(node.getOutputSymbols()));
    }
    // Since this exchange source is now guaranteed to have the same symbols as the inputs to the partial
    // aggregation, we don't need to rewrite symbols in the partitioning function
    PartitioningScheme partitioning = new PartitioningScheme(exchange.getPartitioningScheme().getPartitioning(), aggregation.getOutputSymbols(), exchange.getPartitioningScheme().getHashColumn(), exchange.getPartitioningScheme().isReplicateNullsAndAny(), exchange.getPartitioningScheme().getBucketToPartition());
    return new ExchangeNode(context.getIdAllocator().getNextId(), exchange.getType(), exchange.getScope(), partitioning, partials, ImmutableList.copyOf(Collections.nCopies(partials.size(), aggregation.getOutputSymbols())), Optional.empty());
}
Also used : SymbolMapper(io.trino.sql.planner.optimizations.SymbolMapper) ExchangeNode(io.trino.sql.planner.plan.ExchangeNode) Symbol(io.trino.sql.planner.Symbol) PartitioningScheme(io.trino.sql.planner.PartitioningScheme) ArrayList(java.util.ArrayList) Assignments(io.trino.sql.planner.plan.Assignments) AggregationNode(io.trino.sql.planner.plan.AggregationNode) PlanNode(io.trino.sql.planner.plan.PlanNode) ProjectNode(io.trino.sql.planner.plan.ProjectNode)
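
Because every rewritten source exposes exactly the aggregation's output symbols, the per-source input layouts of the new exchange are simply n identical copies of that list, as the return statement shows. A small illustration of that invariant:

    // Each source of the new exchange contributes the same layout, so its inputs
    // are nCopies of the aggregation's output symbols (one copy per partial source).
    List<Symbol> layout = aggregation.getOutputSymbols();
    List<List<Symbol>> inputs = ImmutableList.copyOf(Collections.nCopies(partials.size(), layout));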

Aggregations

PartitioningScheme (io.trino.sql.planner.PartitioningScheme): 16 usages
Symbol (io.trino.sql.planner.Symbol): 14 usages
PlanFragment (io.trino.sql.planner.PlanFragment): 7 usages
PlanNodeId (io.trino.sql.planner.plan.PlanNodeId): 7 usages
PlanFragmentId (io.trino.sql.planner.plan.PlanFragmentId): 6 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 5 usages
ExchangeNode (io.trino.sql.planner.plan.ExchangeNode): 5 usages
PlanNode (io.trino.sql.planner.plan.PlanNode): 5 usages
RemoteSourceNode (io.trino.sql.planner.plan.RemoteSourceNode): 5 usages
JoinNode (io.trino.sql.planner.plan.JoinNode): 4 usages
TableScanNode (io.trino.sql.planner.plan.TableScanNode): 4 usages
ImmutableList (com.google.common.collect.ImmutableList): 3 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 3 usages
Type (io.trino.spi.type.Type): 3 usages
AggregationNode (io.trino.sql.planner.plan.AggregationNode): 3 usages
Assignments (io.trino.sql.planner.plan.Assignments): 3 usages
FilterNode (io.trino.sql.planner.plan.FilterNode): 3 usages
ProjectNode (io.trino.sql.planner.plan.ProjectNode): 3 usages
ArrayList (java.util.ArrayList): 3 usages
List (java.util.List): 3 usages