
Example 1 with LimitNode

Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.

From the class TestUtil, method createExchangePlanFragment:

private static PlanFragment createExchangePlanFragment(RowExpression expr) {
    Symbol testSymbol = new Symbol("a");
    Map<Symbol, ColumnHandle> scanAssignments = ImmutableMap.<Symbol, ColumnHandle>builder().put(testSymbol, new TestingMetadata.TestingColumnHandle("a")).build();
    Map<Symbol, ColumnHandle> assignments = Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(testSymbol)));
    TableScanNode tableScanNode = new TableScanNode(
            new PlanNodeId(UUID.randomUUID().toString()),
            makeTableHandle(TupleDomain.none()),
            ImmutableList.copyOf(assignments.keySet()),
            assignments,
            TupleDomain.none(),
            Optional.empty(),
            ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
            new UUID(0, 0),
            0,
            false);
    PlanBuilder planBuilder = new PlanBuilder(new PlanNodeIdAllocator(), dummyMetadata());
    FilterNode filterNode = planBuilder.filter(expr, tableScanNode);
    PlanNode planNode = new LimitNode(new PlanNodeId("limit"), filterNode, 1, false);
    ImmutableMap.Builder<Symbol, Type> types = ImmutableMap.builder();
    for (Symbol symbol : planNode.getOutputSymbols()) {
        types.put(symbol, VARCHAR);
    }
    return new PlanFragment(
            new PlanFragmentId("limit_fragment_id"),
            planNode,
            types.build(),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(planNode.getId()),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), planNode.getOutputSymbols()),
            ungroupedExecution(),
            StatsAndCosts.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty());
}
Also used: ColumnHandle(io.prestosql.spi.connector.ColumnHandle), Symbol(io.prestosql.spi.plan.Symbol), PartitioningScheme(io.prestosql.sql.planner.PartitioningScheme), FilterNode(io.prestosql.spi.plan.FilterNode), PlanBuilder(io.prestosql.sql.planner.iterative.rule.test.PlanBuilder), ImmutableMap(com.google.common.collect.ImmutableMap), PlanFragment(io.prestosql.sql.planner.PlanFragment), PlanNodeId(io.prestosql.spi.plan.PlanNodeId), Type(io.prestosql.spi.type.Type), PlanNode(io.prestosql.spi.plan.PlanNode), TableScanNode(io.prestosql.spi.plan.TableScanNode), PlanNodeIdAllocator(io.prestosql.spi.plan.PlanNodeIdAllocator), LimitNode(io.prestosql.spi.plan.LimitNode), PlanFragmentId(io.prestosql.sql.planner.plan.PlanFragmentId), UUID(java.util.UUID)
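For orientation, the last constructor argument in new LimitNode(new PlanNodeId("limit"), filterNode, 1, false) is the partial flag. A minimal sketch of both variants, relying only on the four-argument constructor that appears throughout these examples; idAllocator and source are placeholders, not part of the original test:

PlanNodeIdAllocator idAllocator = new PlanNodeIdAllocator();
// Final limit: at most `count` rows leave this node, as in the fragment above.
LimitNode finalLimit = new LimitNode(idAllocator.getNextId(), source, 1, false);
// Partial limit: an early cut applied per source; a final limit is still expected
// downstream (see the PushLimitThroughUnion example later, which sets the flag to true).
LimitNode partialLimit = new LimitNode(idAllocator.getNextId(), source, 1, true);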

Example 2 with LimitNode

Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.

From the class LogicalPlanner, method createTableWriterPlan:

private RelationPlan createTableWriterPlan(Analysis analysis, RelationPlan plan, WriterTarget target, List<String> columnNames, Optional<NewTableLayout> writeTableLayout, TableStatisticsMetadata statisticsMetadata) {
    PlanNode source = plan.getRoot();
    if (!analysis.isCreateTableAsSelectWithData()) {
        source = new LimitNode(idAllocator.getNextId(), source, 0L, false);
    }
    // todo this should be checked in analysis
    writeTableLayout.ifPresent(layout -> {
        if (!ImmutableSet.copyOf(columnNames).containsAll(layout.getPartitionColumns())) {
            throw new PrestoException(NOT_SUPPORTED, "INSERT must write all distribution columns: " + layout.getPartitionColumns());
        }
    });
    List<Symbol> symbols = plan.getFieldMappings();
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream().mapToInt(columnNames::indexOf).mapToObj(symbols::get).forEach(partitionFunctionArguments::add);
        List<Symbol> outputLayout = new ArrayList<>(symbols);
        PartitioningHandle partitioningHandle = writeTableLayout.get().getPartitioning().orElse(FIXED_HASH_DISTRIBUTION);
        partitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(partitioningHandle, partitionFunctionArguments), outputLayout));
    }
    if (!statisticsMetadata.isEmpty()) {
        verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
        Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new).collect(toImmutableMap(Entry::getKey, Entry::getValue));
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(planSymbolAllocator, metadata);
        // partial aggregation is run within the TableWriteOperator to calculate the statistics for
        // the data consumed by the TableWriteOperator
        // final aggregation is run within the TableFinishOperator to summarize collected statistics
        // by the partial aggregation from all of the writer nodes
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();
        PlanNode writerNode = new TableWriterNode(
                idAllocator.getNextId(),
                source,
                target,
                planSymbolAllocator.newSymbol("partialrows", BIGINT),
                planSymbolAllocator.newSymbol("fragment", VARBINARY),
                symbols,
                columnNames,
                partitioningScheme,
                Optional.of(partialAggregation),
                Optional.of(result.getDescriptor().map(aggregations.getMappings()::get)));
        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(),
                writerNode,
                target,
                planSymbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));
        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
    }
    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(
                    idAllocator.getNextId(),
                    source,
                    target,
                    planSymbolAllocator.newSymbol("partialrows", BIGINT),
                    planSymbolAllocator.newSymbol("fragment", VARBINARY),
                    symbols,
                    columnNames,
                    partitioningScheme,
                    Optional.empty(),
                    Optional.empty()),
            target,
            planSymbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(),
            Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
}
Also used: TableStatisticAggregation(io.prestosql.sql.planner.StatisticsAggregationPlanner.TableStatisticAggregation), Symbol(io.prestosql.spi.plan.Symbol), ArrayList(java.util.ArrayList), PrestoException(io.prestosql.spi.PrestoException), TableFinishNode(io.prestosql.sql.planner.plan.TableFinishNode), StatisticAggregations(io.prestosql.sql.planner.plan.StatisticAggregations), PlanNode(io.prestosql.spi.plan.PlanNode), LimitNode(io.prestosql.spi.plan.LimitNode), TableWriterNode(io.prestosql.sql.planner.plan.TableWriterNode)
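For readers skimming the method above, the plan it produces has the same shape in both branches; only the statistics aggregations differ. A rough sketch of the resulting tree, with names taken from the code and the layout purely illustrative:

// TableFinishNode("rows")                            final commit, optional final statistics aggregation
//   +-- TableWriterNode("partialrows", "fragment")   optional partial statistics aggregation
//         +-- LimitNode(count = 0)                   present only for CREATE TABLE ... AS SELECT ... WITH NO DATA
//               +-- source (root of the analyzed query plan)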

Example 3 with LimitNode

Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.

From the class TestCostCalculator, method testLimit:

@Test
public void testLimit() {
    TableScanNode ts1 = tableScan("ts1", "orderkey");
    LimitNode limit = new LimitNode(new PlanNodeId("limit"), ts1, 5, false);
    Map<String, PlanNodeStatsEstimate> stats = ImmutableMap.of(
            "ts1", statsEstimate(ts1, 4000),
            // 5 * average row size
            "limit", statsEstimate(ts1, 40));
    Map<String, PlanCostEstimate> costs = ImmutableMap.of("ts1", cpuCost(1000));
    Map<String, Type> types = ImmutableMap.of("orderkey", BIGINT);
    // Do not estimate cost other than CPU for limit node.
    assertCost(limit, costs, stats, types)
            // 1000 + (is null boolean array) + 40
            .cpu(1045)
            .memory(0)
            .network(0);
    assertCostEstimatedExchanges(limit, costs, stats, types).cpu(1045).memory(0).network(0);
}
Also used: PlanNodeId(io.prestosql.spi.plan.PlanNodeId), Type(io.prestosql.spi.type.Type), TableScanNode(io.prestosql.spi.plan.TableScanNode), LimitNode(io.prestosql.spi.plan.LimitNode), Test(org.testng.annotations.Test)
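The 1045 in the assertion above is easier to read with the comment's arithmetic spelled out. A hedged reconstruction, assuming the cost model charges the upstream CPU cost plus one is-null byte per output row plus the limit's output data size:

double upstreamCpu = 1000;        // cpuCost("ts1") from the costs map
double isNullBooleanArray = 5;    // assumption: 5 output rows * 1 byte each for the is-null flags
double limitOutputBytes = 40;     // statsEstimate(ts1, 40): 5 rows * average row size
double expectedCpu = upstreamCpu + isNullBooleanArray + limitOutputBytes;  // 1045, as asserted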

Example 4 with LimitNode

Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.

From the class PushLimitThroughProject, method apply:

@Override
public Result apply(LimitNode parent, Captures captures, Context context) {
    ProjectNode projectNode = captures.get(CHILD);
    // for a LimitNode without ties, simply reorder the nodes
    if (!parent.isWithTies()) {
        return Result.ofPlanNode(transpose(parent, projectNode));
    }
    // for a LimitNode with ties, the tiesResolvingScheme must be rewritten in terms of symbols before projection
    SymbolMapper.Builder symbolMapper = SymbolMapper.builder();
    for (Symbol symbol : parent.getTiesResolvingScheme().get().getOrderBy()) {
        Expression expression = castToExpression(projectNode.getAssignments().get(symbol));
        // if a symbol results from some computation, the translation fails
        if (!(expression instanceof SymbolReference)) {
            return Result.empty();
        }
        symbolMapper.put(symbol, SymbolUtils.from(expression));
    }
    LimitNode mappedLimitNode = symbolMapper.build().map(parent, projectNode.getSource());
    return Result.ofPlanNode(projectNode.replaceChildren(ImmutableList.of(mappedLimitNode)));
}
Also used: SymbolMapper(io.prestosql.sql.planner.optimizations.SymbolMapper), OriginalExpressionUtils.castToExpression(io.prestosql.sql.relational.OriginalExpressionUtils.castToExpression), Expression(io.prestosql.sql.tree.Expression), LimitNode(io.prestosql.spi.plan.LimitNode), Symbol(io.prestosql.spi.plan.Symbol), SymbolReference(io.prestosql.sql.tree.SymbolReference), ProjectNode(io.prestosql.spi.plan.ProjectNode)
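The no-ties branch above delegates to a transpose helper whose body is not shown here. A plausible sketch, consistent with how the result is used (a hypothetical implementation, not necessarily the project's actual helper): the project becomes the new root and the limit takes over the project's original source.

// Hypothetical sketch of transpose(parent, child): child becomes the new root,
// parent is pushed below it and reads from child's original sources.
static PlanNode transpose(PlanNode parent, PlanNode child)
{
    return child.replaceChildren(ImmutableList.of(
            parent.replaceChildren(child.getSources())));
}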

Example 5 with LimitNode

Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.

From the class PushLimitThroughUnion, method apply:

@Override
public Result apply(LimitNode parent, Captures captures, Context context) {
    UnionNode unionNode = captures.get(CHILD);
    ImmutableList.Builder<PlanNode> builder = ImmutableList.builder();
    boolean shouldApply = false;
    for (PlanNode source : unionNode.getSources()) {
        // This check is to ensure that we don't fire the optimizer if it was previously applied.
        if (isAtMost(source, context.getLookup(), parent.getCount())) {
            builder.add(source);
        } else {
            shouldApply = true;
            builder.add(new LimitNode(context.getIdAllocator().getNextId(), source, parent.getCount(), true));
        }
    }
    if (!shouldApply) {
        return Result.empty();
    }
    return Result.ofPlanNode(parent.replaceChildren(ImmutableList.of(unionNode.replaceChildren(builder.build()))));
}
Also used: PlanNode(io.prestosql.spi.plan.PlanNode), UnionNode(io.prestosql.spi.plan.UnionNode), LimitNode(io.prestosql.spi.plan.LimitNode), ImmutableList(com.google.common.collect.ImmutableList)
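To visualize what the rule produces: the original limit stays on top of the union, and every branch not already bounded by the count gets its own partial limit. A rough before/after sketch; branch names are illustrative:

// Before:                       After:
//   LimitNode(n)                  LimitNode(n)
//     +-- UnionNode                 +-- UnionNode
//           +-- source1                   +-- LimitNode(n, partial = true) over source1
//           +-- source2                   +-- LimitNode(n, partial = true) over source2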

Aggregations

LimitNode (io.prestosql.spi.plan.LimitNode): 9
PlanNode (io.prestosql.spi.plan.PlanNode): 6
Symbol (io.prestosql.spi.plan.Symbol): 4
PlanNodeId (io.prestosql.spi.plan.PlanNodeId): 2
ProjectNode (io.prestosql.spi.plan.ProjectNode): 2
TableScanNode (io.prestosql.spi.plan.TableScanNode): 2
Type (io.prestosql.spi.type.Type): 2
Expression (io.prestosql.sql.tree.Expression): 2
Test (org.testng.annotations.Test): 2
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
PrestoException (io.prestosql.spi.PrestoException): 1
ColumnHandle (io.prestosql.spi.connector.ColumnHandle): 1
Assignments (io.prestosql.spi.plan.Assignments): 1
FilterNode (io.prestosql.spi.plan.FilterNode): 1
JoinNode (io.prestosql.spi.plan.JoinNode): 1
PlanNodeIdAllocator (io.prestosql.spi.plan.PlanNodeIdAllocator): 1
UnionNode (io.prestosql.spi.plan.UnionNode): 1
CallExpression (io.prestosql.spi.relation.CallExpression): 1
RowExpression (io.prestosql.spi.relation.RowExpression): 1