Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.
In class TestUtil, method createExchangePlanFragment:
private static PlanFragment createExchangePlanFragment(RowExpression expr)
{
    Symbol testSymbol = new Symbol("a");
    Map<Symbol, ColumnHandle> scanAssignments = ImmutableMap.<Symbol, ColumnHandle>builder()
            .put(testSymbol, new TestingMetadata.TestingColumnHandle("a"))
            .build();
    Map<Symbol, ColumnHandle> assignments = Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(testSymbol)));
    TableScanNode tableScanNode = new TableScanNode(
            new PlanNodeId(UUID.randomUUID().toString()),
            makeTableHandle(TupleDomain.none()),
            ImmutableList.copyOf(assignments.keySet()), assignments,
            TupleDomain.none(), Optional.empty(),
            ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
            new UUID(0, 0), 0, false);
    PlanBuilder planBuilder = new PlanBuilder(new PlanNodeIdAllocator(), dummyMetadata());
    FilterNode filterNode = planBuilder.filter(expr, tableScanNode);
    // Cap the filtered scan at a single row; this LimitNode becomes the fragment root.
    PlanNode planNode = new LimitNode(new PlanNodeId("limit"), filterNode, 1, false);
    ImmutableMap.Builder<Symbol, Type> types = ImmutableMap.builder();
    for (Symbol symbol : planNode.getOutputSymbols()) {
        types.put(symbol, VARCHAR);
    }
    return new PlanFragment(new PlanFragmentId("limit_fragment_id"), planNode, types.build(), SOURCE_DISTRIBUTION,
            ImmutableList.of(planNode.getId()),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), planNode.getOutputSymbols()),
            ungroupedExecution(), StatsAndCosts.empty(), Optional.empty(), Optional.empty(), Optional.empty());
}
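As a quick usage sketch (not part of TestUtil): a test can pass any boolean RowExpression to this helper and inspect the fragment it returns. TRUE_CONSTANT and the assertion helpers below are illustrative assumptions, and the getRoot/isPartial accessors are assumed to match Presto's SPI.

    // Hypothetical caller; TRUE_CONSTANT stands in for whatever boolean RowExpression the test has handy.
    PlanFragment fragment = createExchangePlanFragment(TRUE_CONSTANT);
    LimitNode root = (LimitNode) fragment.getRoot(); // the LimitNode built above is the fragment root
    assertEquals(root.getCount(), 1);                // the helper caps output at one row
    assertFalse(root.isPartial());                   // constructed as a final (non-partial) limit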
Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.
In class LogicalPlanner, method createTableWriterPlan:
private RelationPlan createTableWriterPlan(Analysis analysis, RelationPlan plan, WriterTarget target, List<String> columnNames, Optional<NewTableLayout> writeTableLayout, TableStatisticsMetadata statisticsMetadata)
{
    PlanNode source = plan.getRoot();
    // CREATE TABLE ... AS ... WITH NO DATA keeps the query shape but must write no rows,
    // which is modeled by capping the source with a LIMIT 0 node.
    if (!analysis.isCreateTableAsSelectWithData()) {
        source = new LimitNode(idAllocator.getNextId(), source, 0L, false);
    }
    // TODO: this should be checked in analysis
    writeTableLayout.ifPresent(layout -> {
        if (!ImmutableSet.copyOf(columnNames).containsAll(layout.getPartitionColumns())) {
            throw new PrestoException(NOT_SUPPORTED, "INSERT must write all distribution columns: " + layout.getPartitionColumns());
        }
    });
    List<Symbol> symbols = plan.getFieldMappings();
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream()
                .mapToInt(columnNames::indexOf)
                .mapToObj(symbols::get)
                .forEach(partitionFunctionArguments::add);
        List<Symbol> outputLayout = new ArrayList<>(symbols);
        PartitioningHandle partitioningHandle = writeTableLayout.get().getPartitioning().orElse(FIXED_HASH_DISTRIBUTION);
        partitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(partitioningHandle, partitionFunctionArguments), outputLayout));
    }
    if (!statisticsMetadata.isEmpty()) {
        verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
        Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
                .collect(toImmutableMap(Entry::getKey, Entry::getValue));
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(planSymbolAllocator, metadata);
        // The partial aggregation runs inside the TableWriterOperator to compute statistics over the
        // data that operator consumes; the final aggregation runs inside the TableFinishOperator to
        // summarize the statistics collected by the partial aggregations on all writer nodes.
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();
        PlanNode writerNode = new TableWriterNode(
                idAllocator.getNextId(), source, target,
                planSymbolAllocator.newSymbol("partialrows", BIGINT),
                planSymbolAllocator.newSymbol("fragment", VARBINARY),
                symbols, columnNames, partitioningScheme,
                Optional.of(partialAggregation),
                Optional.of(result.getDescriptor().map(aggregations.getMappings()::get)));
        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(), writerNode, target,
                planSymbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));
        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
    }
    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(idAllocator.getNextId(), source, target,
                    planSymbolAllocator.newSymbol("partialrows", BIGINT),
                    planSymbolAllocator.newSymbol("fragment", VARBINARY),
                    symbols, columnNames, partitioningScheme,
                    Optional.empty(), Optional.empty()),
            target,
            planSymbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(), Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
}
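The LIMIT 0 trick at the top of this method is the interesting LimitNode usage: for CREATE TABLE ... AS ... WITH NO DATA, the planner keeps the full query shape, so the target table still gets its schema from the SELECT, while guaranteeing no rows reach the writer. A minimal sketch of that step in isolation, assuming an idAllocator and a source node in scope as above:

    // Sketch: model WITH NO DATA by capping the writer's source at zero rows.
    PlanNode prunedSource = new LimitNode(
            idAllocator.getNextId(), // fresh plan-node id
            source,                  // root of the SELECT part of the CTAS
            0L,                      // emit no rows
            false);                  // final (non-partial) limit
    // prunedSource still exposes the source's output symbols, so the table
    // schema is derived as usual even though nothing is written.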
Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.
In class TestCostCalculator, method testLimit:
@Test
public void testLimit()
{
    TableScanNode ts1 = tableScan("ts1", "orderkey");
    LimitNode limit = new LimitNode(new PlanNodeId("limit"), ts1, 5, false);
    Map<String, PlanNodeStatsEstimate> stats = ImmutableMap.of(
            "ts1", statsEstimate(ts1, 4000),
            "limit", statsEstimate(ts1, 40)); // 5 rows * average row size
    Map<String, PlanCostEstimate> costs = ImmutableMap.of("ts1", cpuCost(1000));
    Map<String, Type> types = ImmutableMap.of("orderkey", BIGINT);
    // Estimate only CPU cost for the limit node: 1000 (source) + 40 (output) + 5 (is-null boolean array).
    assertCost(limit, costs, stats, types).cpu(1045).memory(0).network(0);
    assertCostEstimatedExchanges(limit, costs, stats, types).cpu(1045).memory(0).network(0);
}
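The expected value 1045 is just the limit's CPU model spelled out: the source's cost plus the bytes the limit emits plus one is-null byte per row. A worked check of that arithmetic (variable names here are illustrative, not test helpers):

    long sourceCpu = 1000;  // cpuCost("ts1")
    long outputBytes = 40;  // 5 rows * 8-byte BIGINT, per the stats estimate
    long isNullBytes = 5;   // one boolean per row for the is-null array
    long expectedCpu = sourceCpu + outputBytes + isNullBytes;
    // expectedCpu == 1045, matching both assertions; memory and network remain 0.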
Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.
In class PushLimitThroughProject, method apply:
@Override
public Result apply(LimitNode parent, Captures captures, Context context)
{
    ProjectNode projectNode = captures.get(CHILD);
    // For a LimitNode without ties, simply reorder the nodes.
    if (!parent.isWithTies()) {
        return Result.ofPlanNode(transpose(parent, projectNode));
    }
    // For a LimitNode with ties, the tiesResolvingScheme must be rewritten in terms of the symbols before projection.
    SymbolMapper.Builder symbolMapper = SymbolMapper.builder();
    for (Symbol symbol : parent.getTiesResolvingScheme().get().getOrderBy()) {
        Expression expression = castToExpression(projectNode.getAssignments().get(symbol));
        // If a symbol results from some computation, the translation fails.
        if (!(expression instanceof SymbolReference)) {
            return Result.empty();
        }
        symbolMapper.put(symbol, SymbolUtils.from(expression));
    }
    LimitNode mappedLimitNode = symbolMapper.build().map(parent, projectNode.getSource());
    return Result.ofPlanNode(projectNode.replaceChildren(ImmutableList.of(mappedLimitNode)));
}
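For the no-ties branch, transpose simply swaps the two nodes so the limit runs below the projection. A sketch of the equivalent rewrite using only the replaceChildren/getSource calls already seen in this rule (the real transpose helper is a shared utility, so this is an illustration, not its implementation):

    // Limit(5)                        Project(x := a + b)
    //   Project(x := a + b)   ==>       Limit(5)
    //     <source>                        <source>
    PlanNode transposed = projectNode.replaceChildren(ImmutableList.of(
            parent.replaceChildren(ImmutableList.of(projectNode.getSource()))));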
Use of io.prestosql.spi.plan.LimitNode in project hetu-core by openlookeng.
In class PushLimitThroughUnion, method apply:
@Override
public Result apply(LimitNode parent, Captures captures, Context context)
{
    UnionNode unionNode = captures.get(CHILD);
    ImmutableList.Builder<PlanNode> builder = ImmutableList.builder();
    boolean shouldApply = false;
    for (PlanNode source : unionNode.getSources()) {
        // This check ensures that we don't fire the optimizer if it was previously applied.
        if (isAtMost(source, context.getLookup(), parent.getCount())) {
            builder.add(source);
        }
        else {
            shouldApply = true;
            builder.add(new LimitNode(context.getIdAllocator().getNextId(), source, parent.getCount(), true));
        }
    }
    if (!shouldApply) {
        return Result.empty();
    }
    return Result.ofPlanNode(parent.replaceChildren(ImmutableList.of(unionNode.replaceChildren(builder.build()))));
}
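The rewrite keeps the original limit on top and puts a partial limit over every union branch that is not already bounded; the isAtMost guard keeps the rule from firing a second time on its own output. In plan shapes, with the per-branch node built exactly as in the loop above (the final boolean reads as the partial flag, matching the other constructor calls in this page):

    // Before:                 After:
    //   Limit(10)               Limit(10)
    //     Union                   Union
    //       SourceA                 Limit(10, partial)
    //       SourceB                   SourceA
    //                               Limit(10, partial)
    //                                 SourceB
    LimitNode partialLimit = new LimitNode(
            context.getIdAllocator().getNextId(), // fresh id from the rule context
            source,                               // one branch of the union
            parent.getCount(),                    // same row cap as the parent limit
            true);                                // partial: the parent limit does the final trim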