use of io.prestosql.sql.planner.plan.ExchangeNode in project hetu-core by openlookeng.
the class ExchangeMatcher method detailMatches.
@Override
public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session session, Metadata metadata, SymbolAliases symbolAliases) {
    checkState(shapeMatches(node), "Plan testing framework error: shapeMatches returned false in detailMatches in %s", this.getClass().getName());
    ExchangeNode exchangeNode = (ExchangeNode) node;
    if (!orderBy.isEmpty()) {
        // If the pattern expects an ordering, the exchange must carry a matching ordering scheme
        if (!exchangeNode.getOrderingScheme().isPresent()) {
            return NO_MATCH;
        }
        if (!orderingSchemeMatches(orderBy, exchangeNode.getOrderingScheme().get(), symbolAliases)) {
            return NO_MATCH;
        }
    }
    return MatchResult.match();
}
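The matcher only inspects ordering when the pattern asks for one: an expected ordering against an exchange that has no OrderingScheme is an immediate NO_MATCH. A minimal, self-contained sketch of that guard logic, assuming a toy SimpleOrdering type and an expectedMatches helper (illustrative names, not hetu-core APIs):

import java.util.Map;
import java.util.Optional;

public class OrderingGuardSketch {
    // Simplified stand-in for an exchange's optional ordering scheme: symbol -> "ASC"/"DESC"
    record SimpleOrdering(Map<String, String> orderings) {}

    // Mirrors the shape of ExchangeMatcher.detailMatches: no expectation -> match;
    // expectation but no ordering on the node -> no match; otherwise compare entry by entry.
    static boolean expectedMatches(Map<String, String> expected, Optional<SimpleOrdering> actual) {
        if (expected.isEmpty()) {
            return true;
        }
        if (!actual.isPresent()) {
            return false;
        }
        return expected.entrySet().stream()
                .allMatch(e -> e.getValue().equals(actual.get().orderings().get(e.getKey())));
    }

    public static void main(String[] args) {
        Map<String, String> expected = Map.of("orderkey", "ASC");
        System.out.println(expectedMatches(expected, Optional.empty()));                                           // false
        System.out.println(expectedMatches(expected, Optional.of(new SimpleOrdering(Map.of("orderkey", "ASC"))))); // true
    }
}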
use of io.prestosql.sql.planner.plan.ExchangeNode in project hetu-core by openlookeng.
the class TestLogicalPlanner method testBroadcastCorrelatedSubqueryAvoidsRemoteExchangeBeforeAggregation.
@Test
public void testBroadcastCorrelatedSubqueryAvoidsRemoteExchangeBeforeAggregation() {
    Session broadcastJoin = Session.builder(this.getQueryRunner().getDefaultSession())
            .setSystemProperty(JOIN_DISTRIBUTION_TYPE, JoinDistributionType.BROADCAST.name())
            .setSystemProperty(FORCE_SINGLE_NODE_OUTPUT, Boolean.toString(false))
            .build();
    // make sure there is a remote exchange on the build side
    PlanMatchPattern joinBuildSideWithRemoteExchange = anyTree(
            node(JoinNode.class,
                    anyTree(node(TableScanNode.class)),
                    anyTree(exchange(REMOTE, REPLICATE,
                            anyTree(node(TableScanNode.class))))));
    // validates that there exists only one remote exchange
    Consumer<Plan> validateSingleRemoteExchange = plan -> assertEquals(
            countOfMatchingNodes(plan, node -> node instanceof ExchangeNode && ((ExchangeNode) node).getScope() == REMOTE),
            1);
    // validates that there exists only one streaming aggregation, grouped on the synthetic "unique" symbol
    Consumer<Plan> validateSingleStreamingAggregation = plan -> assertEquals(
            countOfMatchingNodes(plan, node -> node instanceof AggregationNode
                    && ((AggregationNode) node).getGroupingKeys().contains(new Symbol("unique"))
                    && ((AggregationNode) node).isStreamable()),
            1);
    // region is unpartitioned, AssignUniqueId should provide satisfying partitioning for count(*) after LEFT JOIN
    assertPlanWithSession(
            "SELECT (SELECT count(*) FROM region r2 WHERE r2.regionkey > r1.regionkey) FROM region r1",
            broadcastJoin, false, joinBuildSideWithRemoteExchange,
            validateSingleRemoteExchange.andThen(validateSingleStreamingAggregation));
    // orders is naturally partitioned, AssignUniqueId should not overwrite its natural partitioning
    assertPlanWithSession(
            "SELECT count(count) " +
                    "FROM (SELECT o1.orderkey orderkey, (SELECT count(*) FROM orders o2 WHERE o2.orderkey > o1.orderkey) count FROM orders o1) " +
                    "GROUP BY orderkey",
            broadcastJoin, false, joinBuildSideWithRemoteExchange,
            validateSingleRemoteExchange.andThen(validateSingleStreamingAggregation));
}
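The test helper countOfMatchingNodes (not shown above) walks the plan and counts nodes satisfying a predicate. A minimal sketch of that idea over a generic tree, assuming a toy TreeNode type in place of the real PlanNode:

import java.util.List;
import java.util.function.Predicate;

public class NodeCountSketch {
    // Minimal stand-in for a plan node: a label plus child nodes.
    record TreeNode(String label, List<TreeNode> children) {}

    // Recursively count the nodes that satisfy the predicate, the way the test
    // counts e.g. remote ExchangeNodes or streamable AggregationNodes in a Plan.
    static int countOfMatchingNodes(TreeNode root, Predicate<TreeNode> predicate) {
        int count = predicate.test(root) ? 1 : 0;
        for (TreeNode child : root.children()) {
            count += countOfMatchingNodes(child, predicate);
        }
        return count;
    }

    public static void main(String[] args) {
        TreeNode plan = new TreeNode("Output", List.of(
                new TreeNode("Aggregation", List.of(
                        new TreeNode("RemoteExchange", List.of(
                                new TreeNode("TableScan", List.of())))))));
        System.out.println(countOfMatchingNodes(plan, n -> n.label().equals("RemoteExchange"))); // 1
    }
}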
use of io.prestosql.sql.planner.plan.ExchangeNode in project hetu-core by openlookeng.
the class TestCostCalculator method testReplicatedJoinWithExchange.
@Test
public void testReplicatedJoinWithExchange() {
    TableScanNode ts1 = tableScan("ts1", "orderkey");
    TableScanNode ts2 = tableScan("ts2", "orderkey_0");
    // replicate the build side to every node, then repartition it locally on the join key
    ExchangeNode remoteExchange2 = replicatedExchange(new PlanNodeId("re2"), REMOTE, ts2);
    ExchangeNode localExchange = partitionedExchange(new PlanNodeId("le"), LOCAL, remoteExchange2, ImmutableList.of(new Symbol("orderkey_0")), Optional.empty());
    JoinNode join = join("join", ts1, localExchange, JoinNode.DistributionType.REPLICATED, "orderkey", "orderkey_0");
    Map<String, PlanNodeStatsEstimate> stats = ImmutableMap.<String, PlanNodeStatsEstimate>builder()
            .put("join", statsEstimate(join, 12000))
            .put("re2", statsEstimate(remoteExchange2, 10000))
            .put("le", statsEstimate(localExchange, 6000))
            .put("ts1", statsEstimate(ts1, 6000))
            .put("ts2", statsEstimate(ts2, 1000))
            .build();
    Map<String, Type> types = ImmutableMap.of("orderkey", BIGINT, "orderkey_0", BIGINT);
    assertFragmentedEqualsUnfragmented(join, stats, types);
}
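The point of the replicated variant is that only the smaller build side (ts2) crosses the network, but once per worker, while the probe side (ts1) stays in place. A back-of-the-envelope sketch of the network volume this implies; the row counts come from the stats map above, while the node count and bytes-per-row figure are made-up illustration values, not what TestCostCalculator uses:

public class ReplicatedExchangeCostSketch {
    public static void main(String[] args) {
        int workerNodes = 4;          // illustrative cluster size (assumption)
        double bytesPerRow = 8.0;     // illustrative: one BIGINT join key per row (assumption)

        double probeRows = 6000;      // ts1 stays local under a replicated join
        double buildRows = 1000;      // ts2 is replicated through the remote exchange

        // Replicated join: only the build side crosses the network, but once per node.
        double replicatedNetworkBytes = buildRows * bytesPerRow * workerNodes;

        // Repartitioned join: both sides cross the network roughly once.
        double repartitionedNetworkBytes = (probeRows + buildRows) * bytesPerRow;

        System.out.printf("replicated ~%.0f bytes, repartitioned ~%.0f bytes%n",
                replicatedNetworkBytes, repartitionedNetworkBytes);
    }
}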
use of io.prestosql.sql.planner.plan.ExchangeNode in project hetu-core by openlookeng.
the class TestCostCalculator method testRepartitionedJoinWithExchange.
@Test
public void testRepartitionedJoinWithExchange() {
    TableScanNode ts1 = tableScan("ts1", "orderkey");
    TableScanNode ts2 = tableScan("ts2", "orderkey_0");
    // hash-partition both sides remotely on their join keys, then repartition the build side locally
    ExchangeNode remoteExchange1 = partitionedExchange(new PlanNodeId("re1"), REMOTE, ts1, ImmutableList.of(new Symbol("orderkey")), Optional.empty());
    ExchangeNode remoteExchange2 = partitionedExchange(new PlanNodeId("re2"), REMOTE, ts2, ImmutableList.of(new Symbol("orderkey_0")), Optional.empty());
    ExchangeNode localExchange = partitionedExchange(new PlanNodeId("le"), LOCAL, remoteExchange2, ImmutableList.of(new Symbol("orderkey_0")), Optional.empty());
    JoinNode join = join("join", remoteExchange1, localExchange, JoinNode.DistributionType.PARTITIONED, "orderkey", "orderkey_0");
    Map<String, PlanNodeStatsEstimate> stats = ImmutableMap.<String, PlanNodeStatsEstimate>builder()
            .put("join", statsEstimate(join, 12000))
            .put("re1", statsEstimate(remoteExchange1, 10000))
            .put("re2", statsEstimate(remoteExchange2, 10000))
            .put("le", statsEstimate(localExchange, 6000))
            .put("ts1", statsEstimate(ts1, 6000))
            .put("ts2", statsEstimate(ts2, 1000))
            .build();
    Map<String, Type> types = ImmutableMap.of("orderkey", BIGINT, "orderkey_0", BIGINT);
    assertFragmentedEqualsUnfragmented(join, stats, types);
}
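In the repartitioned variant, both inputs pass through a remote partitioned exchange on their join key, so rows with equal keys end up on the same worker and can be joined there. A self-contained sketch of that co-location property, assuming plain lists and a modulo hash in place of the real exchange:

import java.util.ArrayList;
import java.util.List;

public class RepartitionSketch {
    // Hash-partition join keys into buckets, the way a remote partitioned exchange
    // routes rows by the hash of "orderkey" / "orderkey_0".
    static List<List<Integer>> partitionByKey(List<Integer> keys, int partitions) {
        List<List<Integer>> buckets = new ArrayList<>();
        for (int i = 0; i < partitions; i++) {
            buckets.add(new ArrayList<>());
        }
        for (int key : keys) {
            buckets.get(Math.floorMod(key, partitions)).add(key);
        }
        return buckets;
    }

    public static void main(String[] args) {
        int partitions = 3;
        List<List<Integer>> probe = partitionByKey(List.of(1, 2, 3, 4, 5), partitions);
        List<List<Integer>> build = partitionByKey(List.of(2, 4), partitions);
        // Keys that are equal on both sides land in the same bucket index, so each
        // partition can be joined locally without any further data movement.
        for (int i = 0; i < partitions; i++) {
            System.out.println("partition " + i + ": probe=" + probe.get(i) + " build=" + build.get(i));
        }
    }
}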
use of io.prestosql.sql.planner.plan.ExchangeNode in project hetu-core by openlookeng.
the class PushProjectionThroughExchange method apply.
@Override
public Result apply(ProjectNode project, Captures captures, Context context) {
    ExchangeNode exchange = captures.get(CHILD);
    Set<Symbol> partitioningColumns = exchange.getPartitioningScheme().getPartitioning().getColumns();
    ImmutableList.Builder<PlanNode> newSourceBuilder = ImmutableList.builder();
    ImmutableList.Builder<List<Symbol>> inputsBuilder = ImmutableList.builder();
    for (int i = 0; i < exchange.getSources().size(); i++) {
        Map<Symbol, VariableReferenceExpression> outputToInputMap = extractExchangeOutputToInput(exchange, i, context.getSymbolAllocator().getTypes());
        Assignments.Builder projections = Assignments.builder();
        ImmutableList.Builder<Symbol> inputs = ImmutableList.builder();
        // Need to retain the partition keys for the exchange
        partitioningColumns.stream()
                .map(outputToInputMap::get)
                .forEach(nameReference -> {
                    Symbol symbol = new Symbol(nameReference.getName());
                    projections.put(symbol, nameReference);
                    inputs.add(symbol);
                });
        if (exchange.getPartitioningScheme().getHashColumn().isPresent()) {
            // Need to retain the hash symbol for the exchange
            projections.put(
                    exchange.getPartitioningScheme().getHashColumn().get(),
                    toVariableReference(exchange.getPartitioningScheme().getHashColumn().get(), context.getSymbolAllocator().getTypes()));
            inputs.add(exchange.getPartitioningScheme().getHashColumn().get());
        }
        if (exchange.getOrderingScheme().isPresent()) {
            // Need to retain ordering columns for the exchange
            exchange.getOrderingScheme().get().getOrderBy().stream()
                    .filter(symbol -> !partitioningColumns.contains(symbol))
                    .map(outputToInputMap::get)
                    .forEach(nameReference -> {
                        Symbol symbol = new Symbol(nameReference.getName());
                        projections.put(symbol, nameReference);
                        inputs.add(symbol);
                    });
        }
        for (Map.Entry<Symbol, RowExpression> projection : project.getAssignments().entrySet()) {
            checkArgument(!isExpression(projection.getValue()), "Cannot contain OriginalExpression after AddExchange");
            Map<VariableReferenceExpression, VariableReferenceExpression> variableOutputToInputMap = new LinkedHashMap<>();
            outputToInputMap.forEach((symbol, variable) -> variableOutputToInputMap.put(
                    new VariableReferenceExpression(symbol.getName(), context.getSymbolAllocator().getTypes().get(symbol)),
                    variable));
            RowExpression translatedExpression = RowExpressionVariableInliner.inlineVariables(variableOutputToInputMap, projection.getValue());
            Symbol symbol = context.getSymbolAllocator().newSymbol(translatedExpression);
            projections.put(symbol, translatedExpression);
            inputs.add(symbol);
        }
        newSourceBuilder.add(new ProjectNode(context.getIdAllocator().getNextId(), exchange.getSources().get(i), projections.build()));
        inputsBuilder.add(inputs.build());
    }
    // Construct the output symbols in the same order as the sources
    ImmutableList.Builder<Symbol> outputBuilder = ImmutableList.builder();
    partitioningColumns.forEach(outputBuilder::add);
    exchange.getPartitioningScheme().getHashColumn().ifPresent(outputBuilder::add);
    if (exchange.getOrderingScheme().isPresent()) {
        exchange.getOrderingScheme().get().getOrderBy().stream()
                .filter(symbol -> !partitioningColumns.contains(symbol))
                .forEach(outputBuilder::add);
    }
    for (Map.Entry<Symbol, RowExpression> projection : project.getAssignments().entrySet()) {
        outputBuilder.add(projection.getKey());
    }
    // outputBuilder contains all partition and hash symbols, so simply swap the output layout
    PartitioningScheme partitioningScheme = new PartitioningScheme(
            exchange.getPartitioningScheme().getPartitioning(),
            outputBuilder.build(),
            exchange.getPartitioningScheme().getHashColumn(),
            exchange.getPartitioningScheme().isReplicateNullsAndAny(),
            exchange.getPartitioningScheme().getBucketToPartition());
    PlanNode result = new ExchangeNode(
            exchange.getId(),
            exchange.getType(),
            exchange.getScope(),
            partitioningScheme,
            newSourceBuilder.build(),
            inputsBuilder.build(),
            exchange.getOrderingScheme(),
            AggregationNode.AggregationType.HASH);
    // we need to strip unnecessary symbols (hash, partitioning columns).
    return Result.ofPlanNode(
            restrictOutputs(context.getIdAllocator(), result, ImmutableSet.copyOf(project.getOutputSymbols()), true, context.getSymbolAllocator().getTypes())
                    .orElse(result));
}
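The per-source loop rewrites each projection from the exchange's output symbols into the corresponding source's input symbols before pushing it below the exchange; RowExpressionVariableInliner.inlineVariables performs that substitution over RowExpressions. A stripped-down sketch of the same substitution over a toy expression type, assuming Expr, Var, and BinOp as illustrative stand-ins rather than hetu-core classes:

import java.util.Map;

public class InlineVariablesSketch {
    // A toy expression tree: either a variable reference or a binary operation.
    sealed interface Expr permits Var, BinOp {}
    record Var(String name) implements Expr {}
    record BinOp(String op, Expr left, Expr right) implements Expr {}

    // Replace every variable by its mapped name, the way the rule rewrites a projection
    // from the exchange's output symbols to a particular source's input symbols.
    static Expr inline(Expr expr, Map<String, String> outputToInput) {
        if (expr instanceof Var var) {
            return new Var(outputToInput.getOrDefault(var.name(), var.name()));
        }
        BinOp binOp = (BinOp) expr;
        return new BinOp(binOp.op(), inline(binOp.left(), outputToInput), inline(binOp.right(), outputToInput));
    }

    public static void main(String[] args) {
        // A projection expressed over the exchange's output symbols: orderkey + custkey
        Expr projection = new BinOp("+", new Var("orderkey"), new Var("custkey"));
        // For one source, the exchange maps those outputs to differently named inputs (hypothetical names)
        Map<String, String> outputToInput = Map.of("orderkey", "orderkey_0", "custkey", "custkey_1");
        System.out.println(inline(projection, outputToInput));
        // BinOp[op=+, left=Var[name=orderkey_0], right=Var[name=custkey_1]]
    }
}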