Use of io.crate.planner.node.dql.join.Join in project crate by crate.
The class HashJoin, method build.
@Override
public ExecutionPlan build(PlannerContext plannerContext,
                           Set<PlanHint> hints,
                           ProjectionBuilder projectionBuilder,
                           int limit,
                           int offset,
                           @Nullable OrderBy order,
                           @Nullable Integer pageSizeHint,
                           Row params,
                           SubQueryResults subQueryResults) {
    ExecutionPlan leftExecutionPlan = lhs.build(plannerContext, hints, projectionBuilder, NO_LIMIT, 0, null, null, params, subQueryResults);
    ExecutionPlan rightExecutionPlan = rhs.build(plannerContext, hints, projectionBuilder, NO_LIMIT, 0, null, null, params, subQueryResults);
    LogicalPlan leftLogicalPlan = lhs;
    LogicalPlan rightLogicalPlan = rhs;
    boolean tablesSwitched = false;
    // Move the smaller relation to the right side: benchmarking
    // revealed that this improves performance in most cases.
    if (lhs.numExpectedRows() < rhs.numExpectedRows()) {
        tablesSwitched = true;
        leftLogicalPlan = rhs;
        rightLogicalPlan = lhs;
        ExecutionPlan tmp = leftExecutionPlan;
        leftExecutionPlan = rightExecutionPlan;
        rightExecutionPlan = tmp;
    }
    SubQueryAndParamBinder paramBinder = new SubQueryAndParamBinder(params, subQueryResults);
    Tuple<List<Symbol>, List<Symbol>> hashSymbols = extractHashJoinSymbolsFromJoinSymbolsAndSplitPerSide(tablesSwitched);
    ResultDescription leftResultDesc = leftExecutionPlan.resultDescription();
    ResultDescription rightResultDesc = rightExecutionPlan.resultDescription();
    Collection<String> joinExecutionNodes = leftResultDesc.nodeIds();
    List<Symbol> leftOutputs = leftLogicalPlan.outputs();
    List<Symbol> rightOutputs = rightLogicalPlan.outputs();
    MergePhase leftMerge = null;
    MergePhase rightMerge = null;
    // We can only run the join distributed if no remaining limit or offset must be applied to the
    // source relations: in a distributed join, every join node operates on a slice (modulo) of the
    // data, so no limit/offset could be applied there. Limit/offset can only be applied to the whole
    // data set after all partial rows from the shards are merged.
    boolean isDistributed = leftResultDesc.hasRemainingLimitOrOffset() == false &&
                            rightResultDesc.hasRemainingLimitOrOffset() == false;
    if (joinExecutionNodes.isEmpty()) {
        // The left source might have zero execution nodes, for example in the case of `sys.shards` without any tables.
        // If the join then also uses zero execution nodes, a distributed plan no longer works because
        // the source operators wouldn't have a downstream node to which they can send their results.
        // → we switch to non-distributed, which results in the join running on the handler node.
        isDistributed = false;
    }
    if (joinExecutionNodes.size() == 1
        && joinExecutionNodes.equals(rightResultDesc.nodeIds())
        && !rightResultDesc.hasRemainingLimitOrOffset()) {
        // If the left and the right plan are executed on the same single node, the merge phases
        // can be omitted. This is the case if the left and the right table each have only one shard
        // and both shards are located on the same node.
        leftExecutionPlan.setDistributionInfo(DistributionInfo.DEFAULT_SAME_NODE);
        rightExecutionPlan.setDistributionInfo(DistributionInfo.DEFAULT_SAME_NODE);
    } else {
        if (isDistributed) {
            // Run the join distributed, using the modulo distribution algorithm
            leftOutputs = setModuloDistribution(Lists2.map(hashSymbols.v1(), paramBinder), leftLogicalPlan.outputs(), leftExecutionPlan);
            rightOutputs = setModuloDistribution(Lists2.map(hashSymbols.v2(), paramBinder), rightLogicalPlan.outputs(), rightExecutionPlan);
        } else {
            // Run the join non-distributed on the handler node
            joinExecutionNodes = Collections.singletonList(plannerContext.handlerNode());
            leftExecutionPlan.setDistributionInfo(DistributionInfo.DEFAULT_BROADCAST);
            rightExecutionPlan.setDistributionInfo(DistributionInfo.DEFAULT_BROADCAST);
        }
        leftMerge = JoinOperations.buildMergePhaseForJoin(plannerContext, leftResultDesc, joinExecutionNodes);
        rightMerge = JoinOperations.buildMergePhaseForJoin(plannerContext, rightResultDesc, joinExecutionNodes);
    }
    List<Symbol> joinOutputs = Lists2.concat(leftOutputs, rightOutputs);
    HashJoinPhase joinPhase = new HashJoinPhase(
        plannerContext.jobId(),
        plannerContext.nextExecutionPhaseId(),
        "hash-join",
        Collections.singletonList(JoinOperations.createJoinProjection(outputs, joinOutputs)),
        leftMerge,
        rightMerge,
        leftOutputs.size(),
        rightOutputs.size(),
        joinExecutionNodes,
        InputColumns.create(paramBinder.apply(joinCondition), joinOutputs),
        InputColumns.create(Lists2.map(hashSymbols.v1(), paramBinder), new InputColumns.SourceSymbols(leftOutputs)),
        InputColumns.create(Lists2.map(hashSymbols.v2(), paramBinder), new InputColumns.SourceSymbols(rightOutputs)),
        Symbols.typeView(leftOutputs),
        leftLogicalPlan.estimatedRowSize(),
        leftLogicalPlan.numExpectedRows());
    return new Join(joinPhase, leftExecutionPlan, rightExecutionPlan, TopN.NO_LIMIT, 0, TopN.NO_LIMIT, outputs.size(), null);
}
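The distributed branch above relies on setModuloDistribution, which makes both sides route each row by a hash of its join key so that matching rows from the left and right relation always meet on the same join node. As a minimal, standalone sketch of that routing rule (plain Java, not the CrateDB API; pickJoinNode is a hypothetical helper):

import java.util.List;
import java.util.Objects;

// Standalone sketch, not CrateDB code: illustrates the modulo distribution idea.
// Every upstream node sends each row to the downstream join node at index
// hash(joinKey) % numberOfJoinNodes, so rows with equal join keys from both
// sides of the join always end up on the same node.
public class ModuloDistributionSketch {

    // Pick the downstream join node for a row based on its join-key value.
    static String pickJoinNode(Object joinKey, List<String> joinNodes) {
        int hash = Objects.hashCode(joinKey);
        int bucket = Math.floorMod(hash, joinNodes.size()); // floorMod avoids negative buckets
        return joinNodes.get(bucket);
    }

    public static void main(String[] args) {
        List<String> joinNodes = List.of("node-1", "node-2", "node-3");
        // The same key always maps to the same node, regardless of which side emits it:
        System.out.println(pickJoinNode(42, joinNodes)); // left-side row with t1.i = 42
        System.out.println(pickJoinNode(42, joinNodes)); // right-side row with t2.i = 42 -> same node
        System.out.println(pickJoinNode(7, joinNodes));  // a different key may go elsewhere
    }
}

Because every node only ever sees a key-complete slice of the data, each slice can be hash-joined independently, which is also why a remaining limit/offset on a source relation rules this mode out.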
Use of io.crate.planner.node.dql.join.Join in project crate by crate.
The class NestedLoopJoin, method build.
@Override
public ExecutionPlan build(PlannerContext plannerContext,
                           Set<PlanHint> hints,
                           ProjectionBuilder projectionBuilder,
                           int limit,
                           int offset,
                           @Nullable OrderBy order,
                           @Nullable Integer pageSizeHint,
                           Row params,
                           SubQueryResults subQueryResults) {
    /*
     * Benchmarks reveal that if rows are filtered out, distributed execution gives better performance.
     * Therefore, if `isFiltered` is true (there is a joinCondition or filtering after the join operation),
     * that is a good indication that distributed execution will be faster.
     *
     * We may at some point add some kind of session setting to override this behaviour,
     * or otherwise come up with a better heuristic.
     */
    Integer childPageSizeHint = !isFiltered && limit != TopN.NO_LIMIT
        ? limitAndOffset(limit, offset)
        : null;
    ExecutionPlan left = lhs.build(plannerContext, hints, projectionBuilder, NO_LIMIT, 0, null, childPageSizeHint, params, subQueryResults);
    ExecutionPlan right = rhs.build(plannerContext, hints, projectionBuilder, NO_LIMIT, 0, null, childPageSizeHint, params, subQueryResults);
    PositionalOrderBy orderByFromLeft = left.resultDescription().orderBy();
    boolean hasDocTables = baseTables.stream().anyMatch(r -> r instanceof DocTableRelation);
    boolean isDistributed = hasDocTables && isFiltered && !joinType.isOuter();
    LogicalPlan leftLogicalPlan = lhs;
    LogicalPlan rightLogicalPlan = rhs;
    isDistributed = isDistributed &&
                    (!left.resultDescription().nodeIds().isEmpty() && !right.resultDescription().nodeIds().isEmpty());
    boolean blockNlPossible = !isDistributed && isBlockNlPossible(left, right);
    JoinType joinType = this.joinType;
    if (!orderByWasPushedDown && joinType.supportsInversion() &&
        (isDistributed && lhs.numExpectedRows() < rhs.numExpectedRows() && orderByFromLeft == null) ||
        (blockNlPossible && lhs.numExpectedRows() > rhs.numExpectedRows())) {
        // 1) The right side is always broadcast, so for performance reasons we switch the tables so that
        //    the right table is the smaller one (by number of rows). If the left relation has a pushed-down
        //    OrderBy that needs to be preserved, the switch is not possible.
        // 2) For block nested loop, the left side should always be the smaller one. Benchmarks have shown
        //    that performance decreases if the left side is much larger and no limit is applied.
        ExecutionPlan tmpExecutionPlan = left;
        left = right;
        right = tmpExecutionPlan;
        leftLogicalPlan = rhs;
        rightLogicalPlan = lhs;
        joinType = joinType.invert();
    }
    Tuple<Collection<String>, List<MergePhase>> joinExecutionNodesAndMergePhases =
        configureExecution(left, right, plannerContext, isDistributed);
    List<Symbol> joinOutputs = Lists2.concat(leftLogicalPlan.outputs(), rightLogicalPlan.outputs());
    SubQueryAndParamBinder paramBinder = new SubQueryAndParamBinder(params, subQueryResults);
    Symbol joinInput = null;
    if (joinCondition != null) {
        joinInput = InputColumns.create(paramBinder.apply(joinCondition), joinOutputs);
    }
    NestedLoopPhase nlPhase = new NestedLoopPhase(
        plannerContext.jobId(),
        plannerContext.nextExecutionPhaseId(),
        isDistributed ? "distributed-nested-loop" : "nested-loop",
        Collections.singletonList(JoinOperations.createJoinProjection(outputs, joinOutputs)),
        joinExecutionNodesAndMergePhases.v2().get(0),
        joinExecutionNodesAndMergePhases.v2().get(1),
        leftLogicalPlan.outputs().size(),
        rightLogicalPlan.outputs().size(),
        joinExecutionNodesAndMergePhases.v1(),
        joinType,
        joinInput,
        Symbols.typeView(leftLogicalPlan.outputs()),
        leftLogicalPlan.estimatedRowSize(),
        leftLogicalPlan.numExpectedRows(),
        blockNlPossible);
    return new Join(nlPhase, left, right, TopN.NO_LIMIT, 0, TopN.NO_LIMIT, outputs.size(), orderByFromLeft);
}
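The switch block above only works because swapping the inputs of an outer join changes its meaning unless the join type is inverted along with the inputs, which is what joinType.invert() does. Here is a minimal sketch of that rule, using a simplified hypothetical stand-in enum (the real JoinType covers more variants and guards the swap with supportsInversion()):

// Standalone sketch, not CrateDB's JoinType: shows why swapping the inputs of a
// nested loop join must also invert the join type, as done via `joinType.invert()`.
enum SketchJoinType {
    INNER, CROSS, LEFT, RIGHT;

    // INNER and CROSS are symmetric; LEFT and RIGHT swap when the inputs swap.
    SketchJoinType invert() {
        switch (this) {
            case LEFT:  return RIGHT;
            case RIGHT: return LEFT;
            default:    return this;
        }
    }
}

public class JoinInversionSketch {
    public static void main(String[] args) {
        // `a LEFT JOIN b` keeps all rows of `a`; after switching the inputs the join
        // must become `b RIGHT JOIN a` to preserve exactly that result set.
        System.out.println(SketchJoinType.LEFT.invert());  // RIGHT
        System.out.println(SketchJoinType.CROSS.invert()); // CROSS
    }
}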
Use of io.crate.planner.node.dql.join.Join in project crate by crate.
The class SubQueryPlannerTest, method testJoinWithGlobalAggregationOnSubSelectsWithLimitAndOffset.
@Test
public void testJoinWithGlobalAggregationOnSubSelectsWithLimitAndOffset() throws Exception {
    Join join = e.plan("select count(*) from " +
                       " (select i, a from t1 order by a limit 10 offset 2) t1 " +
                       "join" +
                       " (select i from t2 order by i desc limit 5 offset 5) t2 " +
                       "on t1.i = t2.i");
    QueryThenFetch leftQtf = (QueryThenFetch) join.left();
    Collect left = (Collect) leftQtf.subPlan();
    assertThat("1 node, otherwise mergePhases would be required", left.nodeIds().size(), is(1));
    assertThat(left.collectPhase().toCollect(), isSQL("doc.t1._fetchid, doc.t1.a"));
    assertThat(((RoutedCollectPhase) left.collectPhase()).orderBy(), isSQL("doc.t1.a"));
    assertThat(left.collectPhase().projections(), contains(isTopN(10, 2), instanceOf(FetchProjection.class)));
    Collect right = (Collect) join.right();
    assertThat("1 node, otherwise mergePhases would be required", right.nodeIds().size(), is(1));
    assertThat(((RoutedCollectPhase) right.collectPhase()).orderBy(), isSQL("doc.t2.i DESC"));
    assertThat(right.collectPhase().projections(), contains(isTopN(5, 5)));
    List<Projection> nlProjections = join.joinPhase().projections();
    assertThat(nlProjections, contains(instanceOf(EvalProjection.class), instanceOf(AggregationProjection.class)));
}
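The isTopN(10, 2) and isTopN(5, 5) matchers above assert a TopN projection with a given limit and offset, matching the `limit 10 offset 2` and `limit 5 offset 5` clauses of the sub-selects. As a worked illustration of that semantics (standalone Java, not the test helpers): applied to an already-ordered input, a TopN projection skips `offset` rows and then keeps at most `limit` rows.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Standalone sketch of what a TopN projection with limit 10 and offset 2
// (the `isTopN(10, 2)` assertion above) does to an already-ordered row stream.
public class TopNSketch {
    static <T> List<T> topN(Stream<T> orderedRows, int limit, int offset) {
        return orderedRows.skip(offset).limit(limit).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // `order by a limit 10 offset 2` over 15 sorted rows keeps rows 3..12.
        List<Integer> sorted = Stream.iterate(1, i -> i + 1).limit(15).collect(Collectors.toList());
        System.out.println(topN(sorted.stream(), 10, 2)); // [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    }
}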
Use of io.crate.planner.node.dql.join.Join in project crate by crate.
The class SubQueryPlannerTest, method testNestedSimpleSelectWithJoin.
@Test
public void testNestedSimpleSelectWithJoin() throws Exception {
    Join nl = e.plan("select t1x from (" +
                     "select t1.x as t1x, t2.i as t2i from t1 as t1, t1 as t2 order by t1x asc limit 10" +
                     ") t order by t1x desc limit 3");
    List<Projection> projections = nl.joinPhase().projections();
    assertThat(projections, Matchers.contains(
        instanceOf(EvalProjection.class),
        isTopN(10, 0),
        instanceOf(OrderedTopNProjection.class),
        instanceOf(EvalProjection.class),
        isTopN(3, 0)));
    assertThat(projections.get(0).outputs(), isSQL("INPUT(1), INPUT(1)"));
    assertThat(projections.get(4).outputs(), isSQL("INPUT(0)"));
}
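The contains(...) assertion above pins down a five-step projection pipeline that the join phase applies in order to the joined rows. The following standalone sketch (hypothetical types, not CrateDB's Projection classes) models each projection as a row-list transformation to make that expected order easier to read:

import java.util.List;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;

// Standalone sketch: the asserted projection chain
// (eval -> top-n 10 -> ordered top-n -> eval -> top-n 3) applied in order.
public class ProjectionPipelineSketch {
    static List<Integer> apply(List<Integer> rows, List<UnaryOperator<List<Integer>>> projections) {
        for (UnaryOperator<List<Integer>> p : projections) {
            rows = p.apply(rows);
        }
        return rows;
    }

    public static void main(String[] args) {
        List<UnaryOperator<List<Integer>>> pipeline = List.of(
            rows -> rows,                                                                  // eval: column re-mapping (identity here)
            rows -> rows.stream().limit(10).collect(Collectors.toList()),                  // top-n: limit 10, offset 0
            rows -> rows.stream().sorted((a, b) -> b - a).collect(Collectors.toList()),    // ordered top-n: re-sort descending
            rows -> rows,                                                                  // eval: strip helper columns (identity here)
            rows -> rows.stream().limit(3).collect(Collectors.toList())                    // top-n: limit 3, offset 0
        );
        System.out.println(apply(List.of(5, 1, 4, 2, 3), pipeline)); // [5, 4, 3]
    }
}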
Use of io.crate.planner.node.dql.join.Join in project crate by crate.
The class SubQueryPlannerTest, method testJoinOnSubSelectsWithLimitAndOffset.
@Test
@SuppressWarnings("unchecked")
public void testJoinOnSubSelectsWithLimitAndOffset() throws Exception {
    Join join = e.plan("select * from " +
                       " (select i, a from t1 order by a limit 10 offset 2) t1 " +
                       "join" +
                       " (select i from t2 order by b limit 5 offset 5) t2 " +
                       "on t1.i = t2.i");
    assertThat(join.joinPhase().projections().size(), is(1));
    assertThat(join.joinPhase().projections().get(0), instanceOf(EvalProjection.class));
    QueryThenFetch leftQtf = (QueryThenFetch) join.left();
    Collect left = (Collect) leftQtf.subPlan();
    assertThat("1 node, otherwise mergePhases would be required", left.nodeIds().size(), is(1));
    assertThat(left.orderBy(), isSQL("OrderByPositions{indices=[1], reverseFlags=[false], nullsFirst=[false]}"));
    assertThat(left.collectPhase().projections(), contains(isTopN(10, 2), instanceOf(FetchProjection.class)));
    QueryThenFetch rightQtf = (QueryThenFetch) join.right();
    Collect right = (Collect) rightQtf.subPlan();
    assertThat("1 node, otherwise mergePhases would be required", right.nodeIds().size(), is(1));
    assertThat(((RoutedCollectPhase) right.collectPhase()).orderBy(), isSQL("doc.t2.b"));
    assertThat(right.collectPhase().projections(), contains(
        isTopN(5, 5),
        instanceOf(FetchProjection.class),
        // strips `b`, used in the order by, from the outputs
        instanceOf(EvalProjection.class)));
}