Example 26 with CompilerException

Use of org.apache.flink.optimizer.CompilerException in project flink by apache: class BinaryUnionNode, method getAlternativePlans.

@Override
public List<PlanNode> getAlternativePlans(CostEstimator estimator) {
    // check that union has only a single successor
    if (this.getOutgoingConnections().size() > 1) {
        throw new CompilerException("BinaryUnionNode has more than one successor.");
    }
    boolean childrenSkippedDueToReplicatedInput = false;
    // check if we have a cached version
    if (this.cachedPlans != null) {
        return this.cachedPlans;
    }
    // step down to all producer nodes and calculate alternative plans
    final List<? extends PlanNode> subPlans1 = getFirstPredecessorNode().getAlternativePlans(estimator);
    final List<? extends PlanNode> subPlans2 = getSecondPredecessorNode().getAlternativePlans(estimator);
    List<DagConnection> broadcastConnections = getBroadcastConnections();
    if (broadcastConnections != null && broadcastConnections.size() > 0) {
        throw new CompilerException("Found BroadcastVariables on a Union operation");
    }
    final ArrayList<PlanNode> outputPlans = new ArrayList<PlanNode>();
    final List<Set<? extends NamedChannel>> broadcastPlanChannels = Collections.emptyList();
    final BinaryUnionOpDescriptor operator = new BinaryUnionOpDescriptor();
    final RequestedLocalProperties noLocalProps = new RequestedLocalProperties();
    final ExecutionMode input1Mode = this.input1.getDataExchangeMode();
    final ExecutionMode input2Mode = this.input2.getDataExchangeMode();
    final int parallelism = getParallelism();
    final int inParallelism1 = getFirstPredecessorNode().getParallelism();
    final int inParallelism2 = getSecondPredecessorNode().getParallelism();
    final boolean dopChange1 = parallelism != inParallelism1;
    final boolean dopChange2 = parallelism != inParallelism2;
    final boolean input1breakPipeline = this.input1.isBreakingPipeline();
    final boolean input2breakPipeline = this.input2.isBreakingPipeline();
    // create all candidates
    for (PlanNode child1 : subPlans1) {
        if (child1.getGlobalProperties().isFullyReplicated()) {
            // fully replicated input is always locally forwarded if parallelism is not changed
            if (dopChange1) {
                // cannot continue with this child
                childrenSkippedDueToReplicatedInput = true;
                continue;
            } else {
                this.input1.setShipStrategy(ShipStrategyType.FORWARD);
            }
        }
        for (PlanNode child2 : subPlans2) {
            if (child2.getGlobalProperties().isFullyReplicated()) {
                // fully replicated input is always locally forwarded if parallelism is not changed
                if (dopChange2) {
                    // cannot continue with this child
                    childrenSkippedDueToReplicatedInput = true;
                    continue;
                } else {
                    this.input2.setShipStrategy(ShipStrategyType.FORWARD);
                }
            }
            // check that the children go together: that is the case if they build upon the same
            // candidate at the joined branch plan.
            if (!areBranchCompatible(child1, child2)) {
                continue;
            }
            for (RequestedGlobalProperties igps : this.channelProps) {
                // create a candidate channel for the first input. mark it cached, if the connection says so
                Channel c1 = new Channel(child1, this.input1.getMaterializationMode());
                if (this.input1.getShipStrategy() == null) {
                    // free to choose the ship strategy
                    igps.parameterizeChannel(c1, dopChange1, input1Mode, input1breakPipeline);
                    // if the parallelism changed, make sure that we cancel out properties, unless the
                    // ship strategy preserves/establishes them even under changing parallelisms
                    if (dopChange1 && !c1.getShipStrategy().isNetworkStrategy()) {
                        c1.getGlobalProperties().reset();
                    }
                } else {
                    // ship strategy fixed by compiler hint
                    ShipStrategyType shipStrategy = this.input1.getShipStrategy();
                    DataExchangeMode exMode = DataExchangeMode.select(input1Mode, shipStrategy, input1breakPipeline);
                    if (this.keys1 != null) {
                        c1.setShipStrategy(this.input1.getShipStrategy(), this.keys1.toFieldList(), exMode);
                    } else {
                        c1.setShipStrategy(this.input1.getShipStrategy(), exMode);
                    }
                    if (dopChange1) {
                        c1.adjustGlobalPropertiesForFullParallelismChange();
                    }
                }
                // create a candidate channel for the second input. mark it cached, if the connection says so
                Channel c2 = new Channel(child2, this.input2.getMaterializationMode());
                if (this.input2.getShipStrategy() == null) {
                    // free to choose the ship strategy
                    igps.parameterizeChannel(c2, dopChange2, input2Mode, input2breakPipeline);
                    // if the parallelism changed, make sure that we cancel out properties, unless the
                    // ship strategy preserves/establishes them even under changing parallelisms
                    if (dopChange2 && !c2.getShipStrategy().isNetworkStrategy()) {
                        c2.getGlobalProperties().reset();
                    }
                } else {
                    // ship strategy fixed by compiler hint
                    ShipStrategyType shipStrategy = this.input2.getShipStrategy();
                    DataExchangeMode exMode = DataExchangeMode.select(input2Mode, shipStrategy, input2breakPipeline);
                    if (this.keys2 != null) {
                        c2.setShipStrategy(this.input2.getShipStrategy(), this.keys2.toFieldList(), exMode);
                    } else {
                        c2.setShipStrategy(this.input2.getShipStrategy(), exMode);
                    }
                    if (dopChange2) {
                        c2.adjustGlobalPropertiesForFullParallelismChange();
                    }
                }
                // get the global properties and clear unique fields (not preserved anyway during the union)
                GlobalProperties p1 = c1.getGlobalProperties();
                GlobalProperties p2 = c2.getGlobalProperties();
                p1.clearUniqueFieldCombinations();
                p2.clearUniqueFieldCombinations();
                // adjust the partitionings if they exist but are not equal: both channels may fulfill
                // the requirement (e.g., ANY_PARTITIONING on field 0) while one is range partitioned
                // and the other is hash partitioned on that field.
                if (!igps.isTrivial() && !(p1.equals(p2))) {
                    if (c1.getShipStrategy() == ShipStrategyType.FORWARD && c2.getShipStrategy() != ShipStrategyType.FORWARD) {
                        // adjust c2 to c1
                        c2 = c2.clone();
                        p1.parameterizeChannel(c2, dopChange2, input2Mode, input2breakPipeline);
                    } else if (c2.getShipStrategy() == ShipStrategyType.FORWARD && c1.getShipStrategy() != ShipStrategyType.FORWARD) {
                        // adjust c1 to c2
                        c1 = c1.clone();
                        p2.parameterizeChannel(c1, dopChange1, input1Mode, input1breakPipeline);
                    } else if (c1.getShipStrategy() == ShipStrategyType.FORWARD && c2.getShipStrategy() == ShipStrategyType.FORWARD) {
                        boolean adjustC1 = c1.getEstimatedOutputSize() <= 0
                                || c2.getEstimatedOutputSize() <= 0
                                || c1.getEstimatedOutputSize() <= c2.getEstimatedOutputSize();
                        if (adjustC1) {
                            c2 = c2.clone();
                            p1.parameterizeChannel(c2, dopChange2, input2Mode, input2breakPipeline);
                        } else {
                            c1 = c1.clone();
                            p2.parameterizeChannel(c1, dopChange1, input1Mode, input1breakPipeline);
                        }
                    } else {
                        // unreachable: if neither strategy is FORWARD, both channels were parameterized
                        // with the same requested properties and must therefore match
                        throw new CompilerException("Bug in Plan Enumeration for Union Node.");
                    }
                }
                instantiate(operator, c1, c2, broadcastPlanChannels, outputPlans, estimator, igps, igps, noLocalProps, noLocalProps);
            }
        }
    }
    if (outputPlans.isEmpty()) {
        if (childrenSkippedDueToReplicatedInput) {
            throw new CompilerException("No plan meeting the requirements could be created @ " + this + ". Most likely reason: Invalid use of replicated input.");
        } else {
            throw new CompilerException("No plan meeting the requirements could be created @ " + this + ". Most likely reason: Too restrictive plan hints.");
        }
    }
    // cost and prune the plans
    for (PlanNode node : outputPlans) {
        estimator.costOperator(node);
    }
    prunePlanAlternatives(outputPlans);
    outputPlans.trimToSize();
    this.cachedPlans = outputPlans;
    return outputPlans;
}
Also used : RequestedGlobalProperties(org.apache.flink.optimizer.dataproperties.RequestedGlobalProperties) Set(java.util.Set) FieldSet(org.apache.flink.api.common.operators.util.FieldSet) Channel(org.apache.flink.optimizer.plan.Channel) NamedChannel(org.apache.flink.optimizer.plan.NamedChannel) ArrayList(java.util.ArrayList) ExecutionMode(org.apache.flink.api.common.ExecutionMode) ShipStrategyType(org.apache.flink.runtime.operators.shipping.ShipStrategyType) RequestedLocalProperties(org.apache.flink.optimizer.dataproperties.RequestedLocalProperties) PlanNode(org.apache.flink.optimizer.plan.PlanNode) GlobalProperties(org.apache.flink.optimizer.dataproperties.GlobalProperties) DataExchangeMode(org.apache.flink.runtime.io.network.DataExchangeMode) CompilerException(org.apache.flink.optimizer.CompilerException) BinaryUnionOpDescriptor(org.apache.flink.optimizer.operators.BinaryUnionOpDescriptor)
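
The double-FORWARD case above hides a tie-breaking rule worth spelling out: when both union inputs forward their data but their global properties differ, the properties of the first channel are kept, and the second channel is cloned and re-parameterized, exactly when the first channel's estimated output size is unknown or no larger than the second's. A minimal standalone sketch of that predicate (hypothetical helper, not part of Flink):

// Mirrors the adjustC1 condition in getAlternativePlans: returns true when the first
// channel's properties should win and the second channel should be re-parameterized.
// Estimates <= 0 mean "unknown" and default to keeping the first channel.
static boolean keepFirstChannelProperties(long estimatedOutputSize1, long estimatedOutputSize2) {
    return estimatedOutputSize1 <= 0
            || estimatedOutputSize2 <= 0
            || estimatedOutputSize1 <= estimatedOutputSize2;
}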

Example 27 with CompilerException

Use of org.apache.flink.optimizer.CompilerException in project flink by apache: class GroupReduceNode, method initPossibleProperties.

private List<OperatorDescriptorSingle> initPossibleProperties(Partitioner<?> customPartitioner) {
    // see if an internal hint dictates the strategy to use
    final Configuration conf = getOperator().getParameters();
    final String localStrategy = conf.getString(Optimizer.HINT_LOCAL_STRATEGY, null);
    final boolean useCombiner;
    if (localStrategy != null) {
        if (Optimizer.HINT_LOCAL_STRATEGY_SORT.equals(localStrategy)) {
            useCombiner = false;
        } else if (Optimizer.HINT_LOCAL_STRATEGY_COMBINING_SORT.equals(localStrategy)) {
            if (!isCombineable()) {
                Optimizer.LOG.warn("Strategy hint for GroupReduce '" + getOperator().getName() + "' requires combinable reduce, but user function is not marked combinable.");
            }
            useCombiner = true;
        } else {
            throw new CompilerException("Invalid local strategy hint for match contract: " + localStrategy);
        }
    } else {
        useCombiner = isCombineable();
    }
    // check if we can work with a grouping (simple reducer), or if we need ordering because of a group order
    Ordering groupOrder = null;
    if (getOperator() instanceof GroupReduceOperatorBase) {
        groupOrder = getOperator().getGroupOrder();
        if (groupOrder != null && groupOrder.getNumberOfFields() == 0) {
            groupOrder = null;
        }
    }
    OperatorDescriptorSingle props = useCombiner
            ? (this.keys == null
                    ? new AllGroupWithPartialPreGroupProperties()
                    : new GroupReduceWithCombineProperties(this.keys, groupOrder, customPartitioner))
            : (this.keys == null
                    ? new AllGroupReduceProperties()
                    : new GroupReduceProperties(this.keys, groupOrder, customPartitioner));
    return Collections.singletonList(props);
}
Also used : OperatorDescriptorSingle(org.apache.flink.optimizer.operators.OperatorDescriptorSingle) AllGroupWithPartialPreGroupProperties(org.apache.flink.optimizer.operators.AllGroupWithPartialPreGroupProperties) Configuration(org.apache.flink.configuration.Configuration) GroupReduceWithCombineProperties(org.apache.flink.optimizer.operators.GroupReduceWithCombineProperties) AllGroupReduceProperties(org.apache.flink.optimizer.operators.AllGroupReduceProperties) Ordering(org.apache.flink.api.common.operators.Ordering) GroupReduceOperatorBase(org.apache.flink.api.common.operators.base.GroupReduceOperatorBase) CompilerException(org.apache.flink.optimizer.CompilerException) GroupReduceProperties(org.apache.flink.optimizer.operators.GroupReduceProperties)
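
For context, the local-strategy hint examined above travels in the operator's parameter Configuration under the Optimizer.HINT_LOCAL_STRATEGY key, so any unrecognized value ends in the CompilerException branch. A hedged sketch of how such a hint could be attached (the operator variable is hypothetical):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.Optimizer;

// 'operator' stands for some Operator<?> under construction (hypothetical)
Configuration params = operator.getParameters();
// request the combining sort strategy; an arbitrary string here would be rejected
// by initPossibleProperties with "Invalid local strategy hint ..."
params.setString(Optimizer.HINT_LOCAL_STRATEGY, Optimizer.HINT_LOCAL_STRATEGY_COMBINING_SORT);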

Example 28 with CompilerException

Use of org.apache.flink.optimizer.CompilerException in project flink by apache: class JoinNode, method getDataProperties.

private List<OperatorDescriptorDual> getDataProperties(InnerJoinOperatorBase<?, ?, ?, ?> joinOperatorBase, JoinHint joinHint, Partitioner<?> customPartitioner) {
    // see if an internal hint dictates the strategy to use
    Configuration conf = joinOperatorBase.getParameters();
    String localStrategy = conf.getString(Optimizer.HINT_LOCAL_STRATEGY, null);
    if (localStrategy != null) {
        final AbstractJoinDescriptor fixedDriverStrat;
        if (Optimizer.HINT_LOCAL_STRATEGY_SORT_BOTH_MERGE.equals(localStrategy)
                || Optimizer.HINT_LOCAL_STRATEGY_SORT_FIRST_MERGE.equals(localStrategy)
                || Optimizer.HINT_LOCAL_STRATEGY_SORT_SECOND_MERGE.equals(localStrategy)
                || Optimizer.HINT_LOCAL_STRATEGY_MERGE.equals(localStrategy)) {
            fixedDriverStrat = new SortMergeInnerJoinDescriptor(this.keys1, this.keys2);
        } else if (Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_FIRST.equals(localStrategy)) {
            fixedDriverStrat = new HashJoinBuildFirstProperties(this.keys1, this.keys2);
        } else if (Optimizer.HINT_LOCAL_STRATEGY_HASH_BUILD_SECOND.equals(localStrategy)) {
            fixedDriverStrat = new HashJoinBuildSecondProperties(this.keys1, this.keys2);
        } else {
            throw new CompilerException("Invalid local strategy hint for match contract: " + localStrategy);
        }
        if (customPartitioner != null) {
            fixedDriverStrat.setCustomPartitioner(customPartitioner);
        }
        ArrayList<OperatorDescriptorDual> list = new ArrayList<OperatorDescriptorDual>();
        list.add(fixedDriverStrat);
        return list;
    } else {
        ArrayList<OperatorDescriptorDual> list = new ArrayList<OperatorDescriptorDual>();
        joinHint = joinHint == null ? JoinHint.OPTIMIZER_CHOOSES : joinHint;
        switch(joinHint) {
            case BROADCAST_HASH_FIRST:
                list.add(new HashJoinBuildFirstProperties(this.keys1, this.keys2, true, false, false));
                break;
            case BROADCAST_HASH_SECOND:
                list.add(new HashJoinBuildSecondProperties(this.keys1, this.keys2, false, true, false));
                break;
            case REPARTITION_HASH_FIRST:
                list.add(new HashJoinBuildFirstProperties(this.keys1, this.keys2, false, false, true));
                break;
            case REPARTITION_HASH_SECOND:
                list.add(new HashJoinBuildSecondProperties(this.keys1, this.keys2, false, false, true));
                break;
            case REPARTITION_SORT_MERGE:
                list.add(new SortMergeInnerJoinDescriptor(this.keys1, this.keys2, false, false, true));
                break;
            case OPTIMIZER_CHOOSES:
                list.add(new SortMergeInnerJoinDescriptor(this.keys1, this.keys2));
                list.add(new HashJoinBuildFirstProperties(this.keys1, this.keys2));
                list.add(new HashJoinBuildSecondProperties(this.keys1, this.keys2));
                break;
            default:
                throw new CompilerException("Unrecognized join hint: " + joinHint);
        }
        if (customPartitioner != null) {
            for (OperatorDescriptorDual descr : list) {
                ((AbstractJoinDescriptor) descr).setCustomPartitioner(customPartitioner);
            }
        }
        return list;
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) HashJoinBuildFirstProperties(org.apache.flink.optimizer.operators.HashJoinBuildFirstProperties) HashJoinBuildSecondProperties(org.apache.flink.optimizer.operators.HashJoinBuildSecondProperties) ArrayList(java.util.ArrayList) CompilerException(org.apache.flink.optimizer.CompilerException) AbstractJoinDescriptor(org.apache.flink.optimizer.operators.AbstractJoinDescriptor) SortMergeInnerJoinDescriptor(org.apache.flink.optimizer.operators.SortMergeInnerJoinDescriptor) OperatorDescriptorDual(org.apache.flink.optimizer.operators.OperatorDescriptorDual)
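
At the DataSet API level, the JoinHint that getDataProperties switches on is supplied when the join is declared. A hedged usage sketch, assuming two hypothetical inputs left and right of type DataSet<Tuple2<Integer, String>>:

import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.tuple.Tuple2;

// BROADCAST_HASH_FIRST selects the HashJoinBuildFirstProperties branch above;
// a hint the switch does not recognize would hit the CompilerException default case.
DataSet<Tuple2<Integer, String>> joined =
        left.join(right, JoinHint.BROADCAST_HASH_FIRST)
            .where(0)
            .equalTo(0)
            .projectFirst(0)
            .projectSecond(1);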

Example 29 with CompilerException

Use of org.apache.flink.optimizer.CompilerException in project flink by apache: class GraphCreatingVisitor, method postVisit.

@Override
public void postVisit(Operator<?> c) {
    OptimizerNode n = this.con2node.get(c);
    // first connect to the predecessors
    n.setInput(this.con2node, this.defaultDataExchangeMode);
    n.setBroadcastInputs(this.con2node, this.defaultDataExchangeMode);
    // if the node represents a bulk iteration, we recursively translate the data flow now
    if (n instanceof BulkIterationNode) {
        final BulkIterationNode iterNode = (BulkIterationNode) n;
        final BulkIterationBase<?> iter = iterNode.getIterationContract();
        // pass a copy of the non-iterative part into the iteration translation,
        // in case the iteration references its closure
        HashMap<Operator<?>, OptimizerNode> closure = new HashMap<Operator<?>, OptimizerNode>(con2node);
        // first, recursively build the data flow for the step function
        final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true, iterNode.getParallelism(), defaultDataExchangeMode, closure);
        BulkPartialSolutionNode partialSolution;
        iter.getNextPartialSolution().accept(recursiveCreator);
        partialSolution = (BulkPartialSolutionNode) recursiveCreator.con2node.get(iter.getPartialSolution());
        OptimizerNode rootOfStepFunction = recursiveCreator.con2node.get(iter.getNextPartialSolution());
        if (partialSolution == null) {
            throw new CompilerException("Error: The step functions result does not depend on the partial solution.");
        }
        OptimizerNode terminationCriterion = null;
        if (iter.getTerminationCriterion() != null) {
            terminationCriterion = recursiveCreator.con2node.get(iter.getTerminationCriterion());
            // no intermediate node yet, traverse from the termination criterion to build the missing parts
            if (terminationCriterion == null) {
                iter.getTerminationCriterion().accept(recursiveCreator);
                terminationCriterion = recursiveCreator.con2node.get(iter.getTerminationCriterion());
            }
        }
        iterNode.setPartialSolution(partialSolution);
        iterNode.setNextPartialSolution(rootOfStepFunction, terminationCriterion);
        // go over the contained data flow and mark the dynamic path nodes
        StaticDynamicPathIdentifier identifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
        iterNode.acceptForStepFunction(identifier);
    } else if (n instanceof WorksetIterationNode) {
        final WorksetIterationNode iterNode = (WorksetIterationNode) n;
        final DeltaIterationBase<?, ?> iter = iterNode.getIterationContract();
        // we need to ensure that both the next-workset and the solution-set-delta depend on the workset.
        // One check is for free during the translation, we do the other check here as a pre-condition
        {
            StepFunctionValidator wsf = new StepFunctionValidator();
            iter.getNextWorkset().accept(wsf);
            if (!wsf.hasFoundWorkset()) {
                throw new CompilerException("In the given program, the next workset does not depend on the workset. " + "This is a prerequisite in delta iterations.");
            }
        }
        // calculate the closure of the anonymous function
        HashMap<Operator<?>, OptimizerNode> closure = new HashMap<Operator<?>, OptimizerNode>(con2node);
        // first, recursively build the data flow for the step function
        final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true, iterNode.getParallelism(), defaultDataExchangeMode, closure);
        // descend from the solution set delta. check that it depends on both the workset
        // and the solution set. If it does depend on both, this descent creates both nodes
        iter.getSolutionSetDelta().accept(recursiveCreator);
        final WorksetNode worksetNode = (WorksetNode) recursiveCreator.con2node.get(iter.getWorkset());
        if (worksetNode == null) {
            throw new CompilerException("In the given program, the solution set delta does not depend on the workset." + "This is a prerequisite in delta iterations.");
        }
        iter.getNextWorkset().accept(recursiveCreator);
        SolutionSetNode solutionSetNode = (SolutionSetNode) recursiveCreator.con2node.get(iter.getSolutionSet());
        if (solutionSetNode == null || solutionSetNode.getOutgoingConnections() == null || solutionSetNode.getOutgoingConnections().isEmpty()) {
            solutionSetNode = new SolutionSetNode((DeltaIterationBase.SolutionSetPlaceHolder<?>) iter.getSolutionSet(), iterNode);
        } else {
            for (DagConnection conn : solutionSetNode.getOutgoingConnections()) {
                OptimizerNode successor = conn.getTarget();
                if (successor.getClass() == JoinNode.class) {
                    // find out which input of the join the solution set is
                    JoinNode mn = (JoinNode) successor;
                    if (mn.getFirstPredecessorNode() == solutionSetNode) {
                        mn.makeJoinWithSolutionSet(0);
                    } else if (mn.getSecondPredecessorNode() == solutionSetNode) {
                        mn.makeJoinWithSolutionSet(1);
                    } else {
                        throw new CompilerException();
                    }
                } else if (successor.getClass() == CoGroupNode.class) {
                    CoGroupNode cg = (CoGroupNode) successor;
                    if (cg.getFirstPredecessorNode() == solutionSetNode) {
                        cg.makeCoGroupWithSolutionSet(0);
                    } else if (cg.getSecondPredecessorNode() == solutionSetNode) {
                        cg.makeCoGroupWithSolutionSet(1);
                    } else {
                        throw new CompilerException();
                    }
                } else {
                    throw new InvalidProgramException("Error: The only operations allowed on the solution set are Join and CoGroup.");
                }
            }
        }
        final OptimizerNode nextWorksetNode = recursiveCreator.con2node.get(iter.getNextWorkset());
        final OptimizerNode solutionSetDeltaNode = recursiveCreator.con2node.get(iter.getSolutionSetDelta());
        // set the step function nodes to the iteration node
        iterNode.setPartialSolution(solutionSetNode, worksetNode);
        iterNode.setNextPartialSolution(solutionSetDeltaNode, nextWorksetNode, defaultDataExchangeMode);
        // go over the contained data flow and mark the dynamic path nodes
        StaticDynamicPathIdentifier pathIdentifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
        iterNode.acceptForStepFunction(pathIdentifier);
    }
}
Also used : Operator(org.apache.flink.api.common.operators.Operator) HashMap(java.util.HashMap) WorksetNode(org.apache.flink.optimizer.dag.WorksetNode) JoinNode(org.apache.flink.optimizer.dag.JoinNode) OuterJoinNode(org.apache.flink.optimizer.dag.OuterJoinNode) CoGroupNode(org.apache.flink.optimizer.dag.CoGroupNode) BulkIterationNode(org.apache.flink.optimizer.dag.BulkIterationNode) SolutionSetNode(org.apache.flink.optimizer.dag.SolutionSetNode) OptimizerNode(org.apache.flink.optimizer.dag.OptimizerNode) WorksetIterationNode(org.apache.flink.optimizer.dag.WorksetIterationNode) InvalidProgramException(org.apache.flink.api.common.InvalidProgramException) BulkPartialSolutionNode(org.apache.flink.optimizer.dag.BulkPartialSolutionNode) CompilerException(org.apache.flink.optimizer.CompilerException) DeltaIterationBase(org.apache.flink.api.common.operators.base.DeltaIterationBase) DagConnection(org.apache.flink.optimizer.dag.DagConnection)
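
The CompilerExceptions above enforce the delta-iteration contract: both the next workset and the solution-set delta must derive from the iteration's workset. A hedged sketch of a conforming program (the inputs and the join function MyDelta are hypothetical):

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.tuple.Tuple2;

// key the solution set on tuple field 0; 100 is the maximum number of iterations
DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
        initialSolutionSet.iterateDelta(initialWorkset, 100, 0);

// deriving the delta from getWorkset() is what the StepFunctionValidator checks for
DataSet<Tuple2<Long, Long>> delta = iteration.getWorkset()
        .join(iteration.getSolutionSet())
        .where(0).equalTo(0)
        .with(new MyDelta());  // MyDelta is a hypothetical JoinFunction

// feed the delta back as both the solution-set update and the next workset
DataSet<Tuple2<Long, Long>> result = iteration.closeWith(delta, delta);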

Example 30 with CompilerException

Use of org.apache.flink.optimizer.CompilerException in project flink by apache: class TwoInputNode, method placePipelineBreakersIfNecessary.

protected void placePipelineBreakersIfNecessary(DriverStrategy strategy, Channel in1, Channel in2) {
    // before instantiating, check for deadlocks by tracing back to the open branches and
    // checking whether either no input, or all of them, have a dam
    if (in1.isOnDynamicPath() && in2.isOnDynamicPath() && this.hereJoinedBranches != null && this.hereJoinedBranches.size() > 0) {
        boolean someDamOnLeftPaths = false;
        boolean damOnAllLeftPaths = true;
        boolean someDamOnRightPaths = false;
        boolean damOnAllRightPaths = true;
        if (strategy.firstDam() == DamBehavior.FULL_DAM || in1.getLocalStrategy().dams() || in1.getTempMode().breaksPipeline()) {
            someDamOnLeftPaths = true;
        } else {
            for (OptimizerNode brancher : this.hereJoinedBranches) {
                PlanNode candAtBrancher = in1.getSource().getCandidateAtBranchPoint(brancher);
                // not all candidates are found, because this list includes joined branches from both regular inputs and broadcast vars
                if (candAtBrancher == null) {
                    continue;
                }
                SourceAndDamReport res = in1.getSource().hasDamOnPathDownTo(candAtBrancher);
                if (res == NOT_FOUND) {
                    throw new CompilerException("Bug: Tracing dams for deadlock detection is broken.");
                } else if (res == FOUND_SOURCE) {
                    damOnAllLeftPaths = false;
                } else if (res == FOUND_SOURCE_AND_DAM) {
                    someDamOnLeftPaths = true;
                } else {
                    throw new CompilerException();
                }
            }
        }
        if (strategy.secondDam() == DamBehavior.FULL_DAM || in2.getLocalStrategy().dams() || in2.getTempMode().breaksPipeline()) {
            someDamOnRightPaths = true;
        } else {
            for (OptimizerNode brancher : this.hereJoinedBranches) {
                PlanNode candAtBrancher = in2.getSource().getCandidateAtBranchPoint(brancher);
                // not all candidates are found, because this list includes joined branches from both regular inputs and broadcast vars
                if (candAtBrancher == null) {
                    continue;
                }
                SourceAndDamReport res = in2.getSource().hasDamOnPathDownTo(candAtBrancher);
                if (res == NOT_FOUND) {
                    throw new CompilerException("Bug: Tracing dams for deadlock detection is broken.");
                } else if (res == FOUND_SOURCE) {
                    damOnAllRightPaths = false;
                } else if (res == FOUND_SOURCE_AND_DAM) {
                    someDamOnRightPaths = true;
                } else {
                    throw new CompilerException();
                }
            }
        }
        // acceptable combinations: dams on all paths of both sides, or no dams at all
        if ((damOnAllLeftPaths & damOnAllRightPaths) | (!someDamOnLeftPaths & !someDamOnRightPaths)) {
        // good, either both materialize already on the way, or both fully pipeline
        } else {
            if (someDamOnLeftPaths & !damOnAllRightPaths) {
                // right needs a pipeline breaker
                in2.setTempMode(in2.getTempMode().makePipelineBreaker());
            }
            if (someDamOnRightPaths & !damOnAllLeftPaths) {
                // left needs a pipeline breaker
                in1.setTempMode(in1.getTempMode().makePipelineBreaker());
            }
        }
    }
}
Also used : DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode) PlanNode(org.apache.flink.optimizer.plan.PlanNode) SourceAndDamReport(org.apache.flink.optimizer.plan.PlanNode.SourceAndDamReport) CompilerException(org.apache.flink.optimizer.CompilerException)
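
The final decision reduces to a small truth table: a plan is safe if both sides dam on all paths, or neither side dams at all; otherwise a pipeline breaker goes on each side that can still fully pipeline while the opposite side already dams somewhere. A standalone sketch of that rule (hypothetical helper, not part of Flink):

// Returns true when the right input needs a pipeline breaker; the mirror-image
// call with the arguments swapped gives the decision for the left input.
static boolean rightNeedsPipelineBreaker(boolean someDamOnLeft, boolean damOnAllLeft,
                                         boolean someDamOnRight, boolean damOnAllRight) {
    boolean safe = (damOnAllLeft && damOnAllRight) || (!someDamOnLeft && !someDamOnRight);
    return !safe && someDamOnLeft && !damOnAllRight;
}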

Aggregations

CompilerException (org.apache.flink.optimizer.CompilerException): 48
PlanNode (org.apache.flink.optimizer.plan.PlanNode): 16
DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode): 15
Channel (org.apache.flink.optimizer.plan.Channel): 14
SingleInputPlanNode (org.apache.flink.optimizer.plan.SingleInputPlanNode): 14
WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode): 13
BulkIterationPlanNode (org.apache.flink.optimizer.plan.BulkIterationPlanNode): 12
SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode): 12
SolutionSetPlanNode (org.apache.flink.optimizer.plan.SolutionSetPlanNode): 12
WorksetPlanNode (org.apache.flink.optimizer.plan.WorksetPlanNode): 12
BulkPartialSolutionPlanNode (org.apache.flink.optimizer.plan.BulkPartialSolutionPlanNode): 11
SourcePlanNode (org.apache.flink.optimizer.plan.SourcePlanNode): 11
NAryUnionPlanNode (org.apache.flink.optimizer.plan.NAryUnionPlanNode): 10
NamedChannel (org.apache.flink.optimizer.plan.NamedChannel): 10
IterationPlanNode (org.apache.flink.optimizer.plan.IterationPlanNode): 9
ArrayList (java.util.ArrayList): 8
Configuration (org.apache.flink.configuration.Configuration): 8
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 8
ShipStrategyType (org.apache.flink.runtime.operators.shipping.ShipStrategyType): 8
TaskConfig (org.apache.flink.runtime.operators.util.TaskConfig): 8