Example 1 with WorksetIterationNode

use of org.apache.flink.optimizer.dag.WorksetIterationNode in project flink by apache.

the class GenericFlatTypePostPass method traverse.

@SuppressWarnings("unchecked")
protected void traverse(PlanNode node, T parentSchema, boolean createUtilities) {
    // distinguish the node types
    if (node instanceof SinkPlanNode) {
        SinkPlanNode sn = (SinkPlanNode) node;
        Channel inchannel = sn.getInput();
        T schema = createEmptySchema();
        sn.postPassHelper = schema;
        // add the sink's information to the schema
        try {
            getSinkSchema(sn, schema);
        } catch (ConflictingFieldTypeInfoException e) {
            throw new CompilerPostPassException("Conflicting type infomation for the data sink '" + sn.getSinkNode().getOperator().getName() + "'.");
        }
        // descend to the input channel
        try {
            propagateToChannel(schema, inchannel, createUtilities);
        } catch (MissingFieldTypeInfoException ex) {
            throw new CompilerPostPassException("Missing type infomation for the channel that inputs to the data sink '" + sn.getSinkNode().getOperator().getName() + "'.");
        }
    } else if (node instanceof SourcePlanNode) {
        if (createUtilities) {
            ((SourcePlanNode) node).setSerializer(createSerializer(parentSchema, node));
            // nothing else to be done here. the source has no input and no strategy itself
        }
    } else if (node instanceof BulkIterationPlanNode) {
        BulkIterationPlanNode iterationNode = (BulkIterationPlanNode) node;
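        // iteration nodes are traversed in two passes: the first pass (createUtilities == false)
        // only collects schema information; the second pass instantiates serializers and
        // comparators once the partial solution's schema is known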
        // get the node's current schema
        T schema;
        if (iterationNode.postPassHelper == null) {
            schema = createEmptySchema();
            iterationNode.postPassHelper = schema;
        } else {
            schema = (T) iterationNode.postPassHelper;
        }
        schema.increaseNumConnectionsThatContributed();
        // add the parent schema to the schema
        if (propagateParentSchemaDown) {
            addSchemaToSchema(parentSchema, schema, iterationNode.getProgramOperator().getName());
        }
        // check whether all outgoing channels have contributed yet; if not, come back later
        if (schema.getNumConnectionsThatContributed() < iterationNode.getOutgoingChannels().size()) {
            return;
        }
        if (iterationNode.getRootOfStepFunction() instanceof NAryUnionPlanNode) {
            throw new CompilerException("Optimizer cannot compile an iteration step function where next partial solution is created by a Union node.");
        }
        // traverse the termination criterion for the first time. create schema only, no utilities. Needed in case of intermediate termination criterion
        if (iterationNode.getRootOfTerminationCriterion() != null) {
            SingleInputPlanNode addMapper = (SingleInputPlanNode) iterationNode.getRootOfTerminationCriterion();
            traverse(addMapper.getInput().getSource(), createEmptySchema(), false);
            try {
                addMapper.getInput().setSerializer(createSerializer(createEmptySchema()));
            } catch (MissingFieldTypeInfoException e) {
                throw new RuntimeException(e);
            }
        }
        // traverse the step function for the first time. create schema only, no utilities
        traverse(iterationNode.getRootOfStepFunction(), schema, false);
        T pss = (T) iterationNode.getPartialSolutionPlanNode().postPassHelper;
        if (pss == null) {
            throw new CompilerException("Error in Optimizer Post Pass: Partial solution schema is null after first traversal of the step function.");
        }
        // traverse the step function for the second time, taking the schema of the partial solution
        traverse(iterationNode.getRootOfStepFunction(), pss, createUtilities);
        if (iterationNode.getRootOfTerminationCriterion() != null) {
            SingleInputPlanNode addMapper = (SingleInputPlanNode) iterationNode.getRootOfTerminationCriterion();
            traverse(addMapper.getInput().getSource(), createEmptySchema(), createUtilities);
            try {
                addMapper.getInput().setSerializer(createSerializer(createEmptySchema()));
            } catch (MissingFieldTypeInfoException e) {
                throw new RuntimeException(e);
            }
        }
        // take the schema from the partial solution node and add its fields to the iteration result schema.
        // input and output schema need to be identical, so this is essentially a sanity check
        addSchemaToSchema(pss, schema, iterationNode.getProgramOperator().getName());
        // set the serializer
        if (createUtilities) {
            iterationNode.setSerializerForIterationChannel(createSerializer(pss, iterationNode.getPartialSolutionPlanNode()));
        }
        // done, we can now propagate our info down
        try {
            propagateToChannel(schema, iterationNode.getInput(), createUtilities);
        } catch (MissingFieldTypeInfoException e) {
            throw new CompilerPostPassException("Could not set up runtime strategy for input channel to node '" + iterationNode.getProgramOperator().getName() + "'. Missing type information for key field " + e.getFieldNumber());
        }
    } else if (node instanceof WorksetIterationPlanNode) {
        WorksetIterationPlanNode iterationNode = (WorksetIterationPlanNode) node;
        // get the node's current schema
        T schema;
        if (iterationNode.postPassHelper == null) {
            schema = createEmptySchema();
            iterationNode.postPassHelper = schema;
        } else {
            schema = (T) iterationNode.postPassHelper;
        }
        schema.increaseNumConnectionsThatContributed();
        // add the parent schema to the schema (which refers to the solution set schema)
        if (propagateParentSchemaDown) {
            addSchemaToSchema(parentSchema, schema, iterationNode.getProgramOperator().getName());
        }
        // check whether all outgoing channels have contributed yet; if not, come back later
        if (schema.getNumConnectionsThatContributed() < iterationNode.getOutgoingChannels().size()) {
            return;
        }
        if (iterationNode.getNextWorkSetPlanNode() instanceof NAryUnionPlanNode) {
            throw new CompilerException("Optimizer cannot compile a workset iteration step function where the next workset is produced by a Union node.");
        }
        if (iterationNode.getSolutionSetDeltaPlanNode() instanceof NAryUnionPlanNode) {
            throw new CompilerException("Optimizer cannot compile a workset iteration step function where the solution set delta is produced by a Union node.");
        }
        // traverse the step function
        // pass an empty schema to the next workset and the parent schema to the solution set delta
        // these first traversals are schema only
        traverse(iterationNode.getNextWorkSetPlanNode(), createEmptySchema(), false);
        traverse(iterationNode.getSolutionSetDeltaPlanNode(), schema, false);
        T wss = (T) iterationNode.getWorksetPlanNode().postPassHelper;
        T sss = (T) iterationNode.getSolutionSetPlanNode().postPassHelper;
        if (wss == null) {
            throw new CompilerException("Error in Optimizer Post Pass: Workset schema is null after first traversal of the step function.");
        }
        if (sss == null) {
            throw new CompilerException("Error in Optimizer Post Pass: Solution set schema is null after first traversal of the step function.");
        }
        // make the second pass and instantiate the utilities
        traverse(iterationNode.getNextWorkSetPlanNode(), wss, createUtilities);
        traverse(iterationNode.getSolutionSetDeltaPlanNode(), sss, createUtilities);
        // the solution set input and the result must have the same schema, this acts as a sanity check.
        try {
            for (Map.Entry<Integer, X> entry : sss) {
                Integer pos = entry.getKey();
                schema.addType(pos, entry.getValue());
            }
        } catch (ConflictingFieldTypeInfoException e) {
            throw new CompilerPostPassException("Conflicting type information for field " + e.getFieldNumber() + " in node '" + iterationNode.getProgramOperator().getName() + "'. Contradicting types between the " + "result of the iteration and the solution set schema: " + e.getPreviousType() + " and " + e.getNewType() + ". Most probable cause: Invalid constant field annotations.");
        }
        // set the serializers and comparators
        if (createUtilities) {
            WorksetIterationNode optNode = iterationNode.getIterationNode();
            iterationNode.setWorksetSerializer(createSerializer(wss, iterationNode.getWorksetPlanNode()));
            iterationNode.setSolutionSetSerializer(createSerializer(sss, iterationNode.getSolutionSetPlanNode()));
            try {
                iterationNode.setSolutionSetComparator(createComparator(optNode.getSolutionSetKeyFields(), null, sss));
            } catch (MissingFieldTypeInfoException ex) {
                throw new CompilerPostPassException("Could not set up the solution set for workset iteration '" + optNode.getOperator().getName() + "'. Missing type information for key field " + ex.getFieldNumber() + '.');
            }
        }
        // done, we can now propagate our info down
        try {
            propagateToChannel(schema, iterationNode.getInitialSolutionSetInput(), createUtilities);
            propagateToChannel(wss, iterationNode.getInitialWorksetInput(), createUtilities);
        } catch (MissingFieldTypeInfoException ex) {
            throw new CompilerPostPassException("Could not set up runtime strategy for input channel to node '" + iterationNode.getProgramOperator().getName() + "'. Missing type information for key field " + ex.getFieldNumber());
        }
    } else if (node instanceof SingleInputPlanNode) {
        SingleInputPlanNode sn = (SingleInputPlanNode) node;
        // get the node's current schema
        T schema;
        if (sn.postPassHelper == null) {
            schema = createEmptySchema();
            sn.postPassHelper = schema;
        } else {
            schema = (T) sn.postPassHelper;
        }
        schema.increaseNumConnectionsThatContributed();
        SingleInputNode optNode = sn.getSingleInputNode();
        // add the parent schema to the schema
        if (propagateParentSchemaDown) {
            addSchemaToSchema(parentSchema, schema, optNode, 0);
        }
        // check whether all outgoing channels have contributed yet; if not, come back later
        if (schema.getNumConnectionsThatContributed() < sn.getOutgoingChannels().size()) {
            return;
        }
        // add the node's local information
        try {
            getSingleInputNodeSchema(sn, schema);
        } catch (ConflictingFieldTypeInfoException e) {
            throw new CompilerPostPassException(getConflictingTypeErrorMessage(e, optNode.getOperator().getName()));
        }
        if (createUtilities) {
            // parameterize the node's driver strategy
            for (int i = 0; i < sn.getDriverStrategy().getNumRequiredComparators(); i++) {
                try {
                    sn.setComparator(createComparator(sn.getKeys(i), sn.getSortOrders(i), schema), i);
                } catch (MissingFieldTypeInfoException e) {
                    throw new CompilerPostPassException("Could not set up runtime strategy for node '" + optNode.getOperator().getName() + "'. Missing type information for key field " + e.getFieldNumber());
                }
            }
        }
        // done, we can now propagate our info down
        try {
            propagateToChannel(schema, sn.getInput(), createUtilities);
        } catch (MissingFieldTypeInfoException e) {
            throw new CompilerPostPassException("Could not set up runtime strategy for input channel to node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
        }
        // don't forget the broadcast inputs
        for (Channel c : sn.getBroadcastInputs()) {
            try {
                propagateToChannel(createEmptySchema(), c, createUtilities);
            } catch (MissingFieldTypeInfoException e) {
                throw new CompilerPostPassException("Could not set up runtime strategy for broadcast channel in node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
            }
        }
    } else if (node instanceof DualInputPlanNode) {
        DualInputPlanNode dn = (DualInputPlanNode) node;
        // get the node's current schemas
        T schema1;
        T schema2;
        if (dn.postPassHelper1 == null) {
            schema1 = createEmptySchema();
            schema2 = createEmptySchema();
            dn.postPassHelper1 = schema1;
            dn.postPassHelper2 = schema2;
        } else {
            schema1 = (T) dn.postPassHelper1;
            schema2 = (T) dn.postPassHelper2;
        }
        schema1.increaseNumConnectionsThatContributed();
        schema2.increaseNumConnectionsThatContributed();
        TwoInputNode optNode = dn.getTwoInputNode();
        // add the parent schema to the schema
        if (propagateParentSchemaDown) {
            addSchemaToSchema(parentSchema, schema1, optNode, 0);
            addSchemaToSchema(parentSchema, schema2, optNode, 1);
        }
        // check whether all outgoing channels have contributed yet; if not, come back later
        if (schema1.getNumConnectionsThatContributed() < dn.getOutgoingChannels().size()) {
            return;
        }
        // add the node's local information
        try {
            getDualInputNodeSchema(dn, schema1, schema2);
        } catch (ConflictingFieldTypeInfoException e) {
            throw new CompilerPostPassException(getConflictingTypeErrorMessage(e, optNode.getOperator().getName()));
        }
        // parameterize the node's driver strategy
        if (createUtilities) {
            if (dn.getDriverStrategy().getNumRequiredComparators() > 0) {
                // set the individual comparators
                try {
                    dn.setComparator1(createComparator(dn.getKeysForInput1(), dn.getSortOrders(), schema1));
                    dn.setComparator2(createComparator(dn.getKeysForInput2(), dn.getSortOrders(), schema2));
                } catch (MissingFieldTypeInfoException e) {
                    throw new CompilerPostPassException("Could not set up runtime strategy for node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
                }
                // set the pair comparator
                try {
                    dn.setPairComparator(createPairComparator(dn.getKeysForInput1(), dn.getKeysForInput2(), dn.getSortOrders(), schema1, schema2));
                } catch (MissingFieldTypeInfoException e) {
                    throw new CompilerPostPassException("Could not set up runtime strategy for node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
                }
            }
        }
        // done, we can now propagate our info down
        try {
            propagateToChannel(schema1, dn.getInput1(), createUtilities);
        } catch (MissingFieldTypeInfoException e) {
            throw new CompilerPostPassException("Could not set up runtime strategy for the first input channel to node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
        }
        try {
            propagateToChannel(schema2, dn.getInput2(), createUtilities);
        } catch (MissingFieldTypeInfoException e) {
            throw new CompilerPostPassException("Could not set up runtime strategy for the second input channel to node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
        }
        // don't forget the broadcast inputs
        for (Channel c : dn.getBroadcastInputs()) {
            try {
                propagateToChannel(createEmptySchema(), c, createUtilities);
            } catch (MissingFieldTypeInfoException e) {
                throw new CompilerPostPassException("Could not set up runtime strategy for broadcast channel in node '" + optNode.getOperator().getName() + "'. Missing type information for field " + e.getFieldNumber());
            }
        }
    } else if (node instanceof NAryUnionPlanNode) {
        // only propagate the info down
        try {
            for (Channel channel : node.getInputs()) {
                propagateToChannel(parentSchema, channel, createUtilities);
            }
        } catch (MissingFieldTypeInfoException ex) {
            throw new CompilerPostPassException("Could not set up runtime strategy for the input channel to " + " a union node. Missing type information for field " + ex.getFieldNumber());
        }
    } else if (node instanceof BulkPartialSolutionPlanNode
            || node instanceof SolutionSetPlanNode
            || node instanceof WorksetPlanNode) {
        // catch the sources of the iterative step functions
        // get the node's current schema
        T schema;
        String name;
        if (node instanceof BulkPartialSolutionPlanNode) {
            BulkPartialSolutionPlanNode psn = (BulkPartialSolutionPlanNode) node;
            if (psn.postPassHelper == null) {
                schema = createEmptySchema();
                psn.postPassHelper = schema;
            } else {
                schema = (T) psn.postPassHelper;
            }
            name = "partial solution of bulk iteration '" + psn.getPartialSolutionNode().getIterationNode().getOperator().getName() + "'";
        } else if (node instanceof SolutionSetPlanNode) {
            SolutionSetPlanNode ssn = (SolutionSetPlanNode) node;
            if (ssn.postPassHelper == null) {
                schema = createEmptySchema();
                ssn.postPassHelper = schema;
            } else {
                schema = (T) ssn.postPassHelper;
            }
            name = "solution set of workset iteration '" + ssn.getSolutionSetNode().getIterationNode().getOperator().getName() + "'";
        } else if (node instanceof WorksetPlanNode) {
            WorksetPlanNode wsn = (WorksetPlanNode) node;
            if (wsn.postPassHelper == null) {
                schema = createEmptySchema();
                wsn.postPassHelper = schema;
            } else {
                schema = (T) wsn.postPassHelper;
            }
            name = "workset of workset iteration '" + wsn.getWorksetNode().getIterationNode().getOperator().getName() + "'";
        } else {
            throw new CompilerException();
        }
        schema.increaseNumConnectionsThatContributed();
        // add the parent schema to the schema
        addSchemaToSchema(parentSchema, schema, name);
    } else {
        throw new CompilerPostPassException("Unknown node type encountered: " + node.getClass().getName());
    }
}
Also used : SingleInputNode(org.apache.flink.optimizer.dag.SingleInputNode) SolutionSetPlanNode(org.apache.flink.optimizer.plan.SolutionSetPlanNode) WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode) BulkPartialSolutionPlanNode(org.apache.flink.optimizer.plan.BulkPartialSolutionPlanNode) Channel(org.apache.flink.optimizer.plan.Channel) NAryUnionPlanNode(org.apache.flink.optimizer.plan.NAryUnionPlanNode) SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode) DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode) WorksetIterationNode(org.apache.flink.optimizer.dag.WorksetIterationNode) SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode) SourcePlanNode(org.apache.flink.optimizer.plan.SourcePlanNode) CompilerException(org.apache.flink.optimizer.CompilerException) WorksetPlanNode(org.apache.flink.optimizer.plan.WorksetPlanNode) CompilerPostPassException(org.apache.flink.optimizer.CompilerPostPassException) BulkIterationPlanNode(org.apache.flink.optimizer.plan.BulkIterationPlanNode) TwoInputNode(org.apache.flink.optimizer.dag.TwoInputNode)
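
For orientation, below is a minimal sketch (not taken from the Flink sources) of a DataSet program whose optimized plan contains the WorksetIterationPlanNode branch that the traverse method above handles. The Optimizer, DataStatistics, and DefaultCostEstimator entry points are real flink-optimizer classes; which concrete post pass the optimizer selects depends on the program's API, so treat the compile call as illustrative.

import org.apache.flink.api.common.Plan;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.DataStatistics;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;

public class DeltaIterationCompileSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Long, Long>> initial = env.fromElements(Tuple2.of(1L, 10L), Tuple2.of(2L, 20L));

        // solution set and workset start out identical; field 0 is the solution set key
        DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
                initial.iterateDelta(initial, 10, 0);

        // joining against the solution set is one of the two operations permitted on it
        DataSet<Tuple2<Long, Long>> delta = iteration.getWorkset()
                .join(iteration.getSolutionSet())
                .where(0).equalTo(0)
                .with(new JoinFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>>() {
                    @Override
                    public Tuple2<Long, Long> join(Tuple2<Long, Long> ws, Tuple2<Long, Long> ss) {
                        return Tuple2.of(ws.f0, ws.f1 + ss.f1);
                    }
                });

        iteration.closeWith(delta, delta)
                .output(new DiscardingOutputFormat<Tuple2<Long, Long>>());

        // compiling the plan runs the optimizer, including its post pass over the
        // WorksetIterationPlanNode produced for the delta iteration above
        Plan plan = env.createProgramPlan();
        OptimizedPlan optimized =
                new Optimizer(new DataStatistics(), new DefaultCostEstimator(), new Configuration())
                        .compile(plan);
    }
}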

Example 2 with WorksetIterationNode

use of org.apache.flink.optimizer.dag.WorksetIterationNode in project flink by apache.

the class GraphCreatingVisitor method postVisit.

@Override
public void postVisit(Operator<?> c) {
    OptimizerNode n = this.con2node.get(c);
    // first connect to the predecessors
    n.setInput(this.con2node, this.defaultDataExchangeMode);
    n.setBroadcastInputs(this.con2node, this.defaultDataExchangeMode);
    // if the node represents a bulk iteration, we recursively translate the data flow now
    if (n instanceof BulkIterationNode) {
        final BulkIterationNode iterNode = (BulkIterationNode) n;
        final BulkIterationBase<?> iter = iterNode.getIterationContract();
        // pass a copy of the non-iterative part into the iteration translation,
        // in case the iteration references its closure
        HashMap<Operator<?>, OptimizerNode> closure = new HashMap<Operator<?>, OptimizerNode>(con2node);
        // first, recursively build the data flow for the step function
        final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true, iterNode.getParallelism(), defaultDataExchangeMode, closure);
        BulkPartialSolutionNode partialSolution;
        iter.getNextPartialSolution().accept(recursiveCreator);
        partialSolution = (BulkPartialSolutionNode) recursiveCreator.con2node.get(iter.getPartialSolution());
        OptimizerNode rootOfStepFunction = recursiveCreator.con2node.get(iter.getNextPartialSolution());
        if (partialSolution == null) {
            throw new CompilerException("Error: The step functions result does not depend on the partial solution.");
        }
        OptimizerNode terminationCriterion = null;
        if (iter.getTerminationCriterion() != null) {
            terminationCriterion = recursiveCreator.con2node.get(iter.getTerminationCriterion());
            // if the termination criterion was not reached by the traversal above, translate the missing parts now
            if (terminationCriterion == null) {
                iter.getTerminationCriterion().accept(recursiveCreator);
                terminationCriterion = recursiveCreator.con2node.get(iter.getTerminationCriterion());
            }
        }
        iterNode.setPartialSolution(partialSolution);
        iterNode.setNextPartialSolution(rootOfStepFunction, terminationCriterion);
        // go over the contained data flow and mark the dynamic path nodes
        StaticDynamicPathIdentifier identifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
        iterNode.acceptForStepFunction(identifier);
    } else if (n instanceof WorksetIterationNode) {
        final WorksetIterationNode iterNode = (WorksetIterationNode) n;
        final DeltaIterationBase<?, ?> iter = iterNode.getIterationContract();
        // we need to ensure that both the next-workset and the solution-set-delta depend on the workset.
        // one check comes for free during the translation; the other is done here as a precondition
        {
            StepFunctionValidator wsf = new StepFunctionValidator();
            iter.getNextWorkset().accept(wsf);
            if (!wsf.hasFoundWorkset()) {
                throw new CompilerException("In the given program, the next workset does not depend on the workset. " + "This is a prerequisite in delta iterations.");
            }
        }
        // calculate the closure of the anonymous function
        HashMap<Operator<?>, OptimizerNode> closure = new HashMap<Operator<?>, OptimizerNode>(con2node);
        // first, recursively build the data flow for the step function
        final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true, iterNode.getParallelism(), defaultDataExchangeMode, closure);
        // descend from the solution set delta and check that it depends on both the workset
        // and the solution set. if it depends on both, this descent creates both nodes
        iter.getSolutionSetDelta().accept(recursiveCreator);
        final WorksetNode worksetNode = (WorksetNode) recursiveCreator.con2node.get(iter.getWorkset());
        if (worksetNode == null) {
            throw new CompilerException("In the given program, the solution set delta does not depend on the workset." + "This is a prerequisite in delta iterations.");
        }
        iter.getNextWorkset().accept(recursiveCreator);
        SolutionSetNode solutionSetNode = (SolutionSetNode) recursiveCreator.con2node.get(iter.getSolutionSet());
        if (solutionSetNode == null || solutionSetNode.getOutgoingConnections() == null || solutionSetNode.getOutgoingConnections().isEmpty()) {
            solutionSetNode = new SolutionSetNode((DeltaIterationBase.SolutionSetPlaceHolder<?>) iter.getSolutionSet(), iterNode);
        } else {
            for (DagConnection conn : solutionSetNode.getOutgoingConnections()) {
                OptimizerNode successor = conn.getTarget();
                if (successor.getClass() == JoinNode.class) {
                    // find out which input of the join the solution set is connected to
                    JoinNode mn = (JoinNode) successor;
                    if (mn.getFirstPredecessorNode() == solutionSetNode) {
                        mn.makeJoinWithSolutionSet(0);
                    } else if (mn.getSecondPredecessorNode() == solutionSetNode) {
                        mn.makeJoinWithSolutionSet(1);
                    } else {
                        throw new CompilerException();
                    }
                } else if (successor.getClass() == CoGroupNode.class) {
                    CoGroupNode cg = (CoGroupNode) successor;
                    if (cg.getFirstPredecessorNode() == solutionSetNode) {
                        cg.makeCoGroupWithSolutionSet(0);
                    } else if (cg.getSecondPredecessorNode() == solutionSetNode) {
                        cg.makeCoGroupWithSolutionSet(1);
                    } else {
                        throw new CompilerException();
                    }
                } else {
                    throw new InvalidProgramException("Error: The only operations allowed on the solution set are Join and CoGroup.");
                }
            }
        }
        final OptimizerNode nextWorksetNode = recursiveCreator.con2node.get(iter.getNextWorkset());
        final OptimizerNode solutionSetDeltaNode = recursiveCreator.con2node.get(iter.getSolutionSetDelta());
        // set the step function nodes to the iteration node
        iterNode.setPartialSolution(solutionSetNode, worksetNode);
        iterNode.setNextPartialSolution(solutionSetDeltaNode, nextWorksetNode, defaultDataExchangeMode);
        // go over the contained data flow and mark the dynamic path nodes
        StaticDynamicPathIdentifier pathIdentifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
        iterNode.acceptForStepFunction(pathIdentifier);
    }
}
Also used : Operator(org.apache.flink.api.common.operators.Operator) HashMap(java.util.HashMap) WorksetNode(org.apache.flink.optimizer.dag.WorksetNode) JoinNode(org.apache.flink.optimizer.dag.JoinNode) OuterJoinNode(org.apache.flink.optimizer.dag.OuterJoinNode) CoGroupNode(org.apache.flink.optimizer.dag.CoGroupNode) BulkIterationNode(org.apache.flink.optimizer.dag.BulkIterationNode) SolutionSetNode(org.apache.flink.optimizer.dag.SolutionSetNode) OptimizerNode(org.apache.flink.optimizer.dag.OptimizerNode) WorksetIterationNode(org.apache.flink.optimizer.dag.WorksetIterationNode) InvalidProgramException(org.apache.flink.api.common.InvalidProgramException) BulkPartialSolutionNode(org.apache.flink.optimizer.dag.BulkPartialSolutionNode) CompilerException(org.apache.flink.optimizer.CompilerException) DeltaIterationBase(org.apache.flink.api.common.operators.base.DeltaIterationBase) DagConnection(org.apache.flink.optimizer.dag.DagConnection)
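
As a counterpoint, here is a hedged sketch (class name and data invented for illustration) of a program that violates the precondition enforced by the StepFunctionValidator above: the next workset is taken from the constant input rather than derived from the workset, so compiling the plan should fail with the "next workset does not depend on the workset" CompilerException.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.tuple.Tuple2;

public class InvalidNextWorksetSketch {

    public static void main(String[] args) {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Long, Long>> input = env.fromElements(Tuple2.of(1L, 1L));

        DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
                input.iterateDelta(input, 5, 0);

        // the solution set delta legitimately depends on the workset
        DataSet<Tuple2<Long, Long>> delta = iteration.getWorkset()
                .map(new MapFunction<Tuple2<Long, Long>, Tuple2<Long, Long>>() {
                    @Override
                    public Tuple2<Long, Long> map(Tuple2<Long, Long> value) {
                        return Tuple2.of(value.f0, value.f1 + 1);
                    }
                });

        // the next workset is the constant input; it contains no workset placeholder,
        // so the StepFunctionValidator finds nothing and postVisit throws
        iteration.closeWith(delta, input)
                .output(new DiscardingOutputFormat<Tuple2<Long, Long>>());

        // env.execute() or compiling env.createProgramPlan() is expected to fail here
    }
}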

Example 3 with WorksetIterationNode

use of org.apache.flink.optimizer.dag.WorksetIterationNode in project flink by apache.

the class GraphCreatingVisitor method preVisit.

@SuppressWarnings("deprecation")
@Override
public boolean preVisit(Operator<?> c) {
    // check if we have been here before
    if (this.con2node.containsKey(c)) {
        return false;
    }
    final OptimizerNode n;
    // create a node for the operator (or sink or source) if we have not been here before
    if (c instanceof GenericDataSinkBase) {
        DataSinkNode dsn = new DataSinkNode((GenericDataSinkBase<?>) c);
        this.sinks.add(dsn);
        n = dsn;
    } else if (c instanceof GenericDataSourceBase) {
        n = new DataSourceNode((GenericDataSourceBase<?, ?>) c);
    } else if (c instanceof MapOperatorBase) {
        n = new MapNode((MapOperatorBase<?, ?, ?>) c);
    } else if (c instanceof MapPartitionOperatorBase) {
        n = new MapPartitionNode((MapPartitionOperatorBase<?, ?, ?>) c);
    } else if (c instanceof FlatMapOperatorBase) {
        n = new FlatMapNode((FlatMapOperatorBase<?, ?, ?>) c);
    } else if (c instanceof FilterOperatorBase) {
        n = new FilterNode((FilterOperatorBase<?, ?>) c);
    } else if (c instanceof ReduceOperatorBase) {
        n = new ReduceNode((ReduceOperatorBase<?, ?>) c);
    } else if (c instanceof GroupCombineOperatorBase) {
        n = new GroupCombineNode((GroupCombineOperatorBase<?, ?, ?>) c);
    } else if (c instanceof GroupReduceOperatorBase) {
        n = new GroupReduceNode((GroupReduceOperatorBase<?, ?, ?>) c);
    } else if (c instanceof InnerJoinOperatorBase) {
        n = new JoinNode((InnerJoinOperatorBase<?, ?, ?, ?>) c);
    } else if (c instanceof OuterJoinOperatorBase) {
        n = new OuterJoinNode((OuterJoinOperatorBase<?, ?, ?, ?>) c);
    } else if (c instanceof CoGroupOperatorBase) {
        n = new CoGroupNode((CoGroupOperatorBase<?, ?, ?, ?>) c);
    } else if (c instanceof CoGroupRawOperatorBase) {
        n = new CoGroupRawNode((CoGroupRawOperatorBase<?, ?, ?, ?>) c);
    } else if (c instanceof CrossOperatorBase) {
        n = new CrossNode((CrossOperatorBase<?, ?, ?, ?>) c);
    } else if (c instanceof BulkIterationBase) {
        n = new BulkIterationNode((BulkIterationBase<?>) c);
    } else if (c instanceof DeltaIterationBase) {
        n = new WorksetIterationNode((DeltaIterationBase<?, ?>) c);
    } else if (c instanceof Union) {
        n = new BinaryUnionNode((Union<?>) c);
    } else if (c instanceof PartitionOperatorBase) {
        n = new PartitionNode((PartitionOperatorBase<?>) c);
    } else if (c instanceof SortPartitionOperatorBase) {
        n = new SortPartitionNode((SortPartitionOperatorBase<?>) c);
    } else if (c instanceof BulkIterationBase.PartialSolutionPlaceHolder) {
        if (this.parent == null) {
            throw new InvalidProgramException("It is currently not supported to create data sinks inside iterations.");
        }
        final BulkIterationBase.PartialSolutionPlaceHolder<?> holder = (BulkIterationBase.PartialSolutionPlaceHolder<?>) c;
        final BulkIterationBase<?> enclosingIteration = holder.getContainingBulkIteration();
        final BulkIterationNode containingIterationNode = (BulkIterationNode) this.parent.con2node.get(enclosingIteration);
        // catch this for the recursive translation of step functions
        BulkPartialSolutionNode p = new BulkPartialSolutionNode(holder, containingIterationNode);
        p.setParallelism(containingIterationNode.getParallelism());
        n = p;
    } else if (c instanceof DeltaIterationBase.WorksetPlaceHolder) {
        if (this.parent == null) {
            throw new InvalidProgramException("It is currently not supported to create data sinks inside iterations.");
        }
        final DeltaIterationBase.WorksetPlaceHolder<?> holder = (DeltaIterationBase.WorksetPlaceHolder<?>) c;
        final DeltaIterationBase<?, ?> enclosingIteration = holder.getContainingWorksetIteration();
        final WorksetIterationNode containingIterationNode = (WorksetIterationNode) this.parent.con2node.get(enclosingIteration);
        // catch this for the recursive translation of step functions
        WorksetNode p = new WorksetNode(holder, containingIterationNode);
        p.setParallelism(containingIterationNode.getParallelism());
        n = p;
    } else if (c instanceof DeltaIterationBase.SolutionSetPlaceHolder) {
        if (this.parent == null) {
            throw new InvalidProgramException("It is currently not supported to create data sinks inside iterations.");
        }
        final DeltaIterationBase.SolutionSetPlaceHolder<?> holder = (DeltaIterationBase.SolutionSetPlaceHolder<?>) c;
        final DeltaIterationBase<?, ?> enclosingIteration = holder.getContainingWorksetIteration();
        final WorksetIterationNode containingIterationNode = (WorksetIterationNode) this.parent.con2node.get(enclosingIteration);
        // catch this for the recursive translation of step functions
        SolutionSetNode p = new SolutionSetNode(holder, containingIterationNode);
        p.setParallelism(containingIterationNode.getParallelism());
        n = p;
    } else {
        throw new IllegalArgumentException("Unknown operator type: " + c);
    }
    this.con2node.put(c, n);
    // set the parallelism only if it has not been set before; some nodes have a fixed
    // parallelism, such as the key-less reducer (all-reduce)
    if (n.getParallelism() < 1) {
        // set the parallelism
        int par = c.getParallelism();
        if (n instanceof BinaryUnionNode) {
            // Keep parallelism of union undefined for now.
            // It will be determined based on the parallelism of its successor.
            par = -1;
        } else if (par > 0) {
            if (this.forceParallelism && par != this.defaultParallelism) {
                par = this.defaultParallelism;
                Optimizer.LOG.warn("The parallelism of nested dataflows (such as step functions in iterations) is " + "currently fixed to the parallelism of the surrounding operator (the iteration).");
            }
        } else {
            par = this.defaultParallelism;
        }
        n.setParallelism(par);
    }
    return true;
}
Also used : FlatMapOperatorBase(org.apache.flink.api.common.operators.base.FlatMapOperatorBase) GroupReduceOperatorBase(org.apache.flink.api.common.operators.base.GroupReduceOperatorBase) ReduceOperatorBase(org.apache.flink.api.common.operators.base.ReduceOperatorBase) FilterNode(org.apache.flink.optimizer.dag.FilterNode) CrossOperatorBase(org.apache.flink.api.common.operators.base.CrossOperatorBase) CoGroupOperatorBase(org.apache.flink.api.common.operators.base.CoGroupOperatorBase) BulkPartialSolutionNode(org.apache.flink.optimizer.dag.BulkPartialSolutionNode) DeltaIterationBase(org.apache.flink.api.common.operators.base.DeltaIterationBase) MapPartitionNode(org.apache.flink.optimizer.dag.MapPartitionNode) SortPartitionOperatorBase(org.apache.flink.api.common.operators.base.SortPartitionOperatorBase) MapPartitionOperatorBase(org.apache.flink.api.common.operators.base.MapPartitionOperatorBase) DataSinkNode(org.apache.flink.optimizer.dag.DataSinkNode) JoinNode(org.apache.flink.optimizer.dag.JoinNode) OuterJoinNode(org.apache.flink.optimizer.dag.OuterJoinNode) OuterJoinOperatorBase(org.apache.flink.api.common.operators.base.OuterJoinOperatorBase) FlatMapNode(org.apache.flink.optimizer.dag.FlatMapNode) BulkIterationBase(org.apache.flink.api.common.operators.base.BulkIterationBase) CoGroupRawOperatorBase(org.apache.flink.api.common.operators.base.CoGroupRawOperatorBase) GroupReduceNode(org.apache.flink.optimizer.dag.GroupReduceNode) GenericDataSinkBase(org.apache.flink.api.common.operators.GenericDataSinkBase) WorksetNode(org.apache.flink.optimizer.dag.WorksetNode) CoGroupNode(org.apache.flink.optimizer.dag.CoGroupNode) MapNode(org.apache.flink.optimizer.dag.MapNode) GroupCombineNode(org.apache.flink.optimizer.dag.GroupCombineNode) Union(org.apache.flink.api.common.operators.Union) MapOperatorBase(org.apache.flink.api.common.operators.base.MapOperatorBase) SolutionSetNode(org.apache.flink.optimizer.dag.SolutionSetNode) OptimizerNode(org.apache.flink.optimizer.dag.OptimizerNode) WorksetIterationNode(org.apache.flink.optimizer.dag.WorksetIterationNode) InvalidProgramException(org.apache.flink.api.common.InvalidProgramException) InnerJoinOperatorBase(org.apache.flink.api.common.operators.base.InnerJoinOperatorBase) DataSourceNode(org.apache.flink.optimizer.dag.DataSourceNode) SortPartitionNode(org.apache.flink.optimizer.dag.SortPartitionNode) PartitionOperatorBase(org.apache.flink.api.common.operators.base.PartitionOperatorBase) CoGroupRawNode(org.apache.flink.optimizer.dag.CoGroupRawNode) BinaryUnionNode(org.apache.flink.optimizer.dag.BinaryUnionNode) PartitionNode(org.apache.flink.optimizer.dag.PartitionNode) FilterOperatorBase(org.apache.flink.api.common.operators.base.FilterOperatorBase) GroupCombineOperatorBase(org.apache.flink.api.common.operators.base.GroupCombineOperatorBase) BulkIterationNode(org.apache.flink.optimizer.dag.BulkIterationNode) ReduceNode(org.apache.flink.optimizer.dag.ReduceNode) GenericDataSourceBase(org.apache.flink.api.common.operators.GenericDataSourceBase) CrossNode(org.apache.flink.optimizer.dag.CrossNode)
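
The preVisit method above is driven through Flink's generic Visitor interface, which walks a Plan from the sinks toward the sources, calling preVisit on the way down and postVisit on the way back up. A minimal sketch of that contract follows; the counter class is invented for illustration, but the Visitor, Plan, and DeltaIterationBase types are the real ones listed above.

import org.apache.flink.api.common.Plan;
import org.apache.flink.api.common.operators.Operator;
import org.apache.flink.api.common.operators.base.DeltaIterationBase;
import org.apache.flink.util.Visitor;

public class CountDeltaIterations implements Visitor<Operator<?>> {

    private int deltaIterations;

    @Override
    public boolean preVisit(Operator<?> operator) {
        if (operator instanceof DeltaIterationBase) {
            // the same test that maps the operator to a WorksetIterationNode above
            deltaIterations++;
        }
        return true; // descend into the operator's predecessors
    }

    @Override
    public void postVisit(Operator<?> operator) {
        // nothing to do on the way back up for this counter
    }

    public static int count(Plan plan) {
        CountDeltaIterations visitor = new CountDeltaIterations();
        plan.accept(visitor);
        return visitor.deltaIterations;
    }
}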

Example 4 with WorksetIterationNode

use of org.apache.flink.optimizer.dag.WorksetIterationNode in project flink by apache.

the class PlanJSONDumpGenerator method visit.

private boolean visit(DumpableNode<?> node, PrintWriter writer, boolean first) {
    // check for duplicate traversal
    if (this.nodeIds.containsKey(node)) {
        return false;
    }
    // assign an id first
    this.nodeIds.put(node, this.nodeCnt++);
    // then recurse
    for (DumpableNode<?> child : node.getPredecessors()) {
        // important: if the child was already in the graph, visit returns false and "first" must not be reset
        if (visit(child, writer, first)) {
            first = false;
        }
    }
    // resolve the optimizer node that backs this dumpable node
    final OptimizerNode n = node.getOptimizerNode();
    // start a new node and output node id
    if (!first) {
        writer.print(",\n");
    }
    // open the node
    writer.print("\t{\n");
    // recurse if it is an iteration node
    if (node instanceof BulkIterationNode || node instanceof BulkIterationPlanNode) {
        DumpableNode<?> innerChild = node instanceof BulkIterationNode ? ((BulkIterationNode) node).getNextPartialSolution() : ((BulkIterationPlanNode) node).getRootOfStepFunction();
        DumpableNode<?> begin = node instanceof BulkIterationNode ? ((BulkIterationNode) node).getPartialSolution() : ((BulkIterationPlanNode) node).getPartialSolutionPlanNode();
        writer.print("\t\t\"step_function\": [\n");
        visit(innerChild, writer, true);
        writer.print("\n\t\t],\n");
        writer.print("\t\t\"partial_solution\": " + this.nodeIds.get(begin) + ",\n");
        writer.print("\t\t\"next_partial_solution\": " + this.nodeIds.get(innerChild) + ",\n");
    } else if (node instanceof WorksetIterationNode || node instanceof WorksetIterationPlanNode) {
        DumpableNode<?> worksetRoot = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getNextWorkset() : ((WorksetIterationPlanNode) node).getNextWorkSetPlanNode();
        DumpableNode<?> solutionDelta = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getSolutionSetDelta() : ((WorksetIterationPlanNode) node).getSolutionSetDeltaPlanNode();
        DumpableNode<?> workset = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getWorksetNode() : ((WorksetIterationPlanNode) node).getWorksetPlanNode();
        DumpableNode<?> solutionSet = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getSolutionSetNode() : ((WorksetIterationPlanNode) node).getSolutionSetPlanNode();
        writer.print("\t\t\"step_function\": [\n");
        visit(worksetRoot, writer, true);
        visit(solutionDelta, writer, false);
        writer.print("\n\t\t],\n");
        writer.print("\t\t\"workset\": " + this.nodeIds.get(workset) + ",\n");
        writer.print("\t\t\"solution_set\": " + this.nodeIds.get(solutionSet) + ",\n");
        writer.print("\t\t\"next_workset\": " + this.nodeIds.get(worksetRoot) + ",\n");
        writer.print("\t\t\"solution_delta\": " + this.nodeIds.get(solutionDelta) + ",\n");
    }
    // print the id
    writer.print("\t\t\"id\": " + this.nodeIds.get(node));
    final String type;
    String contents;
    if (n instanceof DataSinkNode) {
        type = "sink";
        contents = n.getOperator().toString();
    } else if (n instanceof DataSourceNode) {
        type = "source";
        contents = n.getOperator().toString();
    } else if (n instanceof BulkIterationNode) {
        type = "bulk_iteration";
        contents = n.getOperator().getName();
    } else if (n instanceof WorksetIterationNode) {
        type = "workset_iteration";
        contents = n.getOperator().getName();
    } else if (n instanceof BinaryUnionNode) {
        type = "pact";
        contents = "";
    } else {
        type = "pact";
        contents = n.getOperator().getName();
    }
    contents = StringUtils.showControlCharacters(contents);
    if (encodeForHTML) {
        contents = StringEscapeUtils.escapeHtml4(contents);
        contents = contents.replace("\\", "&#92;");
    }
    String name = n.getOperatorName();
    if (name.equals("Reduce") && (node instanceof SingleInputPlanNode) && ((SingleInputPlanNode) node).getDriverStrategy() == DriverStrategy.SORTED_GROUP_COMBINE) {
        name = "Combine";
    }
    // output the type identifier
    writer.print(",\n\t\t\"type\": \"" + type + "\"");
    // output node name
    writer.print(",\n\t\t\"pact\": \"" + name + "\"");
    // output node contents
    writer.print(",\n\t\t\"contents\": \"" + contents + "\"");
    // parallelism
    writer.print(",\n\t\t\"parallelism\": \"" + (n.getParallelism() >= 1 ? n.getParallelism() : "default") + "\"");
    // output node predecessors
    Iterator<? extends DumpableConnection<?>> inConns = node.getDumpableInputs().iterator();
    String child1name = "", child2name = "";
    if (inConns != null && inConns.hasNext()) {
        // start predecessor list
        writer.print(",\n\t\t\"predecessors\": [");
        int inputNum = 0;
        while (inConns.hasNext()) {
            final DumpableConnection<?> inConn = inConns.next();
            final DumpableNode<?> source = inConn.getSource();
            writer.print(inputNum == 0 ? "\n" : ",\n");
            if (inputNum == 0) {
                child1name += child1name.length() > 0 ? ", " : "";
                child1name += source.getOptimizerNode().getOperator().getName() + " (id: " + this.nodeIds.get(source) + ")";
            } else if (inputNum == 1) {
                child2name += child2name.length() > 0 ? ", " : "";
                child2name += source.getOptimizerNode().getOperator().getName() + " (id: " + this.nodeIds.get(source) + ")";
            }
            // output predecessor id
            writer.print("\t\t\t{\"id\": " + this.nodeIds.get(source));
            // output connection side
            if (inConns.hasNext() || inputNum > 0) {
                writer.print(", \"side\": \"" + (inputNum == 0 ? "first" : "second") + "\"");
            }
            // output shipping strategy and channel type
            final Channel channel = (inConn instanceof Channel) ? (Channel) inConn : null;
            final ShipStrategyType shipType = channel != null ? channel.getShipStrategy() : inConn.getShipStrategy();
            String shipStrategy = null;
            if (shipType != null) {
                switch(shipType) {
                    case NONE:
                        // nothing
                        break;
                    case FORWARD:
                        shipStrategy = "Forward";
                        break;
                    case BROADCAST:
                        shipStrategy = "Broadcast";
                        break;
                    case PARTITION_HASH:
                        shipStrategy = "Hash Partition";
                        break;
                    case PARTITION_RANGE:
                        shipStrategy = "Range Partition";
                        break;
                    case PARTITION_RANDOM:
                        shipStrategy = "Redistribute";
                        break;
                    case PARTITION_FORCED_REBALANCE:
                        shipStrategy = "Rebalance";
                        break;
                    case PARTITION_CUSTOM:
                        shipStrategy = "Custom Partition";
                        break;
                    default:
                        throw new CompilerException("Unknown ship strategy '" + inConn.getShipStrategy().name() + "' in JSON generator.");
                }
            }
            if (channel != null && channel.getShipStrategyKeys() != null && channel.getShipStrategyKeys().size() > 0) {
                shipStrategy += " on " + (channel.getShipStrategySortOrder() == null ? channel.getShipStrategyKeys().toString() : Utils.createOrdering(channel.getShipStrategyKeys(), channel.getShipStrategySortOrder()).toString());
            }
            if (shipStrategy != null) {
                writer.print(", \"ship_strategy\": \"" + shipStrategy + "\"");
            }
            if (channel != null) {
                String localStrategy = null;
                switch(channel.getLocalStrategy()) {
                    case NONE:
                        break;
                    case SORT:
                        localStrategy = "Sort";
                        break;
                    case COMBININGSORT:
                        localStrategy = "Sort (combining)";
                        break;
                    default:
                        throw new CompilerException("Unknown local strategy " + channel.getLocalStrategy().name());
                }
                if (channel.getLocalStrategyKeys() != null && channel.getLocalStrategyKeys().size() > 0) {
                    localStrategy += " on " + (channel.getLocalStrategySortOrder() == null ? channel.getLocalStrategyKeys().toString() : Utils.createOrdering(channel.getLocalStrategyKeys(), channel.getLocalStrategySortOrder()).toString());
                }
                if (localStrategy != null) {
                    writer.print(", \"local_strategy\": \"" + localStrategy + "\"");
                }
                if (channel.getTempMode() != TempMode.NONE) {
                    String tempMode = channel.getTempMode().toString();
                    writer.print(", \"temp_mode\": \"" + tempMode + "\"");
                }
                String exchangeMode = channel.getDataExchangeMode().toString();
                writer.print(", \"exchange_mode\": \"" + exchangeMode + "\"");
            }
            writer.print('}');
            inputNum++;
        }
        // finish predecessors
        writer.print("\n\t\t]");
    }
    // ---------------------------------------------------------------------------------------
    // the part below here is relevant only to plan nodes with concrete strategies, etc
    // ---------------------------------------------------------------------------------------
    final PlanNode p = node.getPlanNode();
    if (p == null) {
        // finish node
        writer.print("\n\t}");
        return true;
    }
    // local strategy
    String locString = null;
    if (p.getDriverStrategy() != null) {
        switch(p.getDriverStrategy()) {
            case NONE:
            case BINARY_NO_OP:
                break;
            case UNARY_NO_OP:
                locString = "No-Op";
                break;
            case MAP:
                locString = "Map";
                break;
            case FLAT_MAP:
                locString = "FlatMap";
                break;
            case MAP_PARTITION:
                locString = "Map Partition";
                break;
            case ALL_REDUCE:
                locString = "Reduce All";
                break;
            case ALL_GROUP_REDUCE:
            case ALL_GROUP_REDUCE_COMBINE:
                locString = "Group Reduce All";
                break;
            case SORTED_REDUCE:
                locString = "Sorted Reduce";
                break;
            case SORTED_PARTIAL_REDUCE:
                locString = "Sorted Combine/Reduce";
                break;
            case SORTED_GROUP_REDUCE:
                locString = "Sorted Group Reduce";
                break;
            case SORTED_GROUP_COMBINE:
                locString = "Sorted Combine";
                break;
            case HYBRIDHASH_BUILD_FIRST:
                locString = "Hybrid Hash (build: " + child1name + ")";
                break;
            case HYBRIDHASH_BUILD_SECOND:
                locString = "Hybrid Hash (build: " + child2name + ")";
                break;
            case HYBRIDHASH_BUILD_FIRST_CACHED:
                locString = "Hybrid Hash (CACHED) (build: " + child1name + ")";
                break;
            case HYBRIDHASH_BUILD_SECOND_CACHED:
                locString = "Hybrid Hash (CACHED) (build: " + child2name + ")";
                break;
            case NESTEDLOOP_BLOCKED_OUTER_FIRST:
                locString = "Nested Loops (Blocked Outer: " + child1name + ")";
                break;
            case NESTEDLOOP_BLOCKED_OUTER_SECOND:
                locString = "Nested Loops (Blocked Outer: " + child2name + ")";
                break;
            case NESTEDLOOP_STREAMED_OUTER_FIRST:
                locString = "Nested Loops (Streamed Outer: " + child1name + ")";
                break;
            case NESTEDLOOP_STREAMED_OUTER_SECOND:
                locString = "Nested Loops (Streamed Outer: " + child2name + ")";
                break;
            case INNER_MERGE:
                locString = "Merge";
                break;
            case CO_GROUP:
                locString = "Co-Group";
                break;
            default:
                locString = p.getDriverStrategy().name();
                break;
        }
        if (locString != null) {
            writer.print(",\n\t\t\"driver_strategy\": \"");
            writer.print(locString);
            writer.print("\"");
        }
    }
    {
        // output node global properties
        final GlobalProperties gp = p.getGlobalProperties();
        writer.print(",\n\t\t\"global_properties\": [\n");
        addProperty(writer, "Partitioning", gp.getPartitioning().name(), true);
        if (gp.getPartitioningFields() != null) {
            addProperty(writer, "Partitioned on", gp.getPartitioningFields().toString(), false);
        }
        if (gp.getPartitioningOrdering() != null) {
            addProperty(writer, "Partitioning Order", gp.getPartitioningOrdering().toString(), false);
        } else {
            addProperty(writer, "Partitioning Order", "(none)", false);
        }
        if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
            addProperty(writer, "Uniqueness", "not unique", false);
        } else {
            addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false);
        }
        writer.print("\n\t\t]");
    }
    {
        // output node local properties
        LocalProperties lp = p.getLocalProperties();
        writer.print(",\n\t\t\"local_properties\": [\n");
        if (lp.getOrdering() != null) {
            addProperty(writer, "Order", lp.getOrdering().toString(), true);
        } else {
            addProperty(writer, "Order", "(none)", true);
        }
        if (lp.getGroupedFields() != null && lp.getGroupedFields().size() > 0) {
            addProperty(writer, "Grouped on", lp.getGroupedFields().toString(), false);
        } else {
            addProperty(writer, "Grouping", "not grouped", false);
        }
        if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
            addProperty(writer, "Uniqueness", "not unique", false);
        } else {
            addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false);
        }
        writer.print("\n\t\t]");
    }
    // output node size estimates
    writer.print(",\n\t\t\"estimates\": [\n");
    addProperty(writer, "Est. Output Size", n.getEstimatedOutputSize() == -1 ? "(unknown)" : formatNumber(n.getEstimatedOutputSize(), "B"), true);
    addProperty(writer, "Est. Cardinality", n.getEstimatedNumRecords() == -1 ? "(unknown)" : formatNumber(n.getEstimatedNumRecords()), false);
    writer.print("\t\t]");
    // output node cost
    if (p.getNodeCosts() != null) {
        writer.print(",\n\t\t\"costs\": [\n");
        addProperty(writer, "Network", p.getNodeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getNetworkCost(), "B"), true);
        addProperty(writer, "Disk I/O", p.getNodeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getDiskCost(), "B"), false);
        addProperty(writer, "CPU", p.getNodeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getCpuCost(), ""), false);
        addProperty(writer, "Cumulative Network", p.getCumulativeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getNetworkCost(), "B"), false);
        addProperty(writer, "Cumulative Disk I/O", p.getCumulativeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getDiskCost(), "B"), false);
        addProperty(writer, "Cumulative CPU", p.getCumulativeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getCpuCost(), ""), false);
        writer.print("\n\t\t]");
    }
    // output the node compiler hints
    if (n.getOperator().getCompilerHints() != null) {
        CompilerHints hints = n.getOperator().getCompilerHints();
        CompilerHints defaults = new CompilerHints();
        String size = hints.getOutputSize() == defaults.getOutputSize() ? "(none)" : String.valueOf(hints.getOutputSize());
        String card = hints.getOutputCardinality() == defaults.getOutputCardinality() ? "(none)" : String.valueOf(hints.getOutputCardinality());
        String width = hints.getAvgOutputRecordSize() == defaults.getAvgOutputRecordSize() ? "(none)" : String.valueOf(hints.getAvgOutputRecordSize());
        String filter = hints.getFilterFactor() == defaults.getFilterFactor() ? "(none)" : String.valueOf(hints.getFilterFactor());
        writer.print(",\n\t\t\"compiler_hints\": [\n");
        addProperty(writer, "Output Size (bytes)", size, true);
        addProperty(writer, "Output Cardinality", card, false);
        addProperty(writer, "Avg. Output Record Size (bytes)", width, false);
        addProperty(writer, "Filter Factor", filter, false);
        writer.print("\t\t]");
    }
    // finish node
    writer.print("\n\t}");
    return true;
}
Also used : DataSourceNode(org.apache.flink.optimizer.dag.DataSourceNode) CompilerHints(org.apache.flink.api.common.operators.CompilerHints) WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode) DataSinkNode(org.apache.flink.optimizer.dag.DataSinkNode) Channel(org.apache.flink.optimizer.plan.Channel) BinaryUnionNode(org.apache.flink.optimizer.dag.BinaryUnionNode) BulkIterationNode(org.apache.flink.optimizer.dag.BulkIterationNode) ShipStrategyType(org.apache.flink.runtime.operators.shipping.ShipStrategyType) SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode) WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode) BulkIterationPlanNode(org.apache.flink.optimizer.plan.BulkIterationPlanNode) PlanNode(org.apache.flink.optimizer.plan.PlanNode) SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode) SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode) OptimizerNode(org.apache.flink.optimizer.dag.OptimizerNode) WorksetIterationNode(org.apache.flink.optimizer.dag.WorksetIterationNode) GlobalProperties(org.apache.flink.optimizer.dataproperties.GlobalProperties) CompilerException(org.apache.flink.optimizer.CompilerException) LocalProperties(org.apache.flink.optimizer.dataproperties.LocalProperties) BulkIterationPlanNode(org.apache.flink.optimizer.plan.BulkIterationPlanNode)
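
To actually obtain the JSON that this visit method writes, the generator is typically driven through its public accessors after compiling a plan. A hedged sketch, assuming the getOptimizerPlanAsJSON and setEncodeForHTML accessors (the encodeForHTML flag appears in the code above); the wrapper class is invented for illustration.

import org.apache.flink.api.common.Plan;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.DataStatistics;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plandump.PlanJSONDumpGenerator;

public final class DumpPlanSketch {

    public static String toJson(Plan plan, boolean forHtml) {
        // compile the program plan so that concrete strategies, costs, and
        // properties are available for the dump
        OptimizedPlan optimized =
                new Optimizer(new DataStatistics(), new DefaultCostEstimator(), new Configuration())
                        .compile(plan);

        PlanJSONDumpGenerator dumper = new PlanJSONDumpGenerator();
        // escape operator names when the JSON is embedded in a browser-based visualizer
        dumper.setEncodeForHTML(forHtml);
        return dumper.getOptimizerPlanAsJSON(optimized);
    }
}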

Aggregations

WorksetIterationNode (org.apache.flink.optimizer.dag.WorksetIterationNode) - 4 uses
CompilerException (org.apache.flink.optimizer.CompilerException) - 3 uses
BulkIterationNode (org.apache.flink.optimizer.dag.BulkIterationNode) - 3 uses
OptimizerNode (org.apache.flink.optimizer.dag.OptimizerNode) - 3 uses
InvalidProgramException (org.apache.flink.api.common.InvalidProgramException) - 2 uses
DeltaIterationBase (org.apache.flink.api.common.operators.base.DeltaIterationBase) - 2 uses
BinaryUnionNode (org.apache.flink.optimizer.dag.BinaryUnionNode) - 2 uses
BulkPartialSolutionNode (org.apache.flink.optimizer.dag.BulkPartialSolutionNode) - 2 uses
CoGroupNode (org.apache.flink.optimizer.dag.CoGroupNode) - 2 uses
DataSinkNode (org.apache.flink.optimizer.dag.DataSinkNode) - 2 uses
DataSourceNode (org.apache.flink.optimizer.dag.DataSourceNode) - 2 uses
JoinNode (org.apache.flink.optimizer.dag.JoinNode) - 2 uses
OuterJoinNode (org.apache.flink.optimizer.dag.OuterJoinNode) - 2 uses
SolutionSetNode (org.apache.flink.optimizer.dag.SolutionSetNode) - 2 uses
WorksetNode (org.apache.flink.optimizer.dag.WorksetNode) - 2 uses
BulkIterationPlanNode (org.apache.flink.optimizer.plan.BulkIterationPlanNode) - 2 uses
Channel (org.apache.flink.optimizer.plan.Channel) - 2 uses
SingleInputPlanNode (org.apache.flink.optimizer.plan.SingleInputPlanNode) - 2 uses
SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode) - 2 uses
WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode) - 2 uses