
Example 16 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From the class PregelCompilerTest, method testPregelCompiler.

@SuppressWarnings("serial")
@Test
public void testPregelCompiler() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);
        // compose test program
        {
            DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<Long, Long>());
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {

                public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
                    return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
                }
            });
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
            DataSet<Vertex<Long, Long>> result = graph.runVertexCentricIteration(new CCCompute(), null, 100).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        Plan p = env.createProgramPlan("Pregel Connected Components");
        OptimizedPlan op = compileNoStats(p);
        // check the sink
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
        // check the iteration
        WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
        // check the solution set delta
        PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
        assertTrue(ssDelta instanceof SingleInputPlanNode);
        SingleInputPlanNode ssFlatMap = (SingleInputPlanNode) ((SingleInputPlanNode) (ssDelta)).getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, ssFlatMap.getParallelism());
        assertEquals(ShipStrategyType.FORWARD, ssFlatMap.getInput().getShipStrategy());
        // check the computation coGroup
        DualInputPlanNode computationCoGroup = (DualInputPlanNode) (ssFlatMap.getInput().getSource());
        assertEquals(DEFAULT_PARALLELISM, computationCoGroup.getParallelism());
        assertEquals(ShipStrategyType.FORWARD, computationCoGroup.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, computationCoGroup.getInput2().getShipStrategy());
        assertTrue(computationCoGroup.getInput2().getTempMode().isCached());
        assertEquals(new FieldList(0), computationCoGroup.getInput2().getShipStrategyKeys());
        // check that the initial partitioning is pushed out of the loop
        assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
        assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment), Tuple2ToVertexMap(org.apache.flink.graph.utils.Tuple2ToVertexMap), DataSet(org.apache.flink.api.java.DataSet), WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode), MapFunction(org.apache.flink.api.common.functions.MapFunction), Plan(org.apache.flink.api.common.Plan), OptimizedPlan(org.apache.flink.optimizer.plan.OptimizedPlan), DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat), FieldList(org.apache.flink.api.common.operators.util.FieldList), SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode), DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode), NullValue(org.apache.flink.types.NullValue), Graph(org.apache.flink.graph.Graph), PlanNode(org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode), Tuple2(org.apache.flink.api.java.tuple.Tuple2), Edge(org.apache.flink.graph.Edge), Test(org.junit.Test)
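
For readers who want to reproduce this plan inspection outside the test harness, the following sketch shows roughly what the compileNoStats helper (inherited from CompilerTestBase) does and how the assertions above walk from the sink to the delta iteration. This is a minimal sketch, not the test's actual code: the Optimizer constructor taking (DataStatistics, CostEstimator, Configuration), the null statistics argument, and the helper class name are assumptions that mirror the test base class.

import org.apache.flink.api.common.Plan;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plan.SinkPlanNode;
import org.apache.flink.optimizer.plan.WorksetIterationPlanNode;

public class PlanInspectionSketch {

    // Compile a program plan without data statistics (roughly what compileNoStats() does).
    static OptimizedPlan compileWithoutStats(Plan plan) {
        Optimizer optimizer = new Optimizer(null, new DefaultCostEstimator(), new Configuration());
        return optimizer.compile(plan);
    }

    // Walk from the single sink to the delta (workset) iteration, as the assertions above do.
    static WorksetIterationPlanNode findWorksetIteration(OptimizedPlan optimizedPlan) {
        SinkPlanNode sink = optimizedPlan.getDataSinks().iterator().next();
        return (WorksetIterationPlanNode) sink.getInput().getSource();
    }
}

With these two helpers, the checks in testPregelCompiler reduce to compiling the Pregel program plan and asserting parallelism, ship strategies, and key fields on the returned iteration node.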

Example 17 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From the class PregelCompilerTest, method testPregelWithCombiner.

@SuppressWarnings("serial")
@Test
public void testPregelWithCombiner() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);
        // compose test program
        {
            DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<Long, Long>());
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {

                public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
                    return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
                }
            });
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
            DataSet<Vertex<Long, Long>> result = graph.runVertexCentricIteration(new CCCompute(), new CCCombiner(), 100).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        Plan p = env.createProgramPlan("Pregel Connected Components");
        OptimizedPlan op = compileNoStats(p);
        // check the sink
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
        // check the iteration
        WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
        // check the combiner
        SingleInputPlanNode combiner = (SingleInputPlanNode) iteration.getInput2().getSource();
        assertEquals(ShipStrategyType.FORWARD, combiner.getInput().getShipStrategy());
        // check the solution set delta
        PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
        assertTrue(ssDelta instanceof SingleInputPlanNode);
        SingleInputPlanNode ssFlatMap = (SingleInputPlanNode) ((SingleInputPlanNode) (ssDelta)).getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, ssFlatMap.getParallelism());
        assertEquals(ShipStrategyType.FORWARD, ssFlatMap.getInput().getShipStrategy());
        // check the computation coGroup
        DualInputPlanNode computationCoGroup = (DualInputPlanNode) (ssFlatMap.getInput().getSource());
        assertEquals(DEFAULT_PARALLELISM, computationCoGroup.getParallelism());
        assertEquals(ShipStrategyType.FORWARD, computationCoGroup.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, computationCoGroup.getInput2().getShipStrategy());
        assertTrue(computationCoGroup.getInput2().getTempMode().isCached());
        assertEquals(new FieldList(0), computationCoGroup.getInput2().getShipStrategyKeys());
        // check that the initial partitioning is pushed out of the loop
        assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
        assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment), Tuple2ToVertexMap(org.apache.flink.graph.utils.Tuple2ToVertexMap), DataSet(org.apache.flink.api.java.DataSet), WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode), MapFunction(org.apache.flink.api.common.functions.MapFunction), Plan(org.apache.flink.api.common.Plan), OptimizedPlan(org.apache.flink.optimizer.plan.OptimizedPlan), DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat), FieldList(org.apache.flink.api.common.operators.util.FieldList), SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode), DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode), NullValue(org.apache.flink.types.NullValue), Graph(org.apache.flink.graph.Graph), PlanNode(org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode), Tuple2(org.apache.flink.api.java.tuple.Tuple2), Edge(org.apache.flink.graph.Edge), Test(org.junit.Test)

Example 18 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From the class SpargelCompilerTest, method testSpargelCompiler.

@SuppressWarnings("serial")
@Test
public void testSpargelCompiler() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);
        // compose test program
        {
            DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<Long, Long>());
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {

                public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
                    return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
                }
            });
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
            DataSet<Vertex<Long, Long>> result = graph.runScatterGatherIteration(new ConnectedComponents.CCMessenger<Long, Long>(BasicTypeInfo.LONG_TYPE_INFO), new ConnectedComponents.CCUpdater<Long, Long>(), 100).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        Plan p = env.createProgramPlan("Spargel Connected Components");
        OptimizedPlan op = compileNoStats(p);
        // check the sink
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
        // check the iteration
        WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
        // check the solution set join and the delta
        PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
        // this is only true if the update function preserves the partitioning
        assertTrue(ssDelta instanceof DualInputPlanNode);
        DualInputPlanNode ssJoin = (DualInputPlanNode) ssDelta;
        assertEquals(DEFAULT_PARALLELISM, ssJoin.getParallelism());
        assertEquals(ShipStrategyType.PARTITION_HASH, ssJoin.getInput1().getShipStrategy());
        assertEquals(new FieldList(0), ssJoin.getInput1().getShipStrategyKeys());
        // check the workset set join
        DualInputPlanNode edgeJoin = (DualInputPlanNode) ssJoin.getInput1().getSource();
        assertEquals(DEFAULT_PARALLELISM, edgeJoin.getParallelism());
        assertEquals(ShipStrategyType.PARTITION_HASH, edgeJoin.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.FORWARD, edgeJoin.getInput2().getShipStrategy());
        assertTrue(edgeJoin.getInput1().getTempMode().isCached());
        assertEquals(new FieldList(0), edgeJoin.getInput1().getShipStrategyKeys());
        // check that the initial partitioning is pushed out of the loop
        assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput2().getShipStrategy());
        assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
        assertEquals(new FieldList(0), iteration.getInput2().getShipStrategyKeys());
        // check that the initial workset sort is outside the loop
        assertEquals(LocalStrategy.SORT, iteration.getInput2().getLocalStrategy());
        assertEquals(new FieldList(0), iteration.getInput2().getLocalStrategyKeys());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment), Tuple2ToVertexMap(org.apache.flink.graph.utils.Tuple2ToVertexMap), DataSet(org.apache.flink.api.java.DataSet), WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode), MapFunction(org.apache.flink.api.common.functions.MapFunction), Plan(org.apache.flink.api.common.Plan), OptimizedPlan(org.apache.flink.optimizer.plan.OptimizedPlan), DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat), FieldList(org.apache.flink.api.common.operators.util.FieldList), DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode), NullValue(org.apache.flink.types.NullValue), Graph(org.apache.flink.graph.Graph), PlanNode(org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode), ConnectedComponents(org.apache.flink.graph.library.ConnectedComponents), Tuple2(org.apache.flink.api.java.tuple.Tuple2), Edge(org.apache.flink.graph.Edge), Test(org.junit.Test)
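
Comparing this Spargel plan with the Pregel plans in Examples 16 and 17: the vertex-centric compiler leaves a SingleInputPlanNode (a flat map fed by the computation coGroup) as the solution set delta, while the scatter-gather compiler makes the solution set join itself the delta, which is why the test above expects a DualInputPlanNode. The hypothetical helper below only restates that distinction; the class and method names are illustrative, not part of Flink.

import org.apache.flink.optimizer.plan.DualInputPlanNode;
import org.apache.flink.optimizer.plan.PlanNode;
import org.apache.flink.optimizer.plan.SingleInputPlanNode;
import org.apache.flink.optimizer.plan.WorksetIterationPlanNode;

public class DeltaShapeSketch {

    // Summarize the shape of the solution set delta that the two compiler tests verify.
    static String describeSolutionSetDelta(WorksetIterationPlanNode iteration) {
        PlanNode delta = iteration.getSolutionSetDeltaPlanNode();
        if (delta instanceof DualInputPlanNode) {
            // scatter-gather (Spargel): the delta is the solution set join itself
            return "delta is the solution set join (scatter-gather shape)";
        } else if (delta instanceof SingleInputPlanNode) {
            // vertex-centric (Pregel): the delta is a flat map in front of the computation coGroup
            return "delta is a flat map over the computation coGroup (vertex-centric shape)";
        } else {
            return "unexpected delta node: " + delta.getClass().getSimpleName();
        }
    }
}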

Example 19 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From the class PlanJSONDumpGenerator, method visit.

private boolean visit(DumpableNode<?> node, PrintWriter writer, boolean first) {
    // check for duplicate traversal
    if (this.nodeIds.containsKey(node)) {
        return false;
    }
    // assign an id first
    this.nodeIds.put(node, this.nodeCnt++);
    // then recurse
    for (DumpableNode<?> child : node.getPredecessors()) {
        // if a child was emitted, subsequent nodes are no longer the first
        if (visit(child, writer, first)) {
            first = false;
        }
    }
    // check if this node should be skipped from the dump
    final OptimizerNode n = node.getOptimizerNode();
    // start a new node and output node id
    if (!first) {
        writer.print(",\n");
    }
    // open the node
    writer.print("\t{\n");
    // recurse if it is an iteration node
    if (node instanceof BulkIterationNode || node instanceof BulkIterationPlanNode) {
        DumpableNode<?> innerChild = node instanceof BulkIterationNode ? ((BulkIterationNode) node).getNextPartialSolution() : ((BulkIterationPlanNode) node).getRootOfStepFunction();
        DumpableNode<?> begin = node instanceof BulkIterationNode ? ((BulkIterationNode) node).getPartialSolution() : ((BulkIterationPlanNode) node).getPartialSolutionPlanNode();
        writer.print("\t\t\"step_function\": [\n");
        visit(innerChild, writer, true);
        writer.print("\n\t\t],\n");
        writer.print("\t\t\"partial_solution\": " + this.nodeIds.get(begin) + ",\n");
        writer.print("\t\t\"next_partial_solution\": " + this.nodeIds.get(innerChild) + ",\n");
    } else if (node instanceof WorksetIterationNode || node instanceof WorksetIterationPlanNode) {
        DumpableNode<?> worksetRoot = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getNextWorkset() : ((WorksetIterationPlanNode) node).getNextWorkSetPlanNode();
        DumpableNode<?> solutionDelta = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getSolutionSetDelta() : ((WorksetIterationPlanNode) node).getSolutionSetDeltaPlanNode();
        DumpableNode<?> workset = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getWorksetNode() : ((WorksetIterationPlanNode) node).getWorksetPlanNode();
        DumpableNode<?> solutionSet = node instanceof WorksetIterationNode ? ((WorksetIterationNode) node).getSolutionSetNode() : ((WorksetIterationPlanNode) node).getSolutionSetPlanNode();
        writer.print("\t\t\"step_function\": [\n");
        visit(worksetRoot, writer, true);
        visit(solutionDelta, writer, false);
        writer.print("\n\t\t],\n");
        writer.print("\t\t\"workset\": " + this.nodeIds.get(workset) + ",\n");
        writer.print("\t\t\"solution_set\": " + this.nodeIds.get(solutionSet) + ",\n");
        writer.print("\t\t\"next_workset\": " + this.nodeIds.get(worksetRoot) + ",\n");
        writer.print("\t\t\"solution_delta\": " + this.nodeIds.get(solutionDelta) + ",\n");
    }
    // print the id
    writer.print("\t\t\"id\": " + this.nodeIds.get(node));
    final String type;
    String contents;
    if (n instanceof DataSinkNode) {
        type = "sink";
        contents = n.getOperator().toString();
    } else if (n instanceof DataSourceNode) {
        type = "source";
        contents = n.getOperator().toString();
    } else if (n instanceof BulkIterationNode) {
        type = "bulk_iteration";
        contents = n.getOperator().getName();
    } else if (n instanceof WorksetIterationNode) {
        type = "workset_iteration";
        contents = n.getOperator().getName();
    } else if (n instanceof BinaryUnionNode) {
        type = "pact";
        contents = "";
    } else {
        type = "pact";
        contents = n.getOperator().getName();
    }
    contents = StringUtils.showControlCharacters(contents);
    if (encodeForHTML) {
        contents = StringEscapeUtils.escapeHtml4(contents);
        contents = contents.replace("\\", "&#92;");
    }
    String name = n.getOperatorName();
    if (name.equals("Reduce") && (node instanceof SingleInputPlanNode) && ((SingleInputPlanNode) node).getDriverStrategy() == DriverStrategy.SORTED_GROUP_COMBINE) {
        name = "Combine";
    }
    // output the type identifier
    writer.print(",\n\t\t\"type\": \"" + type + "\"");
    // output node name
    writer.print(",\n\t\t\"pact\": \"" + name + "\"");
    // output node contents
    writer.print(",\n\t\t\"contents\": \"" + contents + "\"");
    // parallelism
    writer.print(",\n\t\t\"parallelism\": \"" + (n.getParallelism() >= 1 ? n.getParallelism() : "default") + "\"");
    // output node predecessors
    Iterator<? extends DumpableConnection<?>> inConns = node.getDumpableInputs().iterator();
    String child1name = "", child2name = "";
    if (inConns != null && inConns.hasNext()) {
        // start predecessor list
        writer.print(",\n\t\t\"predecessors\": [");
        int inputNum = 0;
        while (inConns.hasNext()) {
            final DumpableConnection<?> inConn = inConns.next();
            final DumpableNode<?> source = inConn.getSource();
            writer.print(inputNum == 0 ? "\n" : ",\n");
            if (inputNum == 0) {
                child1name += child1name.length() > 0 ? ", " : "";
                child1name += source.getOptimizerNode().getOperator().getName() + " (id: " + this.nodeIds.get(source) + ")";
            } else if (inputNum == 1) {
                child2name += child2name.length() > 0 ? ", " : "";
                child2name += source.getOptimizerNode().getOperator().getName() + " (id: " + this.nodeIds.get(source) + ")";
            }
            // output predecessor id
            writer.print("\t\t\t{\"id\": " + this.nodeIds.get(source));
            // output connection side
            if (inConns.hasNext() || inputNum > 0) {
                writer.print(", \"side\": \"" + (inputNum == 0 ? "first" : "second") + "\"");
            }
            // output shipping strategy and channel type
            final Channel channel = (inConn instanceof Channel) ? (Channel) inConn : null;
            final ShipStrategyType shipType = channel != null ? channel.getShipStrategy() : inConn.getShipStrategy();
            String shipStrategy = null;
            if (shipType != null) {
                switch(shipType) {
                    case NONE:
                        // nothing
                        break;
                    case FORWARD:
                        shipStrategy = "Forward";
                        break;
                    case BROADCAST:
                        shipStrategy = "Broadcast";
                        break;
                    case PARTITION_HASH:
                        shipStrategy = "Hash Partition";
                        break;
                    case PARTITION_RANGE:
                        shipStrategy = "Range Partition";
                        break;
                    case PARTITION_RANDOM:
                        shipStrategy = "Redistribute";
                        break;
                    case PARTITION_FORCED_REBALANCE:
                        shipStrategy = "Rebalance";
                        break;
                    case PARTITION_CUSTOM:
                        shipStrategy = "Custom Partition";
                        break;
                    default:
                        throw new CompilerException("Unknown ship strategy '" + inConn.getShipStrategy().name() + "' in JSON generator.");
                }
            }
            if (channel != null && channel.getShipStrategyKeys() != null && channel.getShipStrategyKeys().size() > 0) {
                shipStrategy += " on " + (channel.getShipStrategySortOrder() == null ? channel.getShipStrategyKeys().toString() : Utils.createOrdering(channel.getShipStrategyKeys(), channel.getShipStrategySortOrder()).toString());
            }
            if (shipStrategy != null) {
                writer.print(", \"ship_strategy\": \"" + shipStrategy + "\"");
            }
            if (channel != null) {
                String localStrategy = null;
                switch(channel.getLocalStrategy()) {
                    case NONE:
                        break;
                    case SORT:
                        localStrategy = "Sort";
                        break;
                    case COMBININGSORT:
                        localStrategy = "Sort (combining)";
                        break;
                    default:
                        throw new CompilerException("Unknown local strategy " + channel.getLocalStrategy().name());
                }
                if (channel != null && channel.getLocalStrategyKeys() != null && channel.getLocalStrategyKeys().size() > 0) {
                    localStrategy += " on " + (channel.getLocalStrategySortOrder() == null ? channel.getLocalStrategyKeys().toString() : Utils.createOrdering(channel.getLocalStrategyKeys(), channel.getLocalStrategySortOrder()).toString());
                }
                if (localStrategy != null) {
                    writer.print(", \"local_strategy\": \"" + localStrategy + "\"");
                }
                if (channel != null && channel.getTempMode() != TempMode.NONE) {
                    String tempMode = channel.getTempMode().toString();
                    writer.print(", \"temp_mode\": \"" + tempMode + "\"");
                }
                if (channel != null) {
                    String exchangeMode = channel.getDataExchangeMode().toString();
                    writer.print(", \"exchange_mode\": \"" + exchangeMode + "\"");
                }
            }
            writer.print('}');
            inputNum++;
        }
        // finish predecessors
        writer.print("\n\t\t]");
    }
    //---------------------------------------------------------------------------------------
    // the part below here is relevant only to plan nodes with concrete strategies, etc
    //---------------------------------------------------------------------------------------
    final PlanNode p = node.getPlanNode();
    if (p == null) {
        // finish node
        writer.print("\n\t}");
        return true;
    }
    // local strategy
    String locString = null;
    if (p.getDriverStrategy() != null) {
        switch(p.getDriverStrategy()) {
            case NONE:
            case BINARY_NO_OP:
                break;
            case UNARY_NO_OP:
                locString = "No-Op";
                break;
            case MAP:
                locString = "Map";
                break;
            case FLAT_MAP:
                locString = "FlatMap";
                break;
            case MAP_PARTITION:
                locString = "Map Partition";
                break;
            case ALL_REDUCE:
                locString = "Reduce All";
                break;
            case ALL_GROUP_REDUCE:
            case ALL_GROUP_REDUCE_COMBINE:
                locString = "Group Reduce All";
                break;
            case SORTED_REDUCE:
                locString = "Sorted Reduce";
                break;
            case SORTED_PARTIAL_REDUCE:
                locString = "Sorted Combine/Reduce";
                break;
            case SORTED_GROUP_REDUCE:
                locString = "Sorted Group Reduce";
                break;
            case SORTED_GROUP_COMBINE:
                locString = "Sorted Combine";
                break;
            case HYBRIDHASH_BUILD_FIRST:
                locString = "Hybrid Hash (build: " + child1name + ")";
                break;
            case HYBRIDHASH_BUILD_SECOND:
                locString = "Hybrid Hash (build: " + child2name + ")";
                break;
            case HYBRIDHASH_BUILD_FIRST_CACHED:
                locString = "Hybrid Hash (CACHED) (build: " + child1name + ")";
                break;
            case HYBRIDHASH_BUILD_SECOND_CACHED:
                locString = "Hybrid Hash (CACHED) (build: " + child2name + ")";
                break;
            case NESTEDLOOP_BLOCKED_OUTER_FIRST:
                locString = "Nested Loops (Blocked Outer: " + child1name + ")";
                break;
            case NESTEDLOOP_BLOCKED_OUTER_SECOND:
                locString = "Nested Loops (Blocked Outer: " + child2name + ")";
                break;
            case NESTEDLOOP_STREAMED_OUTER_FIRST:
                locString = "Nested Loops (Streamed Outer: " + child1name + ")";
                break;
            case NESTEDLOOP_STREAMED_OUTER_SECOND:
                locString = "Nested Loops (Streamed Outer: " + child2name + ")";
                break;
            case INNER_MERGE:
                locString = "Merge";
                break;
            case CO_GROUP:
                locString = "Co-Group";
                break;
            default:
                locString = p.getDriverStrategy().name();
                break;
        }
        if (locString != null) {
            writer.print(",\n\t\t\"driver_strategy\": \"");
            writer.print(locString);
            writer.print("\"");
        }
    }
    {
        // output node global properties
        final GlobalProperties gp = p.getGlobalProperties();
        writer.print(",\n\t\t\"global_properties\": [\n");
        addProperty(writer, "Partitioning", gp.getPartitioning().name(), true);
        if (gp.getPartitioningFields() != null) {
            addProperty(writer, "Partitioned on", gp.getPartitioningFields().toString(), false);
        }
        if (gp.getPartitioningOrdering() != null) {
            addProperty(writer, "Partitioning Order", gp.getPartitioningOrdering().toString(), false);
        } else {
            addProperty(writer, "Partitioning Order", "(none)", false);
        }
        if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
            addProperty(writer, "Uniqueness", "not unique", false);
        } else {
            addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false);
        }
        writer.print("\n\t\t]");
    }
    {
        // output node local properties
        LocalProperties lp = p.getLocalProperties();
        writer.print(",\n\t\t\"local_properties\": [\n");
        if (lp.getOrdering() != null) {
            addProperty(writer, "Order", lp.getOrdering().toString(), true);
        } else {
            addProperty(writer, "Order", "(none)", true);
        }
        if (lp.getGroupedFields() != null && lp.getGroupedFields().size() > 0) {
            addProperty(writer, "Grouped on", lp.getGroupedFields().toString(), false);
        } else {
            addProperty(writer, "Grouping", "not grouped", false);
        }
        if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
            addProperty(writer, "Uniqueness", "not unique", false);
        } else {
            addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false);
        }
        writer.print("\n\t\t]");
    }
    // output node size estimates
    writer.print(",\n\t\t\"estimates\": [\n");
    addProperty(writer, "Est. Output Size", n.getEstimatedOutputSize() == -1 ? "(unknown)" : formatNumber(n.getEstimatedOutputSize(), "B"), true);
    addProperty(writer, "Est. Cardinality", n.getEstimatedNumRecords() == -1 ? "(unknown)" : formatNumber(n.getEstimatedNumRecords()), false);
    writer.print("\t\t]");
    // output node cost
    if (p.getNodeCosts() != null) {
        writer.print(",\n\t\t\"costs\": [\n");
        addProperty(writer, "Network", p.getNodeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getNetworkCost(), "B"), true);
        addProperty(writer, "Disk I/O", p.getNodeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getDiskCost(), "B"), false);
        addProperty(writer, "CPU", p.getNodeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p.getNodeCosts().getCpuCost(), ""), false);
        addProperty(writer, "Cumulative Network", p.getCumulativeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getNetworkCost(), "B"), false);
        addProperty(writer, "Cumulative Disk I/O", p.getCumulativeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getDiskCost(), "B"), false);
        addProperty(writer, "Cumulative CPU", p.getCumulativeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p.getCumulativeCosts().getCpuCost(), ""), false);
        writer.print("\n\t\t]");
    }
    // output the node compiler hints
    if (n.getOperator().getCompilerHints() != null) {
        CompilerHints hints = n.getOperator().getCompilerHints();
        CompilerHints defaults = new CompilerHints();
        String size = hints.getOutputSize() == defaults.getOutputSize() ? "(none)" : String.valueOf(hints.getOutputSize());
        String card = hints.getOutputCardinality() == defaults.getOutputCardinality() ? "(none)" : String.valueOf(hints.getOutputCardinality());
        String width = hints.getAvgOutputRecordSize() == defaults.getAvgOutputRecordSize() ? "(none)" : String.valueOf(hints.getAvgOutputRecordSize());
        String filter = hints.getFilterFactor() == defaults.getFilterFactor() ? "(none)" : String.valueOf(hints.getFilterFactor());
        writer.print(",\n\t\t\"compiler_hints\": [\n");
        addProperty(writer, "Output Size (bytes)", size, true);
        addProperty(writer, "Output Cardinality", card, false);
        addProperty(writer, "Avg. Output Record Size (bytes)", width, false);
        addProperty(writer, "Filter Factor", filter, false);
        writer.print("\t\t]");
    }
    // finish node
    writer.print("\n\t}");
    return true;
}
Also used : DataSourceNode(org.apache.flink.optimizer.dag.DataSourceNode), CompilerHints(org.apache.flink.api.common.operators.CompilerHints), WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode), DataSinkNode(org.apache.flink.optimizer.dag.DataSinkNode), Channel(org.apache.flink.optimizer.plan.Channel), BinaryUnionNode(org.apache.flink.optimizer.dag.BinaryUnionNode), BulkIterationNode(org.apache.flink.optimizer.dag.BulkIterationNode), ShipStrategyType(org.apache.flink.runtime.operators.shipping.ShipStrategyType), SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode), BulkIterationPlanNode(org.apache.flink.optimizer.plan.BulkIterationPlanNode), PlanNode(org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode), OptimizerNode(org.apache.flink.optimizer.dag.OptimizerNode), WorksetIterationNode(org.apache.flink.optimizer.dag.WorksetIterationNode), GlobalProperties(org.apache.flink.optimizer.dataproperties.GlobalProperties), CompilerException(org.apache.flink.optimizer.CompilerException), LocalProperties(org.apache.flink.optimizer.dataproperties.LocalProperties)
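
To see the JSON that this visit method emits for a plan containing a WorksetIterationPlanNode (the step_function, workset, solution_set, next_workset, and solution_delta fields written above), the dump generator can be invoked directly on an optimized plan. A minimal sketch follows; only the encodeForHTML flag is visible in the excerpt above, so the package location and the getOptimizerPlanAsJSON(OptimizedPlan) and setEncodeForHTML(boolean) methods are assumptions about the surrounding class.

import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plandump.PlanJSONDumpGenerator;

public class PlanDumpSketch {

    // Render an optimized plan (e.g., the one produced in Examples 16-18) as a JSON string.
    static String dumpAsJson(OptimizedPlan optimizedPlan, boolean forHtml) {
        PlanJSONDumpGenerator dumpGenerator = new PlanJSONDumpGenerator();
        // escape node contents for embedding in an HTML page, see the encodeForHTML branch above
        dumpGenerator.setEncodeForHTML(forHtml);
        return dumpGenerator.getOptimizerPlanAsJSON(optimizedPlan);
    }
}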

Example 20 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From the class JobGraphGenerator, method postVisit.

/**
 * This method implements the post-visit during the depth-first traversal. When the post-visit happens,
 * all of the descendants have been processed, so this method connects all of the current node's
 * predecessors to the current node.
 *
 * @param node
 *        The node currently processed during the post-visit.
 * @see org.apache.flink.util.Visitor#postVisit(org.apache.flink.util.Visitable)
 */
@Override
public void postVisit(PlanNode node) {
    try {
        // solution sets have no input. the initial solution set input is connected when the iteration node itself is post-visited
        if (node instanceof SourcePlanNode || node instanceof NAryUnionPlanNode || node instanceof SolutionSetPlanNode) {
            return;
        }
        // check if we have an iteration. in that case, translate the step function now
        if (node instanceof IterationPlanNode) {
            // prevent nested iterations
            if (node.isOnDynamicPath()) {
                throw new CompilerException("Nested Iterations are not possible at the moment!");
            }
            // if we are already translating an enclosing iteration, push the current one onto the stack
            if (this.currentIteration != null) {
                this.iterationStack.add(this.currentIteration);
            }
            this.currentIteration = (IterationPlanNode) node;
            this.currentIteration.acceptForStepFunction(this);
            // pop the current iteration from the stack
            if (this.iterationStack.isEmpty()) {
                this.currentIteration = null;
            } else {
                this.currentIteration = this.iterationStack.remove(this.iterationStack.size() - 1);
            }
            // connect the initial solution set now.
            if (node instanceof WorksetIterationPlanNode) {
                // connect the initial solution set
                WorksetIterationPlanNode wsNode = (WorksetIterationPlanNode) node;
                JobVertex headVertex = this.iterations.get(wsNode).getHeadTask();
                TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration());
                int inputIndex = headConfig.getDriverStrategy().getNumInputs();
                headConfig.setIterationHeadSolutionSetInputIndex(inputIndex);
                translateChannel(wsNode.getInitialSolutionSetInput(), inputIndex, headVertex, headConfig, false);
            }
            return;
        }
        final JobVertex targetVertex = this.vertices.get(node);
        // check whether this node has its own task, or is merged with another one
        if (targetVertex == null) {
            // node's task is merged with another task. it is either chained, or a merged head vertex
            // from an iteration
            final TaskInChain chainedTask;
            if ((chainedTask = this.chainedTasks.get(node)) != null) {
                // Chained Task. Sanity check first...
                final Iterator<Channel> inConns = node.getInputs().iterator();
                if (!inConns.hasNext()) {
                    throw new CompilerException("Bug: Found chained task with no input.");
                }
                final Channel inConn = inConns.next();
                if (inConns.hasNext()) {
                    throw new CompilerException("Bug: Found a chained task with more than one input!");
                }
                if (inConn.getLocalStrategy() != null && inConn.getLocalStrategy() != LocalStrategy.NONE) {
                    throw new CompilerException("Bug: Found a chained task with an input local strategy.");
                }
                if (inConn.getShipStrategy() != null && inConn.getShipStrategy() != ShipStrategyType.FORWARD) {
                    throw new CompilerException("Bug: Found a chained task with an input ship strategy other than FORWARD.");
                }
                JobVertex container = chainedTask.getContainingVertex();
                if (container == null) {
                    final PlanNode sourceNode = inConn.getSource();
                    container = this.vertices.get(sourceNode);
                    if (container == null) {
                        // predecessor is itself chained
                        container = this.chainedTasks.get(sourceNode).getContainingVertex();
                        if (container == null) {
                            throw new IllegalStateException("Bug: Chained task predecessor has not been assigned its containing vertex.");
                        }
                    } else {
                        // predecessor is a proper task job vertex and this is the first chained task. add a forward connection entry.
                        new TaskConfig(container.getConfiguration()).addOutputShipStrategy(ShipStrategyType.FORWARD);
                    }
                    chainedTask.setContainingVertex(container);
                }
                // add info about the input serializer type
                chainedTask.getTaskConfig().setInputSerializer(inConn.getSerializer(), 0);
                // update name of container task
                String containerTaskName = container.getName();
                if (containerTaskName.startsWith("CHAIN ")) {
                    container.setName(containerTaskName + " -> " + chainedTask.getTaskName());
                } else {
                    container.setName("CHAIN " + containerTaskName + " -> " + chainedTask.getTaskName());
                }
                //update resource of container task
                container.setResources(container.getMinResources().merge(node.getMinResources()), container.getPreferredResources().merge(node.getPreferredResources()));
                this.chainedTasksInSequence.add(chainedTask);
                return;
            } else if (node instanceof BulkPartialSolutionPlanNode || node instanceof WorksetPlanNode) {
                // merged iteration head task. the task that the head is merged with will take care of it
                return;
            } else {
                throw new CompilerException("Bug: Unrecognized merged task vertex.");
            }
        }
        if (this.currentIteration != null) {
            JobVertex head = this.iterations.get(this.currentIteration).getHeadTask();
            // tasks on the dynamic path are co-located with the iteration head, whose execution determines the deployment slots of the co-location group
            if (node.isOnDynamicPath()) {
                targetVertex.setStrictlyCoLocatedWith(head);
            }
        }
        // create the config that will contain all the description of the inputs
        final TaskConfig targetVertexConfig = new TaskConfig(targetVertex.getConfiguration());
        // get the inputs. if this node is the head of an iteration, we obtain the inputs from the
        // enclosing iteration node, because the inputs are the initial inputs to the iteration.
        final Iterator<Channel> inConns;
        if (node instanceof BulkPartialSolutionPlanNode) {
            inConns = ((BulkPartialSolutionPlanNode) node).getContainingIterationNode().getInputs().iterator();
            // because the partial solution has its own vertex, it has only one (logical) input.
            // note this in the task configuration
            targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
        } else if (node instanceof WorksetPlanNode) {
            WorksetPlanNode wspn = (WorksetPlanNode) node;
            // input that is the initial workset
            inConns = Collections.singleton(wspn.getContainingIterationNode().getInput2()).iterator();
            // because we have a stand-alone (non-merged) workset iteration head, the initial workset will
            // be input 0 and the solution set will be input 1
            targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
            targetVertexConfig.setIterationHeadSolutionSetInputIndex(1);
        } else {
            inConns = node.getInputs().iterator();
        }
        if (!inConns.hasNext()) {
            throw new CompilerException("Bug: Found a non-source task with no input.");
        }
        int inputIndex = 0;
        while (inConns.hasNext()) {
            Channel input = inConns.next();
            inputIndex += translateChannel(input, inputIndex, targetVertex, targetVertexConfig, false);
        }
        // broadcast variables
        int broadcastInputIndex = 0;
        for (NamedChannel broadcastInput : node.getBroadcastInputs()) {
            int broadcastInputIndexDelta = translateChannel(broadcastInput, broadcastInputIndex, targetVertex, targetVertexConfig, true);
            targetVertexConfig.setBroadcastInputName(broadcastInput.getName(), broadcastInputIndex);
            targetVertexConfig.setBroadcastInputSerializer(broadcastInput.getSerializer(), broadcastInputIndex);
            broadcastInputIndex += broadcastInputIndexDelta;
        }
    } catch (Exception e) {
        throw new CompilerException("An error occurred while translating the optimized plan to a JobGraph: " + e.getMessage(), e);
    }
}
Also used : SolutionSetPlanNode(org.apache.flink.optimizer.plan.SolutionSetPlanNode), WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode), BulkPartialSolutionPlanNode(org.apache.flink.optimizer.plan.BulkPartialSolutionPlanNode), Channel(org.apache.flink.optimizer.plan.Channel), NamedChannel(org.apache.flink.optimizer.plan.NamedChannel), TaskConfig(org.apache.flink.runtime.operators.util.TaskConfig), IOException(java.io.IOException), CompilerException(org.apache.flink.optimizer.CompilerException), IterationPlanNode(org.apache.flink.optimizer.plan.IterationPlanNode), BulkIterationPlanNode(org.apache.flink.optimizer.plan.BulkIterationPlanNode), NAryUnionPlanNode(org.apache.flink.optimizer.plan.NAryUnionPlanNode), JobVertex(org.apache.flink.runtime.jobgraph.JobVertex), WorksetPlanNode(org.apache.flink.optimizer.plan.WorksetPlanNode), SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode), SourcePlanNode(org.apache.flink.optimizer.plan.SourcePlanNode), DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode), PlanNode(org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode)
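
The postVisit above runs as part of the full plan-to-JobGraph translation. The sketch below shows how a caller typically drives that translation; it assumes the no-argument JobGraphGenerator constructor, the compileJobGraph(OptimizedPlan) method, and the same Optimizer setup as in the earlier sketch, so treat it as an outline rather than the canonical entry point.

import org.apache.flink.api.common.Plan;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;
import org.apache.flink.optimizer.plantranslate.JobGraphGenerator;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class JobGraphTranslationSketch {

    // Optimize a program plan and translate it into a JobGraph. The translation performs the
    // depth-first traversal whose postVisit is shown above: connecting each node to its inputs,
    // chaining eligible tasks, and wiring iteration heads (including the initial solution set
    // input of a WorksetIterationPlanNode).
    static JobGraph toJobGraph(Plan plan) {
        Optimizer optimizer = new Optimizer(null, new DefaultCostEstimator(), new Configuration());
        OptimizedPlan optimizedPlan = optimizer.compile(plan);
        return new JobGraphGenerator().compileJobGraph(optimizedPlan);
    }
}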

Aggregations

WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode): 27 usages
SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode): 21 usages
DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode): 20 usages
SingleInputPlanNode (org.apache.flink.optimizer.plan.SingleInputPlanNode): 18 usages
PlanNode (org.apache.flink.optimizer.plan.PlanNode): 16 usages
Plan (org.apache.flink.api.common.Plan): 15 usages
OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan): 15 usages
Test (org.junit.Test): 15 usages
SourcePlanNode (org.apache.flink.optimizer.plan.SourcePlanNode): 13 usages
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 12 usages
BulkIterationPlanNode (org.apache.flink.optimizer.plan.BulkIterationPlanNode): 12 usages
Channel (org.apache.flink.optimizer.plan.Channel): 12 usages
CompilerException (org.apache.flink.optimizer.CompilerException): 11 usages
SolutionSetPlanNode (org.apache.flink.optimizer.plan.SolutionSetPlanNode): 11 usages
WorksetPlanNode (org.apache.flink.optimizer.plan.WorksetPlanNode): 11 usages
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 10 usages
BulkPartialSolutionPlanNode (org.apache.flink.optimizer.plan.BulkPartialSolutionPlanNode): 10 usages
NAryUnionPlanNode (org.apache.flink.optimizer.plan.NAryUnionPlanNode): 10 usages
IterationPlanNode (org.apache.flink.optimizer.plan.IterationPlanNode): 8 usages
JobGraphGenerator (org.apache.flink.optimizer.plantranslate.JobGraphGenerator): 8 usages