Example 1 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From class ConnectedComponentsCoGroupTest, method testWorksetConnectedComponents:

@Test
public void testWorksetConnectedComponents() {
    Plan plan = getConnectedComponentsCoGroupPlan();
    plan.setExecutionConfig(new ExecutionConfig());
    OptimizedPlan optPlan = compileNoStats(plan);
    OptimizerPlanNodeResolver or = getOptimizerPlanNodeResolver(optPlan);
    if (PRINT_PLAN) {
        PlanJSONDumpGenerator dumper = new PlanJSONDumpGenerator();
        String json = dumper.getOptimizerPlanAsJSON(optPlan);
        System.out.println(json);
    }
    SourcePlanNode vertexSource = or.getNode(VERTEX_SOURCE);
    SourcePlanNode edgesSource = or.getNode(EDGES_SOURCE);
    SinkPlanNode sink = or.getNode(SINK);
    WorksetIterationPlanNode iter = or.getNode(ITERATION_NAME);
    DualInputPlanNode neighborsJoin = or.getNode(JOIN_NEIGHBORS_MATCH);
    DualInputPlanNode cogroup = or.getNode(MIN_ID_AND_UPDATE);
    // --------------------------------------------------------------------
    // Plan validation:
    //
    // We expect the plan to go with a sort-merge join, because the CoGroup
    // sorts and the join in the successive iteration can re-exploit the sorting.
    // --------------------------------------------------------------------
    // test all drivers
    Assert.assertEquals(DriverStrategy.NONE, sink.getDriverStrategy());
    Assert.assertEquals(DriverStrategy.NONE, vertexSource.getDriverStrategy());
    Assert.assertEquals(DriverStrategy.NONE, edgesSource.getDriverStrategy());
    Assert.assertEquals(DriverStrategy.INNER_MERGE, neighborsJoin.getDriverStrategy());
    Assert.assertEquals(set0, neighborsJoin.getKeysForInput1());
    Assert.assertEquals(set0, neighborsJoin.getKeysForInput2());
    Assert.assertEquals(DriverStrategy.CO_GROUP, cogroup.getDriverStrategy());
    Assert.assertEquals(set0, cogroup.getKeysForInput1());
    Assert.assertEquals(set0, cogroup.getKeysForInput2());
    // test all the shipping strategies
    Assert.assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, iter.getInitialSolutionSetInput().getShipStrategy());
    Assert.assertEquals(set0, iter.getInitialSolutionSetInput().getShipStrategyKeys());
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, iter.getInitialWorksetInput().getShipStrategy());
    Assert.assertEquals(set0, iter.getInitialWorksetInput().getShipStrategyKeys());
    // workset
    Assert.assertEquals(ShipStrategyType.FORWARD, neighborsJoin.getInput1().getShipStrategy());
    // edges
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, neighborsJoin.getInput2().getShipStrategy());
    Assert.assertEquals(set0, neighborsJoin.getInput2().getShipStrategyKeys());
    Assert.assertTrue(neighborsJoin.getInput2().getTempMode().isCached());
    // min id
    Assert.assertEquals(ShipStrategyType.PARTITION_HASH, cogroup.getInput1().getShipStrategy());
    // solution set
    Assert.assertEquals(ShipStrategyType.FORWARD, cogroup.getInput2().getShipStrategy());
    // test all the local strategies
    Assert.assertEquals(LocalStrategy.NONE, sink.getInput().getLocalStrategy());
    Assert.assertEquals(LocalStrategy.NONE, iter.getInitialSolutionSetInput().getLocalStrategy());
    // the sort for the neighbor join in the first iteration is pushed out of the loop
    Assert.assertEquals(LocalStrategy.SORT, iter.getInitialWorksetInput().getLocalStrategy());
    // workset
    Assert.assertEquals(LocalStrategy.NONE, neighborsJoin.getInput1().getLocalStrategy());
    // edges
    Assert.assertEquals(LocalStrategy.SORT, neighborsJoin.getInput2().getLocalStrategy());
    Assert.assertEquals(LocalStrategy.SORT, cogroup.getInput1().getLocalStrategy());
    // solution set
    Assert.assertEquals(LocalStrategy.NONE, cogroup.getInput2().getLocalStrategy());
    // check the caches
    Assert.assertEquals(TempMode.CACHED, neighborsJoin.getInput2().getTempMode());
    JobGraphGenerator jgg = new JobGraphGenerator();
    jgg.compileJobGraph(optPlan);
}
Also used: DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode), PlanJSONDumpGenerator (org.apache.flink.optimizer.plandump.PlanJSONDumpGenerator), WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode), JobGraphGenerator (org.apache.flink.optimizer.plantranslate.JobGraphGenerator), SourcePlanNode (org.apache.flink.optimizer.plan.SourcePlanNode), SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), Plan (org.apache.flink.api.common.Plan), OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan), Test (org.junit.Test)
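For context, getConnectedComponentsCoGroupPlan() is defined in the test class itself and is not shown on this page. The following minimal, self-contained sketch illustrates the kind of program it builds, assuming the usual delta-iteration formulation of connected components; the operator names, user functions, and data here are illustrative, not the test's actual code:

import org.apache.flink.api.common.Plan;
import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

public static Plan buildConnectedComponentsCoGroupPlan() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple2<Long, Long>> vertices = env.fromElements(new Tuple2<>(1L, 1L)).name("Vertices");
    DataSet<Tuple2<Long, Long>> edges = env.fromElements(new Tuple2<>(1L, 2L)).name("Edges");
    // solution set and workset both hold (vertexId, componentId), keyed on field 0
    DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
            vertices.iterateDelta(vertices, 100, 0).name("Connected Components Iteration");
    // join the workset with the edges to send candidate component ids to the neighbors
    DataSet<Tuple2<Long, Long>> candidates = iteration.getWorkset()
            .join(edges).where(0).equalTo(0)
            .with(new NeighborWithComponentId()).name("Join Candidate Id With Neighbor");
    // coGroup the candidates with the solution set: emit the minimum id only if it improves
    DataSet<Tuple2<Long, Long>> updates = candidates
            .coGroup(iteration.getSolutionSet()).where(0).equalTo(0)
            .with(new MinIdAndUpdate()).name("Min Id and Update");
    iteration.closeWith(updates, updates)
            .output(new DiscardingOutputFormat<Tuple2<Long, Long>>()).name("Result");
    return env.createProgramPlan();
}

@SuppressWarnings("serial")
static class NeighborWithComponentId
        implements JoinFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>> {
    @Override
    public Tuple2<Long, Long> join(Tuple2<Long, Long> vertex, Tuple2<Long, Long> edge) {
        // the neighbor at the other end of the edge receives this vertex's component id
        return new Tuple2<>(edge.f1, vertex.f1);
    }
}

@SuppressWarnings("serial")
static class MinIdAndUpdate
        implements CoGroupFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>> {
    @Override
    public void coGroup(Iterable<Tuple2<Long, Long>> candidates,
                        Iterable<Tuple2<Long, Long>> current,
                        Collector<Tuple2<Long, Long>> out) {
        long min = Long.MAX_VALUE;
        for (Tuple2<Long, Long> c : candidates) {
            min = Math.min(min, c.f1);
        }
        for (Tuple2<Long, Long> v : current) {
            if (min < v.f1) {
                out.collect(new Tuple2<>(v.f0, min)); // improved component id
            }
        }
    }
}

Because the CoGroup sorts both inputs on the key, the optimizer can pick the sort-merge strategy for the neighbor join and reuse the sort order across supersteps, which is exactly what the DriverStrategy.INNER_MERGE and LocalStrategy.SORT assertions above verify.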

Example 2 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From class PregelCompilerTest, method testPregelCompilerWithBroadcastVariable:

@SuppressWarnings("serial")
@Test
public void testPregelCompilerWithBroadcastVariable() {
    try {
        final String BC_VAR_NAME = "borat variable";
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);
        // compose test program
        {
            DataSet<Long> bcVar = env.fromElements(1L);
            DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<Long, Long>());
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {

                @Override
                public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
                    return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
                }
            });
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
            VertexCentricConfiguration parameters = new VertexCentricConfiguration();
            parameters.addBroadcastSet(BC_VAR_NAME, bcVar);
            DataSet<Vertex<Long, Long>> result = graph.runVertexCentricIteration(new CCCompute(), null, 100, parameters).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        Plan p = env.createProgramPlan("Pregel Connected Components");
        OptimizedPlan op = compileNoStats(p);
        // check the sink
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
        // check the iteration
        WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
        // check the solution set delta
        PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
        assertTrue(ssDelta instanceof SingleInputPlanNode);
        SingleInputPlanNode ssFlatMap = (SingleInputPlanNode) ((SingleInputPlanNode) ssDelta).getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, ssFlatMap.getParallelism());
        assertEquals(ShipStrategyType.FORWARD, ssFlatMap.getInput().getShipStrategy());
        // check the computation coGroup
        DualInputPlanNode computationCoGroup = (DualInputPlanNode) (ssFlatMap.getInput().getSource());
        assertEquals(DEFAULT_PARALLELISM, computationCoGroup.getParallelism());
        assertEquals(ShipStrategyType.FORWARD, computationCoGroup.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, computationCoGroup.getInput2().getShipStrategy());
        assertTrue(computationCoGroup.getInput2().getTempMode().isCached());
        assertEquals(new FieldList(0), computationCoGroup.getInput2().getShipStrategyKeys());
        // check that the initial partitioning is pushed out of the loop
        assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
        assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment), Tuple2ToVertexMap (org.apache.flink.graph.utils.Tuple2ToVertexMap), DataSet (org.apache.flink.api.java.DataSet), WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode), MapFunction (org.apache.flink.api.common.functions.MapFunction), Plan (org.apache.flink.api.common.Plan), OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan), DiscardingOutputFormat (org.apache.flink.api.java.io.DiscardingOutputFormat), FieldList (org.apache.flink.api.common.operators.util.FieldList), SingleInputPlanNode (org.apache.flink.optimizer.plan.SingleInputPlanNode), DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode), NullValue (org.apache.flink.types.NullValue), Graph (org.apache.flink.graph.Graph), PlanNode (org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), Edge (org.apache.flink.graph.Edge), Test (org.junit.Test)
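CCCompute is an inner class of PregelCompilerTest and is not reproduced on this page. As a hedged sketch of what a connected-components compute function looks like, assuming the org.apache.flink.graph.pregel.ComputeFunction API (setNewVertexValue, sendMessageToAllNeighbors, and getSuperstepNumber are assumed to be the framework's helper methods; the body is illustrative):

import org.apache.flink.graph.Vertex;
import org.apache.flink.graph.pregel.ComputeFunction;
import org.apache.flink.graph.pregel.MessageIterator;
import org.apache.flink.types.NullValue;

@SuppressWarnings("serial")
public static final class CCCompute extends ComputeFunction<Long, Long, NullValue, Long> {

    @Override
    public void compute(Vertex<Long, Long> vertex, MessageIterator<Long> messages) {
        long minComponent = vertex.getValue();
        for (Long candidate : messages) {
            minComponent = Math.min(minComponent, candidate);
        }
        // on the first superstep, or whenever the component id improves,
        // update the vertex and notify the neighbors
        if (getSuperstepNumber() == 1 || minComponent < vertex.getValue()) {
            setNewVertexValue(minComponent);
            sendMessageToAllNeighbors(minComponent);
        }
    }
}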

Example 3 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From class SpargelCompilerTest, method testSpargelCompilerWithBroadcastVariable:

@SuppressWarnings("serial")
@Test
public void testSpargelCompilerWithBroadcastVariable() {
    try {
        final String BC_VAR_NAME = "borat variable";
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);
        // compose test program
        {
            DataSet<Long> bcVar = env.fromElements(1L);
            DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<Long, Long>());
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {

                @Override
                public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
                    return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
                }
            });
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
            ScatterGatherConfiguration parameters = new ScatterGatherConfiguration();
            parameters.addBroadcastSetForScatterFunction(BC_VAR_NAME, bcVar);
            parameters.addBroadcastSetForGatherFunction(BC_VAR_NAME, bcVar);
            DataSet<Vertex<Long, Long>> result = graph.runScatterGatherIteration(new ConnectedComponents.CCMessenger<Long, Long>(BasicTypeInfo.LONG_TYPE_INFO), new ConnectedComponents.CCUpdater<Long, Long>(), 100, parameters).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        Plan p = env.createProgramPlan("Spargel Connected Components");
        OptimizedPlan op = compileNoStats(p);
        // check the sink
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
        // check the iteration
        WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
        // check the solution set join and the delta
        PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
        // this is only true if the update functions preserves the partitioning
        assertTrue(ssDelta instanceof DualInputPlanNode);
        DualInputPlanNode ssJoin = (DualInputPlanNode) ssDelta;
        assertEquals(DEFAULT_PARALLELISM, ssJoin.getParallelism());
        assertEquals(ShipStrategyType.PARTITION_HASH, ssJoin.getInput1().getShipStrategy());
        assertEquals(new FieldList(0), ssJoin.getInput1().getShipStrategyKeys());
        // check the workset set join
        DualInputPlanNode edgeJoin = (DualInputPlanNode) ssJoin.getInput1().getSource();
        assertEquals(DEFAULT_PARALLELISM, edgeJoin.getParallelism());
        assertEquals(ShipStrategyType.PARTITION_HASH, edgeJoin.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.FORWARD, edgeJoin.getInput2().getShipStrategy());
        assertTrue(edgeJoin.getInput1().getTempMode().isCached());
        assertEquals(new FieldList(0), edgeJoin.getInput1().getShipStrategyKeys());
        // check that the initial partitioning is pushed out of the loop
        assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput2().getShipStrategy());
        assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
        assertEquals(new FieldList(0), iteration.getInput2().getShipStrategyKeys());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment), Tuple2ToVertexMap (org.apache.flink.graph.utils.Tuple2ToVertexMap), DataSet (org.apache.flink.api.java.DataSet), WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode), MapFunction (org.apache.flink.api.common.functions.MapFunction), Plan (org.apache.flink.api.common.Plan), OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan), DiscardingOutputFormat (org.apache.flink.api.java.io.DiscardingOutputFormat), FieldList (org.apache.flink.api.common.operators.util.FieldList), DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode), NullValue (org.apache.flink.types.NullValue), Graph (org.apache.flink.graph.Graph), PlanNode (org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode), ConnectedComponents (org.apache.flink.graph.library.ConnectedComponents), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), Edge (org.apache.flink.graph.Edge), Test (org.junit.Test)
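Note that CCMessenger and CCUpdater come from the library class org.apache.flink.graph.library.ConnectedComponents, so the broadcast set registered under BC_VAR_NAME here mainly exercises plan compilation with broadcast inputs. For illustration, a hedged sketch of how a custom scatter function would consume such a broadcast set, assuming the org.apache.flink.graph.spargel.ScatterFunction API (getBroadcastSet and sendMessageToAllNeighbors are assumed from that API; the class and its logic are hypothetical):

import java.util.Collection;

import org.apache.flink.graph.Vertex;
import org.apache.flink.graph.spargel.ScatterFunction;
import org.apache.flink.types.NullValue;

@SuppressWarnings("serial")
public static final class BroadcastAwareMessenger
        extends ScatterFunction<Long, Long, Long, NullValue> {

    @Override
    public void sendMessages(Vertex<Long, Long> vertex) {
        // the set registered via addBroadcastSetForScatterFunction(BC_VAR_NAME, bcVar)
        // is retrieved under the same name at runtime
        Collection<Long> bcVar = getBroadcastSet("borat variable");
        long offset = bcVar.iterator().next(); // hypothetical use of the broadcast value
        sendMessageToAllNeighbors(vertex.getValue() + offset);
    }
}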

Example 4 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From class GSACompilerTest, method testGSACompiler:

@Test
public void testGSACompiler() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);
        // compose test program
        {
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple3<>(1L, 2L, NullValue.getInstance())).map(new Tuple3ToEdgeMap<Long, NullValue>());
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(edges, new InitVertices(), env);
            DataSet<Vertex<Long, Long>> result = graph.runGatherSumApplyIteration(new GatherNeighborIds(), new SelectMinId(), new UpdateComponentId(), 100).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        Plan p = env.createProgramPlan("GSA Connected Components");
        OptimizedPlan op = compileNoStats(p);
        // check the sink
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
        assertEquals(PartitioningProperty.HASH_PARTITIONED, sink.getGlobalProperties().getPartitioning());
        // check the iteration
        WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
        // check the solution set join and the delta
        PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
        // this is only true if the update function preserves the partitioning
        assertTrue(ssDelta instanceof DualInputPlanNode);
        DualInputPlanNode ssJoin = (DualInputPlanNode) ssDelta;
        assertEquals(DEFAULT_PARALLELISM, ssJoin.getParallelism());
        assertEquals(ShipStrategyType.PARTITION_HASH, ssJoin.getInput1().getShipStrategy());
        assertEquals(new FieldList(0), ssJoin.getInput1().getShipStrategyKeys());
        // check the workset set join
        SingleInputPlanNode sumReducer = (SingleInputPlanNode) ssJoin.getInput1().getSource();
        SingleInputPlanNode gatherMapper = (SingleInputPlanNode) sumReducer.getInput().getSource();
        DualInputPlanNode edgeJoin = (DualInputPlanNode) gatherMapper.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, edgeJoin.getParallelism());
        // input1 is the workset
        assertEquals(ShipStrategyType.FORWARD, edgeJoin.getInput1().getShipStrategy());
        // input2 is the edges
        assertEquals(ShipStrategyType.PARTITION_HASH, edgeJoin.getInput2().getShipStrategy());
        assertTrue(edgeJoin.getInput2().getTempMode().isCached());
        assertEquals(new FieldList(0), edgeJoin.getInput2().getShipStrategyKeys());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment), DataSet (org.apache.flink.api.java.DataSet), WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode), Plan (org.apache.flink.api.common.Plan), OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan), DiscardingOutputFormat (org.apache.flink.api.java.io.DiscardingOutputFormat), FieldList (org.apache.flink.api.common.operators.util.FieldList), DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode), SingleInputPlanNode (org.apache.flink.optimizer.plan.SingleInputPlanNode), NullValue (org.apache.flink.types.NullValue), Graph (org.apache.flink.graph.Graph), PlanNode (org.apache.flink.optimizer.plan.PlanNode), SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode), Tuple3ToEdgeMap (org.apache.flink.graph.utils.Tuple3ToEdgeMap), Test (org.junit.Test)
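The four user functions referenced above (InitVertices, GatherNeighborIds, SelectMinId, UpdateComponentId) are inner classes of GSACompilerTest. A hedged reconstruction of their likely shape, assuming the org.apache.flink.graph.gsa API (GatherFunction, SumFunction, ApplyFunction, and Neighbor); the bodies are the standard connected-components logic, not necessarily the test's exact code:

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.graph.gsa.ApplyFunction;
import org.apache.flink.graph.gsa.GatherFunction;
import org.apache.flink.graph.gsa.Neighbor;
import org.apache.flink.graph.gsa.SumFunction;
import org.apache.flink.types.NullValue;

@SuppressWarnings("serial")
private static final class InitVertices implements MapFunction<Long, Long> {
    @Override
    public Long map(Long vertexId) {
        return vertexId; // every vertex starts in its own component
    }
}

@SuppressWarnings("serial")
private static final class GatherNeighborIds extends GatherFunction<Long, NullValue, Long> {
    @Override
    public Long gather(Neighbor<Long, NullValue> neighbor) {
        return neighbor.getNeighborValue(); // candidate component id
    }
}

@SuppressWarnings("serial")
private static final class SelectMinId extends SumFunction<Long, NullValue, Long> {
    @Override
    public Long sum(Long newValue, Long currentValue) {
        return Math.min(newValue, currentValue);
    }
}

@SuppressWarnings("serial")
private static final class UpdateComponentId extends ApplyFunction<Long, Long, Long> {
    @Override
    public void apply(Long summedValue, Long origValue) {
        if (summedValue < origValue) {
            setResult(summedValue); // emit only when the component id improves
        }
    }
}

An apply step that only emits on improvement keeps the solution-set update keyed on the vertex id, which is what the comment "this is only true if the update function preserves the partitioning" refers to.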

Example 5 with WorksetIterationPlanNode

Use of org.apache.flink.optimizer.plan.WorksetIterationPlanNode in project flink by apache.

From class IterationsCompilerTest, method testTwoIterationsDirectlyChained:

@Test
public void testTwoIterationsDirectlyChained() throws Exception {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(8);
        DataSet<Tuple2<Long, Long>> verticesWithInitialId = env.fromElements(new Tuple2<Long, Long>(1L, 2L));
        DataSet<Tuple2<Long, Long>> edges = env.fromElements(new Tuple2<Long, Long>(1L, 2L));
        DataSet<Tuple2<Long, Long>> bulkResult = doBulkIteration(verticesWithInitialId, edges);
        DataSet<Tuple2<Long, Long>> depResult = doDeltaIteration(bulkResult, edges);
        depResult.output(new DiscardingOutputFormat<Tuple2<Long, Long>>());
        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);
        assertEquals(1, op.getDataSinks().size());
        assertTrue(op.getDataSinks().iterator().next().getInput().getSource() instanceof WorksetIterationPlanNode);
        WorksetIterationPlanNode wipn = (WorksetIterationPlanNode) op.getDataSinks().iterator().next().getInput().getSource();
        BulkIterationPlanNode bipn = (BulkIterationPlanNode) wipn.getInput1().getSource();
        // the hash partitioning has been pushed out of the delta iteration into the bulk iteration
        assertEquals(ShipStrategyType.FORWARD, wipn.getInput1().getShipStrategy());
        // since the partitioning has been pushed into the bulk iteration, its step function has to guarantee the hash partitioning
        for (Channel c : bipn.getRootOfStepFunction().getInputs()) {
            assertEquals(ShipStrategyType.PARTITION_HASH, c.getShipStrategy());
        }
        assertEquals(DataExchangeMode.BATCH, wipn.getInput1().getDataExchangeMode());
        assertEquals(DataExchangeMode.BATCH, wipn.getInput2().getDataExchangeMode());
        assertEquals(TempMode.NONE, wipn.getInput1().getTempMode());
        assertEquals(TempMode.NONE, wipn.getInput2().getTempMode());
        new JobGraphGenerator().compileJobGraph(op);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment), WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode), Channel (org.apache.flink.optimizer.plan.Channel), Plan (org.apache.flink.api.common.Plan), OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), JobGraphGenerator (org.apache.flink.optimizer.plantranslate.JobGraphGenerator), BulkIterationPlanNode (org.apache.flink.optimizer.plan.BulkIterationPlanNode), Test (org.junit.Test)
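doBulkIteration and doDeltaIteration are helpers defined in IterationsCompilerTest. The hedged, simplified versions below only reproduce the shape that matters for this test, a bulk iteration keyed on field 0 feeding a delta iteration keyed on the same field; the real helpers differ in their user functions:

import org.apache.flink.api.common.functions.FlatJoinFunction;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.operators.IterativeDataSet;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

@SuppressWarnings("serial")
public static DataSet<Tuple2<Long, Long>> doBulkIteration(
        DataSet<Tuple2<Long, Long>> vertices, DataSet<Tuple2<Long, Long>> edges) {
    IterativeDataSet<Tuple2<Long, Long>> loop = vertices.iterate(10);
    // propagate component ids along the edges, then keep the minimum per vertex
    DataSet<Tuple2<Long, Long>> next = loop
            .join(edges).where(0).equalTo(0)
            .with(new JoinFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>>() {
                @Override
                public Tuple2<Long, Long> join(Tuple2<Long, Long> v, Tuple2<Long, Long> e) {
                    return new Tuple2<>(e.f1, v.f1);
                }
            })
            .groupBy(0).min(1);
    return loop.closeWith(next);
}

@SuppressWarnings("serial")
public static DataSet<Tuple2<Long, Long>> doDeltaIteration(
        DataSet<Tuple2<Long, Long>> vertices, DataSet<Tuple2<Long, Long>> edges) {
    DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iter =
            vertices.iterateDelta(vertices, 100, 0);
    DataSet<Tuple2<Long, Long>> updates = iter.getWorkset()
            .join(edges).where(0).equalTo(0)
            .with(new JoinFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>>() {
                @Override
                public Tuple2<Long, Long> join(Tuple2<Long, Long> v, Tuple2<Long, Long> e) {
                    return new Tuple2<>(e.f1, v.f1);
                }
            })
            .join(iter.getSolutionSet()).where(0).equalTo(0)
            .with(new FlatJoinFunction<Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>>() {
                @Override
                public void join(Tuple2<Long, Long> candidate, Tuple2<Long, Long> current,
                                 Collector<Tuple2<Long, Long>> out) {
                    if (candidate.f1 < current.f1) {
                        out.collect(candidate); // keep only improvements as the delta
                    }
                }
            });
    return iter.closeWith(updates, updates);
}

Because the delta iteration consumes the bulk result on the same key, the optimizer can push the hash partitioning into the bulk iteration's step function, which is what the FORWARD assertion on wipn.getInput1() checks.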

Aggregations

WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode): 27 uses
SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode): 21 uses
DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode): 20 uses
SingleInputPlanNode (org.apache.flink.optimizer.plan.SingleInputPlanNode): 18 uses
PlanNode (org.apache.flink.optimizer.plan.PlanNode): 16 uses
Plan (org.apache.flink.api.common.Plan): 15 uses
OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan): 15 uses
Test (org.junit.Test): 15 uses
SourcePlanNode (org.apache.flink.optimizer.plan.SourcePlanNode): 13 uses
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 12 uses
BulkIterationPlanNode (org.apache.flink.optimizer.plan.BulkIterationPlanNode): 12 uses
Channel (org.apache.flink.optimizer.plan.Channel): 12 uses
CompilerException (org.apache.flink.optimizer.CompilerException): 11 uses
SolutionSetPlanNode (org.apache.flink.optimizer.plan.SolutionSetPlanNode): 11 uses
WorksetPlanNode (org.apache.flink.optimizer.plan.WorksetPlanNode): 11 uses
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 10 uses
BulkPartialSolutionPlanNode (org.apache.flink.optimizer.plan.BulkPartialSolutionPlanNode): 10 uses
NAryUnionPlanNode (org.apache.flink.optimizer.plan.NAryUnionPlanNode): 10 uses
IterationPlanNode (org.apache.flink.optimizer.plan.IterationPlanNode): 8 uses
JobGraphGenerator (org.apache.flink.optimizer.plantranslate.JobGraphGenerator): 8 uses