Example 46 with DataSet

Use of org.apache.flink.api.java.DataSet in project flink by apache, from class PregelCompilerTest, method testPregelCompiler:

@SuppressWarnings("serial")
@Test
public void testPregelCompiler() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);
    // compose test program
    {
        DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<>());
        DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {

            @Override
            public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
                return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
            }
        });
        Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
        DataSet<Vertex<Long, Long>> result = graph.runVertexCentricIteration(new CCCompute(), null, 100).getVertices();
        result.output(new DiscardingOutputFormat<>());
    }
    Plan p = env.createProgramPlan("Pregel Connected Components");
    OptimizedPlan op = compileNoStats(p);
    // check the sink
    SinkPlanNode sink = op.getDataSinks().iterator().next();
    assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
    assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
    // check the iteration
    WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
    assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
    // check the solution set delta
    PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
    assertTrue(ssDelta instanceof SingleInputPlanNode);
    SingleInputPlanNode ssFlatMap = (SingleInputPlanNode) ((SingleInputPlanNode) ssDelta).getInput().getSource();
    assertEquals(DEFAULT_PARALLELISM, ssFlatMap.getParallelism());
    assertEquals(ShipStrategyType.FORWARD, ssFlatMap.getInput().getShipStrategy());
    // check the computation coGroup
    DualInputPlanNode computationCoGroup = (DualInputPlanNode) (ssFlatMap.getInput().getSource());
    assertEquals(DEFAULT_PARALLELISM, computationCoGroup.getParallelism());
    assertEquals(ShipStrategyType.FORWARD, computationCoGroup.getInput1().getShipStrategy());
    assertEquals(ShipStrategyType.PARTITION_HASH, computationCoGroup.getInput2().getShipStrategy());
    assertTrue(computationCoGroup.getInput2().getTempMode().isCached());
    assertEquals(new FieldList(0), computationCoGroup.getInput2().getShipStrategyKeys());
    // check that the initial partitioning is pushed out of the loop
    assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
    assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) Tuple2ToVertexMap(org.apache.flink.graph.utils.Tuple2ToVertexMap) DataSet(org.apache.flink.api.java.DataSet) MapFunction(org.apache.flink.api.common.functions.MapFunction) Plan(org.apache.flink.api.common.Plan) OptimizedPlan(org.apache.flink.optimizer.plan.OptimizedPlan) DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat) FieldList(org.apache.flink.api.common.operators.util.FieldList) PlanNode(org.apache.flink.optimizer.plan.PlanNode) SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode) DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode) WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode) SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode) NullValue(org.apache.flink.types.NullValue) Graph(org.apache.flink.graph.Graph) Edge(org.apache.flink.graph.Edge) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Test(org.junit.Test)
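
The snippet references CCCompute without showing its body. As a minimal sketch, assuming Gelly's Pregel API (org.apache.flink.graph.pregel.ComputeFunction), a connected-components compute function could look like the following; the test's actual CCCompute may differ:

// Sketch only: the test's real CCCompute is not shown above.
private static final class CCCompute extends ComputeFunction<Long, Long, NullValue, Long> {

    @Override
    public void compute(Vertex<Long, Long> vertex, MessageIterator<Long> messages) {
        long currentComponent = vertex.getValue();
        // Adopt the smallest component id received from any neighbor.
        for (Long msg : messages) {
            currentComponent = Math.min(currentComponent, msg);
        }
        // Propagate on the first superstep, or whenever the value improved.
        if (getSuperstepNumber() == 1 || currentComponent < vertex.getValue()) {
            setNewVertexValue(currentComponent);
            for (Edge<Long, NullValue> edge : getEdges()) {
                sendMessageTo(edge.getTarget(), currentComponent);
            }
        }
    }
}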

Example 47 with DataSet

Use of org.apache.flink.api.java.DataSet in project flink by apache, from class PregelCompilerTest, method testPregelWithCombiner:

@SuppressWarnings("serial")
@Test
public void testPregelWithCombiner() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);
    // compose test program
    {
        DataSet<Vertex<Long, Long>> initialVertices = env.fromElements(new Tuple2<>(1L, 1L), new Tuple2<>(2L, 2L)).map(new Tuple2ToVertexMap<>());
        DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple2<>(1L, 2L)).map(new MapFunction<Tuple2<Long, Long>, Edge<Long, NullValue>>() {

            @Override
            public Edge<Long, NullValue> map(Tuple2<Long, Long> edge) {
                return new Edge<>(edge.f0, edge.f1, NullValue.getInstance());
            }
        });
        Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);
        DataSet<Vertex<Long, Long>> result = graph.runVertexCentricIteration(new CCCompute(), new CCCombiner(), 100).getVertices();
        result.output(new DiscardingOutputFormat<>());
    }
    Plan p = env.createProgramPlan("Pregel Connected Components");
    OptimizedPlan op = compileNoStats(p);
    // check the sink
    SinkPlanNode sink = op.getDataSinks().iterator().next();
    assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
    assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
    // check the iteration
    WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
    assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
    // check the combiner
    SingleInputPlanNode combiner = (SingleInputPlanNode) iteration.getInput2().getSource();
    assertEquals(ShipStrategyType.FORWARD, combiner.getInput().getShipStrategy());
    // check the solution set delta
    PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
    assertTrue(ssDelta instanceof SingleInputPlanNode);
    SingleInputPlanNode ssFlatMap = (SingleInputPlanNode) ((SingleInputPlanNode) ssDelta).getInput().getSource();
    assertEquals(DEFAULT_PARALLELISM, ssFlatMap.getParallelism());
    assertEquals(ShipStrategyType.FORWARD, ssFlatMap.getInput().getShipStrategy());
    // check the computation coGroup
    DualInputPlanNode computationCoGroup = (DualInputPlanNode) (ssFlatMap.getInput().getSource());
    assertEquals(DEFAULT_PARALLELISM, computationCoGroup.getParallelism());
    assertEquals(ShipStrategyType.FORWARD, computationCoGroup.getInput1().getShipStrategy());
    assertEquals(ShipStrategyType.PARTITION_HASH, computationCoGroup.getInput2().getShipStrategy());
    assertTrue(computationCoGroup.getInput2().getTempMode().isCached());
    assertEquals(new FieldList(0), computationCoGroup.getInput2().getShipStrategyKeys());
    // check that the initial partitioning is pushed out of the loop
    assertEquals(ShipStrategyType.PARTITION_HASH, iteration.getInput1().getShipStrategy());
    assertEquals(new FieldList(0), iteration.getInput1().getShipStrategyKeys());
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) Tuple2ToVertexMap(org.apache.flink.graph.utils.Tuple2ToVertexMap) DataSet(org.apache.flink.api.java.DataSet) MapFunction(org.apache.flink.api.common.functions.MapFunction) Plan(org.apache.flink.api.common.Plan) OptimizedPlan(org.apache.flink.optimizer.plan.OptimizedPlan) DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat) FieldList(org.apache.flink.api.common.operators.util.FieldList) PlanNode(org.apache.flink.optimizer.plan.PlanNode) SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode) DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode) WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode) SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode) NullValue(org.apache.flink.types.NullValue) Graph(org.apache.flink.graph.Graph) Edge(org.apache.flink.graph.Edge) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Test(org.junit.Test)
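
The only difference from Example 46 is the second argument to runVertexCentricIteration: a message combiner instead of null. CCCombiner's body is also not shown; a plausible sketch, assuming Gelly's org.apache.flink.graph.pregel.MessageCombiner API, is:

// Sketch only: the test's real CCCombiner is not shown above.
public static final class CCCombiner extends MessageCombiner<Long, Long> {

    @Override
    public void combineMessages(MessageIterator<Long> messages) {
        long minMessage = Long.MAX_VALUE;
        for (Long msg : messages) {
            minMessage = Math.min(minMessage, msg);
        }
        // Forward a single combined message instead of all originals.
        sendCombinedMessage(minMessage);
    }
}

Accordingly, the plan assertions gain one extra check: the combiner feeds the workset input of the iteration with a FORWARD ship strategy.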

Example 48 with DataSet

Use of org.apache.flink.api.java.DataSet in project flink by apache, from class BulkIterationTranslationTest, method testCorrectTranslation:

@Test
public void testCorrectTranslation() {
    final String jobName = "Test JobName";
    final int numIterations = 13;
    final int defaultParallelism = 133;
    final int iterationParallelism = 77;
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    // ------------ construct the test program ------------------
    {
        env.setParallelism(defaultParallelism);
        @SuppressWarnings("unchecked") DataSet<Tuple3<Double, Long, String>> initialDataSet = env.fromElements(new Tuple3<>(3.44, 5L, "abc"));
        IterativeDataSet<Tuple3<Double, Long, String>> bulkIteration = initialDataSet.iterate(numIterations);
        bulkIteration.setParallelism(iterationParallelism);
        // test that multiple iteration consumers are supported
        DataSet<Tuple3<Double, Long, String>> identity = bulkIteration.map(new IdentityMapper<Tuple3<Double, Long, String>>());
        DataSet<Tuple3<Double, Long, String>> result = bulkIteration.closeWith(identity);
        result.output(new DiscardingOutputFormat<Tuple3<Double, Long, String>>());
        result.writeAsText("/dev/null");
    }
    Plan p = env.createProgramPlan(jobName);
    // ------------- validate the plan ----------------
    BulkIterationBase<?> iteration = (BulkIterationBase<?>) p.getDataSinks().iterator().next().getInput();
    assertEquals(jobName, p.getJobName());
    assertEquals(defaultParallelism, p.getDefaultParallelism());
    assertEquals(iterationParallelism, iteration.getParallelism());
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) DataSet(org.apache.flink.api.java.DataSet) IterativeDataSet(org.apache.flink.api.java.operators.IterativeDataSet) Plan(org.apache.flink.api.common.Plan) DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat) Tuple3(org.apache.flink.api.java.tuple.Tuple3) BulkIterationBase(org.apache.flink.api.common.operators.base.BulkIterationBase) Test(org.junit.Test)
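
IdentityMapper is referenced but not defined in the snippet; it is presumably a trivial pass-through MapFunction along these lines (a sketch, not necessarily the test's actual helper):

// Sketch only: a pass-through mapper that gives the iteration a second consumer.
public static class IdentityMapper<T> implements MapFunction<T, T> {

    @Override
    public T map(T value) {
        return value;
    }
}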

Example 49 with DataSet

Use of org.apache.flink.api.java.DataSet in project flink by apache, from class WritableSavepointWindowITCase, method testSlideWindow:

@Test
public void testSlideWindow() throws Exception {
    final String savepointPath = getTempDirPath(new AbstractID().toHexString());
    ExecutionEnvironment bEnv = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple2<String, Integer>> bootstrapData = bEnv.fromCollection(WORDS).map(word -> Tuple2.of(word, 1)).returns(TUPLE_TYPE_INFO);
    WindowedOperatorTransformation<Tuple2<String, Integer>, String, TimeWindow> transformation = OperatorTransformation.bootstrapWith(bootstrapData).assignTimestamps(record -> 2L).keyBy(tuple -> tuple.f0, Types.STRING).window(SlidingEventTimeWindows.of(Time.milliseconds(5), Time.milliseconds(1)));
    Savepoint.create(new MemoryStateBackend(), 128).withOperator(UID, windowBootstrap.bootstrap(transformation)).write(savepointPath);
    bEnv.execute("write state");
    StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    WindowedStream<Tuple2<String, Integer>, String, TimeWindow> stream = sEnv.addSource(new MaxWatermarkSource<Tuple2<String, Integer>>()).returns(TUPLE_TYPE_INFO).keyBy(tuple -> tuple.f0).window(SlidingEventTimeWindows.of(Time.milliseconds(5), Time.milliseconds(1)));
    DataStream<Tuple2<String, Integer>> windowed = windowStream.window(stream).uid(UID);
    CompletableFuture<Collection<Tuple2<String, Integer>>> future = collector.collect(windowed);
    submitJob(savepointPath, sEnv);
    Collection<Tuple2<String, Integer>> results = future.get();
    Assert.assertEquals("Incorrect number of results", 15, results.size());
    Assert.assertThat("Incorrect bootstrap state", new HashSet<>(results), STANDARD_MATCHER);
}
Also used : Arrays(java.util.Arrays) Tuple3(org.apache.flink.api.java.tuple.Tuple3) Tuple2(org.apache.flink.api.java.tuple.Tuple2) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) AggregateFunction(org.apache.flink.api.common.functions.AggregateFunction) DataSet(org.apache.flink.api.java.DataSet) StateBackend(org.apache.flink.runtime.state.StateBackend) StreamCollector(org.apache.flink.streaming.util.StreamCollector) WindowedStream(org.apache.flink.streaming.api.datastream.WindowedStream) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) Parameterized(org.junit.runners.Parameterized) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) AbstractID(org.apache.flink.util.AbstractID) Collection(java.util.Collection) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) List(java.util.List) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) TumblingEventTimeWindows(org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows) ClusterClient(org.apache.flink.client.program.ClusterClient) SerializedThrowable(org.apache.flink.util.SerializedThrowable) Optional(java.util.Optional) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) RunWith(org.junit.runner.RunWith) CompletableFuture(java.util.concurrent.CompletableFuture) CountEvictor(org.apache.flink.streaming.api.windowing.evictors.CountEvictor) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Collector(org.apache.flink.util.Collector) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) ReduceFunction(org.apache.flink.api.common.functions.ReduceFunction) Types(org.apache.flink.api.common.typeinfo.Types) Time(org.apache.flink.streaming.api.windowing.time.Time) Iterator(java.util.Iterator) SingleOutputStreamOperator(org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator) SlidingEventTimeWindows(org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows) Matchers(org.hamcrest.Matchers) Test(org.junit.Test) MaxWatermarkSource(org.apache.flink.state.api.utils.MaxWatermarkSource) ProcessWindowFunction(org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction) RocksDBStateBackend(org.apache.flink.contrib.streaming.state.RocksDBStateBackend) DataStream(org.apache.flink.streaming.api.datastream.DataStream) WindowFunction(org.apache.flink.streaming.api.functions.windowing.WindowFunction) Rule(org.junit.Rule) HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend) Matcher(org.hamcrest.Matcher) Assert(org.junit.Assert) SavepointRestoreSettings(org.apache.flink.runtime.jobgraph.SavepointRestoreSettings)
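
Note that windowBootstrap, windowStream, collector, UID, WORDS, TUPLE_TYPE_INFO and STANDARD_MATCHER are fixtures of the surrounding parameterized test class and are not shown here. As a sketch of one plausible parameterization, assuming the State Processor API's reduce methods, the bootstrap and stream sides could pair up like this:

// Sketch only: one plausible (windowBootstrap, windowStream) pairing; the
// actual parameterizations of the test class are not shown above.
ReduceFunction<Tuple2<String, Integer>> sum =
        (a, b) -> Tuple2.of(a.f0, a.f1 + b.f1);

// Bootstrap side: turn the windowed transformation into savepoint state.
BootstrapTransformation<Tuple2<String, Integer>> bootstrap = transformation.reduce(sum);

// Stream side: the restored job declares a matching window operator under the same uid.
DataStream<Tuple2<String, Integer>> restored = stream.reduce(sum).uid(UID);

Both sides must use the same key and window assigner, otherwise the restored operator cannot pick up the bootstrapped window state.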

Example 50 with DataSet

Use of org.apache.flink.api.java.DataSet in project flink by apache, from class WritableSavepointWindowITCase, method testSlideWindowWithEvictor:

@Test
public void testSlideWindowWithEvictor() throws Exception {
    final String savepointPath = getTempDirPath(new AbstractID().toHexString());
    ExecutionEnvironment bEnv = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple2<String, Integer>> bootstrapData = bEnv.fromCollection(WORDS).map(word -> Tuple2.of(word, 1)).returns(TUPLE_TYPE_INFO);
    WindowedOperatorTransformation<Tuple2<String, Integer>, String, TimeWindow> transformation = OperatorTransformation.bootstrapWith(bootstrapData).assignTimestamps(record -> 2L).keyBy(tuple -> tuple.f0, Types.STRING).window(SlidingEventTimeWindows.of(Time.milliseconds(5), Time.milliseconds(1))).evictor(CountEvictor.of(1));
    Savepoint.create(new MemoryStateBackend(), 128).withOperator(UID, windowBootstrap.bootstrap(transformation)).write(savepointPath);
    bEnv.execute("write state");
    StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    WindowedStream<Tuple2<String, Integer>, String, TimeWindow> stream = sEnv.addSource(new MaxWatermarkSource<Tuple2<String, Integer>>()).returns(TUPLE_TYPE_INFO).keyBy(tuple -> tuple.f0).window(SlidingEventTimeWindows.of(Time.milliseconds(5), Time.milliseconds(1))).evictor(CountEvictor.of(1));
    DataStream<Tuple2<String, Integer>> windowed = windowStream.window(stream).uid(UID);
    CompletableFuture<Collection<Tuple2<String, Integer>>> future = collector.collect(windowed);
    submitJob(savepointPath, sEnv);
    Collection<Tuple2<String, Integer>> results = future.get();
    Assert.assertEquals("Incorrect number of results", 15, results.size());
    Assert.assertThat("Incorrect bootstrap state", new HashSet<>(results), EVICTOR_MATCHER);
}
Also used : Arrays(java.util.Arrays) Tuple3(org.apache.flink.api.java.tuple.Tuple3) Tuple2(org.apache.flink.api.java.tuple.Tuple2) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) AggregateFunction(org.apache.flink.api.common.functions.AggregateFunction) DataSet(org.apache.flink.api.java.DataSet) StateBackend(org.apache.flink.runtime.state.StateBackend) StreamCollector(org.apache.flink.streaming.util.StreamCollector) WindowedStream(org.apache.flink.streaming.api.datastream.WindowedStream) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) Parameterized(org.junit.runners.Parameterized) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) AbstractID(org.apache.flink.util.AbstractID) Collection(java.util.Collection) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) List(java.util.List) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) TumblingEventTimeWindows(org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows) ClusterClient(org.apache.flink.client.program.ClusterClient) SerializedThrowable(org.apache.flink.util.SerializedThrowable) Optional(java.util.Optional) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) RunWith(org.junit.runner.RunWith) CompletableFuture(java.util.concurrent.CompletableFuture) CountEvictor(org.apache.flink.streaming.api.windowing.evictors.CountEvictor) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Collector(org.apache.flink.util.Collector) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) ReduceFunction(org.apache.flink.api.common.functions.ReduceFunction) Types(org.apache.flink.api.common.typeinfo.Types) Time(org.apache.flink.streaming.api.windowing.time.Time) Iterator(java.util.Iterator) SingleOutputStreamOperator(org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator) SlidingEventTimeWindows(org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows) Matchers(org.hamcrest.Matchers) Test(org.junit.Test) MaxWatermarkSource(org.apache.flink.state.api.utils.MaxWatermarkSource) ProcessWindowFunction(org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction) RocksDBStateBackend(org.apache.flink.contrib.streaming.state.RocksDBStateBackend) DataStream(org.apache.flink.streaming.api.datastream.DataStream) WindowFunction(org.apache.flink.streaming.api.functions.windowing.WindowFunction) Rule(org.junit.Rule) HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend) Matcher(org.hamcrest.Matcher) Assert(org.junit.Assert) SavepointRestoreSettings(org.apache.flink.runtime.jobgraph.SavepointRestoreSettings)
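
This variant differs from Example 49 only in the CountEvictor.of(1) call, which must appear on both the bootstrap transformation and the restored stream. The submitJob helper is likewise not shown; judging from the imports (JobGraph, SavepointRestoreSettings, ClusterClient), it plausibly attaches the freshly written savepoint to the streaming job before submission, roughly like this hypothetical sketch:

// Hypothetical sketch of submitJob: restore the streaming job from the
// savepoint written by the batch bootstrap. miniClusterResource is an
// assumed test-harness field, not shown in the snippet.
private void submitJob(String savepointPath, StreamExecutionEnvironment sEnv) throws Exception {
    JobGraph jobGraph = sEnv.getStreamGraph().getJobGraph();
    jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, false));
    ClusterClient<?> client = miniClusterResource.getClusterClient();
    client.submitJob(jobGraph).get();
}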

Aggregations

DataSet (org.apache.flink.api.java.DataSet): 56 uses
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 31 uses
Test (org.junit.Test): 24 uses
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 17 uses
DiscardingOutputFormat (org.apache.flink.api.java.io.DiscardingOutputFormat): 11 uses
Plan (org.apache.flink.api.common.Plan): 10 uses
Types (org.apache.flink.api.common.typeinfo.Types): 10 uses
Tuple3 (org.apache.flink.api.java.tuple.Tuple3): 10 uses
Assert (org.junit.Assert): 10 uses
Arrays (java.util.Arrays): 9 uses
Rule (org.junit.Rule): 9 uses
List (java.util.List): 8 uses
MapFunction (org.apache.flink.api.common.functions.MapFunction): 8 uses
Configuration (org.apache.flink.configuration.Configuration): 7 uses
Graph (org.apache.flink.graph.Graph): 7 uses
NullValue (org.apache.flink.types.NullValue): 7 uses
ArrayList (java.util.ArrayList): 6 uses
GroupReduceFunction (org.apache.flink.api.common.functions.GroupReduceFunction): 6 uses
KeySelector (org.apache.flink.api.java.functions.KeySelector): 6 uses
PythonMapPartition (org.apache.flink.python.api.functions.PythonMapPartition): 6 uses