
Example 96 with JobGraph

Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.

The class StreamingJobGraphGeneratorTest, method testResourcesForIteration.

/**
	 * Verifies that resources are merged correctly for chained operators
	 * (covering the mid-chain and iteration cases) when generating the job graph.
	 */
@Test
public void testResourcesForIteration() throws Exception {
    ResourceSpec resource1 = new ResourceSpec(0.1, 100);
    ResourceSpec resource2 = new ResourceSpec(0.2, 200);
    ResourceSpec resource3 = new ResourceSpec(0.3, 300);
    ResourceSpec resource4 = new ResourceSpec(0.4, 400);
    ResourceSpec resource5 = new ResourceSpec(0.5, 500);
    Method opMethod = SingleOutputStreamOperator.class.getDeclaredMethod("setResources", ResourceSpec.class);
    opMethod.setAccessible(true);
    Method sinkMethod = DataStreamSink.class.getDeclaredMethod("setResources", ResourceSpec.class);
    sinkMethod.setAccessible(true);
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Integer> source = env.addSource(new ParallelSourceFunction<Integer>() {

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    }).name("test_source");
    opMethod.invoke(source, resource1);
    IterativeStream<Integer> iteration = source.iterate(3000);
    opMethod.invoke(iteration, resource2);
    DataStream<Integer> flatMap = iteration.flatMap(new FlatMapFunction<Integer, Integer>() {

        @Override
        public void flatMap(Integer value, Collector<Integer> out) throws Exception {
            out.collect(value);
        }
    }).name("test_flatMap");
    opMethod.invoke(flatMap, resource3);
    // CHAIN(flatMap -> Filter)
    DataStream<Integer> increment = flatMap.filter(new FilterFunction<Integer>() {

        @Override
        public boolean filter(Integer value) throws Exception {
            return false;
        }
    }).name("test_filter");
    opMethod.invoke(increment, resource4);
    DataStreamSink<Integer> sink = iteration.closeWith(increment).addSink(new SinkFunction<Integer>() {

        @Override
        public void invoke(Integer value) throws Exception {
        }
    }).disableChaining().name("test_sink");
    sinkMethod.invoke(sink, resource5);
    JobGraph jobGraph = new StreamingJobGraphGenerator(env.getStreamGraph(), 1).createJobGraph();
    for (JobVertex jobVertex : jobGraph.getVertices()) {
        if (jobVertex.getName().contains("test_source")) {
            assertTrue(jobVertex.getMinResources().equals(resource1));
        } else if (jobVertex.getName().contains("Iteration_Source")) {
            assertTrue(jobVertex.getPreferredResources().equals(resource2));
        } else if (jobVertex.getName().contains("test_flatMap")) {
            assertTrue(jobVertex.getMinResources().equals(resource3.merge(resource4)));
        } else if (jobVertex.getName().contains("Iteration_Tail")) {
            assertTrue(jobVertex.getPreferredResources().equals(ResourceSpec.DEFAULT));
        } else if (jobVertex.getName().contains("test_sink")) {
            assertTrue(jobVertex.getMinResources().equals(resource5));
        }
    }
}
Also used : FilterFunction(org.apache.flink.api.common.functions.FilterFunction) ResourceSpec(org.apache.flink.api.common.operators.ResourceSpec) Method(java.lang.reflect.Method) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) Collector(org.apache.flink.util.Collector) ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)
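
The reflection calls above suggest that setResources is not publicly exposed in this Flink version, which is why the test reaches for getDeclaredMethod. The merge expectation for the chained flatMap -> filter pair can be illustrated on its own: the CpuAndHeap class below is a purely hypothetical stand-in for ResourceSpec, assuming (as the assertion resource3.merge(resource4) suggests) that chained operators add up their requirements because they run in a single slot.

// Minimal sketch (hypothetical helper, not Flink API): the merge behaviour the
// test above expects for the chained flatMap -> filter vertex.
public final class ResourceMergeSketch {

    /** Hypothetical stand-in for ResourceSpec: CPU cores plus heap memory in MB. */
    static final class CpuAndHeap {
        final double cpuCores;
        final int heapMemoryMB;

        CpuAndHeap(double cpuCores, int heapMemoryMB) {
            this.cpuCores = cpuCores;
            this.heapMemoryMB = heapMemoryMB;
        }

        /** Assumption: chained operators add up their resource requirements. */
        CpuAndHeap merge(CpuAndHeap other) {
            return new CpuAndHeap(cpuCores + other.cpuCores, heapMemoryMB + other.heapMemoryMB);
        }
    }

    public static void main(String[] args) {
        CpuAndHeap resource3 = new CpuAndHeap(0.3, 300); // flatMap
        CpuAndHeap resource4 = new CpuAndHeap(0.4, 400); // filter chained onto flatMap
        CpuAndHeap merged = resource3.merge(resource4);
        // The chained vertex should request the sum: 0.7 cores and 700 MB heap.
        System.out.println(merged.cpuCores + " cores, " + merged.heapMemoryMB + " MB");
    }
}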

Example 97 with JobGraph

Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.

The class StreamingJobGraphGeneratorTest, method testParallelismOneNotChained.

@Test
public void testParallelismOneNotChained() {
    // --------- the program ---------
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    DataStream<Tuple2<String, String>> input = env.fromElements("a", "b", "c", "d", "e", "f").map(new MapFunction<String, Tuple2<String, String>>() {

        private static final long serialVersionUID = 471891682418382583L;

        @Override
        public Tuple2<String, String> map(String value) {
            return new Tuple2<>(value, value);
        }
    });
    DataStream<Tuple2<String, String>> result = input.keyBy(0).map(new MapFunction<Tuple2<String, String>, Tuple2<String, String>>() {

        private static final long serialVersionUID = 3583760206245136188L;

        @Override
        public Tuple2<String, String> map(Tuple2<String, String> value) {
            return value;
        }
    });
    result.addSink(new SinkFunction<Tuple2<String, String>>() {

        private static final long serialVersionUID = -5614849094269539342L;

        @Override
        public void invoke(Tuple2<String, String> value) {
        }
    });
    // --------- the job graph ---------
    StreamGraph streamGraph = env.getStreamGraph();
    streamGraph.setJobName("test job");
    JobGraph jobGraph = streamGraph.getJobGraph();
    List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
    assertEquals(2, jobGraph.getNumberOfVertices());
    assertEquals(1, verticesSorted.get(0).getParallelism());
    assertEquals(1, verticesSorted.get(1).getParallelism());
    JobVertex sourceVertex = verticesSorted.get(0);
    JobVertex mapSinkVertex = verticesSorted.get(1);
    assertEquals(ResultPartitionType.PIPELINED_BOUNDED, sourceVertex.getProducedDataSets().get(0).getResultType());
    assertEquals(ResultPartitionType.PIPELINED_BOUNDED, mapSinkVertex.getInputs().get(0).getSource().getResultType());
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) Tuple2(org.apache.flink.api.java.tuple.Tuple2) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)
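
The keyBy(0) above introduces a network shuffle, which is what splits the job into a source vertex and a map -> sink vertex even at parallelism one. For explicit control over chaining the DataStream API also offers startNewChain(), disableChaining(), and disableOperatorChaining(); the pipeline below is only an illustrative sketch of those calls, not taken from the Flink test suite.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChainingControlSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        MapFunction<String, String> identity = new MapFunction<String, String>() {
            @Override
            public String map(String value) {
                return value;
            }
        };

        env.fromElements("a", "b", "c")
            .map(identity)
            // begin a new chain at this operator instead of chaining it to the map above
            .map(identity).startNewChain()
            // keep this operator out of any chain entirely
            .map(identity).disableChaining()
            .print();

        // env.disableOperatorChaining(); would switch chaining off for the whole job
        env.execute("chaining-control-sketch");
    }
}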

Example 98 with JobGraph

Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.

The class StreamingJobGraphGeneratorTest, method testDisabledCheckpointing.

/**
	 * Tests that disabling checkpointing sets the checkpointing interval to Long.MAX_VALUE.
	 */
@Test
public void testDisabledCheckpointing() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamGraph streamGraph = new StreamGraph(env, 1);
    assertFalse("Checkpointing enabled", streamGraph.getCheckpointConfig().isCheckpointingEnabled());
    StreamingJobGraphGenerator jobGraphGenerator = new StreamingJobGraphGenerator(streamGraph, 1);
    JobGraph jobGraph = jobGraphGenerator.createJobGraph();
    JobSnapshottingSettings snapshottingSettings = jobGraph.getSnapshotSettings();
    assertEquals(Long.MAX_VALUE, snapshottingSettings.getCheckpointInterval());
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobSnapshottingSettings(org.apache.flink.runtime.jobgraph.tasks.JobSnapshottingSettings) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)
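
As a hedged counterpart to this test, the sketch below enables checkpointing and reads the interval back from the generated job graph. The API calls mirror those used in the examples above, while the tiny pipeline and the 500 ms interval are illustrative.

import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CheckpointIntervalSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // with checkpointing enabled, the configured interval should be carried
        // into the job graph instead of Long.MAX_VALUE
        env.enableCheckpointing(500L);
        // a stream graph needs at least one operator before it can become a job graph
        env.fromElements(1, 2, 3).print();

        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        System.out.println(jobGraph.getSnapshotSettings().getCheckpointInterval()); // expected: 500
    }
}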

Example 99 with JobGraph

Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.

The class SlotAllocationTest, method testUnion.

@Test
public void testUnion() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    FilterFunction<Long> dummyFilter = new FilterFunction<Long>() {

        @Override
        public boolean filter(Long value) {
            return false;
        }
    };
    DataStream<Long> src1 = env.generateSequence(1, 10);
    DataStream<Long> src2 = env.generateSequence(1, 10).slotSharingGroup("src-1");
    // this should not inherit group "src-1"
    src1.union(src2).filter(dummyFilter);
    DataStream<Long> src3 = env.generateSequence(1, 10).slotSharingGroup("group-1");
    DataStream<Long> src4 = env.generateSequence(1, 10).slotSharingGroup("group-1");
    // this should inherit "group-1" now
    src3.union(src4).filter(dummyFilter);
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    // first pipeline
    assertEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(4).getSlotSharingGroup());
    assertNotEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(1).getSlotSharingGroup());
    assertNotEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(4).getSlotSharingGroup());
    // second pipeline
    assertEquals(vertices.get(2).getSlotSharingGroup(), vertices.get(3).getSlotSharingGroup());
    assertEquals(vertices.get(2).getSlotSharingGroup(), vertices.get(5).getSlotSharingGroup());
    assertEquals(vertices.get(3).getSlotSharingGroup(), vertices.get(5).getSlotSharingGroup());
}
Also used : FilterFunction(org.apache.flink.api.common.functions.FilterFunction) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)
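
For reference, slot sharing groups are assigned through the public DataStream API as in the sketch below; the group names and the trivial filter are illustrative. The point is that an operator without an explicit group inherits the group of its input(s), which is exactly what the assertions above verify.

import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SlotSharingSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        FilterFunction<Long> keepAll = new FilterFunction<Long>() {
            @Override
            public boolean filter(Long value) {
                return true;
            }
        };

        DataStream<Long> src = env.generateSequence(1, 10).slotSharingGroup("sources");

        // the first filter has no explicit group, so it inherits "sources" from its input;
        // the second filter is moved into its own group and the sink inherits "transforms"
        src.filter(keepAll)
           .filter(keepAll).slotSharingGroup("transforms")
           .print();

        env.execute("slot-sharing-sketch");
    }
}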

Example 100 with JobGraph

Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.

The class SlotAllocationTest, method testCoOperation.

@Test
public void testCoOperation() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    CoMapFunction<Long, Long, Long> dummyCoMap = new CoMapFunction<Long, Long, Long>() {

        @Override
        public Long map1(Long value) throws Exception {
            return null;
        }

        @Override
        public Long map2(Long value) throws Exception {
            return null;
        }
    };
    DataStream<Long> src1 = env.generateSequence(1, 10);
    DataStream<Long> src2 = env.generateSequence(1, 10).slotSharingGroup("src-1");
    // this should not inherit group "src-1"
    src1.connect(src2).map(dummyCoMap);
    DataStream<Long> src3 = env.generateSequence(1, 10).slotSharingGroup("group-1");
    DataStream<Long> src4 = env.generateSequence(1, 10).slotSharingGroup("group-1");
    // this should inherit "group-1" now
    src3.connect(src4).map(dummyCoMap);
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    // first pipeline
    assertEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(4).getSlotSharingGroup());
    assertNotEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(1).getSlotSharingGroup());
    assertNotEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(4).getSlotSharingGroup());
    // second pipeline
    assertEquals(vertices.get(2).getSlotSharingGroup(), vertices.get(3).getSlotSharingGroup());
    assertEquals(vertices.get(2).getSlotSharingGroup(), vertices.get(5).getSlotSharingGroup());
    assertEquals(vertices.get(3).getSlotSharingGroup(), vertices.get(5).getSlotSharingGroup());
}
Also used : CoMapFunction(org.apache.flink.streaming.api.functions.co.CoMapFunction) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)
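
Taken together, examples 99 and 100 encode the inheritance rule for slot sharing groups: a multi-input operator inherits a group only when all of its inputs agree on one, and otherwise falls back to the default group. The helper below is a purely illustrative restatement of that rule, not Flink code.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SlotSharingInheritanceSketch {

    /**
     * Illustrative version of the rule the two tests above rely on:
     * inherit the inputs' group only when they all agree, otherwise use "default".
     */
    static String inheritedGroup(List<String> inputGroups) {
        Set<String> distinct = new HashSet<>(inputGroups);
        return distinct.size() == 1 ? distinct.iterator().next() : "default";
    }

    public static void main(String[] args) {
        // src1 ("default") combined with src2 ("src-1") -> no inheritance
        System.out.println(inheritedGroup(Arrays.asList("default", "src-1"))); // default
        // src3 ("group-1") combined with src4 ("group-1") -> inherits "group-1"
        System.out.println(inheritedGroup(Arrays.asList("group-1", "group-1"))); // group-1
    }
}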

Aggregations

JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 131
Test (org.junit.Test): 95
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 78
Configuration (org.apache.flink.configuration.Configuration): 45
JobID (org.apache.flink.api.common.JobID): 39
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 34
ActorGateway (org.apache.flink.runtime.instance.ActorGateway): 32
Deadline (scala.concurrent.duration.Deadline): 31
FiniteDuration (scala.concurrent.duration.FiniteDuration): 27
JobManagerMessages (org.apache.flink.runtime.messages.JobManagerMessages): 20
AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway): 18
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 17
SubmitJob (org.apache.flink.runtime.messages.JobManagerMessages.SubmitJob): 15
TestingCluster (org.apache.flink.runtime.testingUtils.TestingCluster): 15
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 14
TestingJobManagerMessages (org.apache.flink.runtime.testingUtils.TestingJobManagerMessages): 14
IOException (java.io.IOException): 13
ActorRef (akka.actor.ActorRef): 12
Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler): 11
StreamGraph (org.apache.flink.streaming.api.graph.StreamGraph): 11