Search in sources:

Example 96 with JobVertex

use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

The method createJobGraph of the class SchedulerBenchmarkUtils:

/**
 * Builds a streaming {@link JobGraph} from the given vertices and applies the job type and
 * execution mode taken from the benchmark configuration.
 *
 * @param jobVertices vertices to include in the graph
 * @param jobConfiguration benchmark configuration supplying job type and execution mode
 * @return the configured job graph
 * @throws IOException if storing the execution config in the graph fails
 */
public static JobGraph createJobGraph(List<JobVertex> jobVertices, JobConfiguration jobConfiguration) throws IOException {
    final JobVertex[] vertices = jobVertices.toArray(new JobVertex[0]);

    final JobGraph graph = JobGraphTestUtils.streamingJobGraph(vertices);
    graph.setJobType(jobConfiguration.getJobType());

    final ExecutionConfig config = new ExecutionConfig();
    config.setExecutionMode(jobConfiguration.getExecutionMode());
    graph.setExecutionConfig(config);

    return graph;
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) AccessExecutionJobVertex(org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig)

Example 97 with JobVertex

use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

The method createDefaultJobVertices of the class SchedulerBenchmarkUtils:

/**
 * Creates the default two-vertex topology (source -> sink) used by the scheduler benchmarks.
 *
 * <p>Both vertices run {@link NoOpInvokable} with the configured parallelism; the sink is
 * connected to the source using the distribution pattern and result partition type from the
 * benchmark configuration.
 *
 * @param jobConfiguration benchmark configuration supplying parallelism and edge properties
 * @return the source and sink vertices, in that order
 */
public static List<JobVertex> createDefaultJobVertices(JobConfiguration jobConfiguration) {
    final int parallelism = jobConfiguration.getParallelism();

    final JobVertex sourceVertex = new JobVertex("source");
    sourceVertex.setInvokableClass(NoOpInvokable.class);
    sourceVertex.setParallelism(parallelism);

    final JobVertex sinkVertex = new JobVertex("sink");
    sinkVertex.setInvokableClass(NoOpInvokable.class);
    sinkVertex.setParallelism(parallelism);

    // The edge determines how source subtasks are wired to sink subtasks.
    sinkVertex.connectNewDataSetAsInput(
            sourceVertex,
            jobConfiguration.getDistributionPattern(),
            jobConfiguration.getResultPartitionType());

    final List<JobVertex> jobVertices = new ArrayList<>();
    jobVertices.add(sourceVertex);
    jobVertices.add(sinkVertex);
    return jobVertices;
}
Also used : JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) AccessExecutionJobVertex(org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex) ArrayList(java.util.ArrayList)

Example 98 with JobVertex

use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

The method testIsolatedVertices of the class ForwardGroupComputeUtilTest:

/**
 * Tests that forward group computation handles a job graph consisting solely of isolated
 * (unconnected) vertices: no forward groups should be produced.
 *
 * <pre>
 *     (v1)
 *
 *     (v2)
 *
 *     (v3)
 * </pre>
 */
@Test
public void testIsolatedVertices() throws Exception {
    final JobVertex vertex1 = new JobVertex("v1");
    final JobVertex vertex2 = new JobVertex("v2");
    final JobVertex vertex3 = new JobVertex("v3");

    // No edges exist, so no vertex can belong to any forward group.
    final Set<ForwardGroup> forwardGroups = computeForwardGroups(vertex1, vertex2, vertex3);
    checkGroupSize(forwardGroups, 0);
}
Also used : JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) Test(org.junit.Test)

Example 99 with JobVertex

use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

The method testTwoInputsMergesIntoOne of the class ForwardGroupComputeUtilTest:

/**
 * Tests that the computation of the job graph where two upstream vertices connect with one
 * downstream vertex works correctly.
 *
 * <pre>
 *
 *     (v1) --
 *           |
 *           --> (v3) -> (v4)
 *           |
 *     (v2) --
 *
 * </pre>
 */
@Test
public void testTwoInputsMergesIntoOne() throws Exception {
    final JobVertex vertex1 = new JobVertex("v1");
    final JobVertex vertex2 = new JobVertex("v2");
    final JobVertex vertex3 = new JobVertex("v3");
    final JobVertex vertex4 = new JobVertex("v4");

    // Both edges into v3 are marked forward, so v1, v2 and v3 merge into a single group.
    vertex3.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    vertex1.getProducedDataSets().get(0).getConsumer().setForward(true);
    vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    vertex2.getProducedDataSets().get(0).getConsumer().setForward(true);

    // The v3 -> v4 edge is not marked forward, so v4 stays outside the group.
    vertex4.connectNewDataSetAsInput(vertex3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    // Expect exactly one group, containing three vertices.
    final Set<ForwardGroup> forwardGroups = computeForwardGroups(vertex1, vertex2, vertex3, vertex4);
    checkGroupSize(forwardGroups, 1, 3);
}
Also used : JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) Test(org.junit.Test)

Example 100 with JobVertex

use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

The method testNodeHashAfterIntermediateUnchaining of the class StreamingJobGraphGeneratorNodeHashTest:

/**
 * Tests that (un)chaining affects the node hash of intermediate nodes.
 *
 * <p>Both topologies break the chain before the map operator; the second additionally breaks
 * it at the filter. The map vertex therefore heads a different operator chain in each variant,
 * and the generated hash (and thus the {@link JobVertexID}) of the map vertex must differ.
 */
@Test
public void testNodeHashAfterIntermediateUnchaining() throws Exception {
    // Variant A: the chain break happens only at the map operator.
    StreamExecutionEnvironment chainedEnv = StreamExecutionEnvironment.createLocalEnvironment();
    chainedEnv.setParallelism(4);
    chainedEnv.addSource(new NoOpSourceFunction()).map(new NoOpMapFunction()).name("map").startNewChain().filter(new NoOpFilterFunction()).addSink(new DiscardingSink<>());

    final JobGraph chainedJobGraph = chainedEnv.getStreamGraph().getJobGraph();
    final JobVertex chainedMapVertex = chainedJobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    assertTrue(chainedMapVertex.getName().startsWith("map"));
    final JobVertexID chainedMapId = chainedMapVertex.getID();

    // Variant B: an additional chain break after the filter operator.
    StreamExecutionEnvironment unchainedEnv = StreamExecutionEnvironment.createLocalEnvironment();
    unchainedEnv.setParallelism(4);
    unchainedEnv.addSource(new NoOpSourceFunction()).map(new NoOpMapFunction()).name("map").startNewChain().filter(new NoOpFilterFunction()).startNewChain().addSink(new DiscardingSink<>());

    final JobGraph unchainedJobGraph = unchainedEnv.getStreamGraph().getJobGraph();
    final JobVertex unchainedMapVertex = unchainedJobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    assertEquals("map", unchainedMapVertex.getName());
    final JobVertexID unchainedMapId = unchainedMapVertex.getID();

    // Different chain contents must yield different hashes for the map vertex.
    assertNotEquals(chainedMapId, unchainedMapId);
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)

Aggregations

JobVertex (org.apache.flink.runtime.jobgraph.JobVertex)378 Test (org.junit.Test)230 JobGraph (org.apache.flink.runtime.jobgraph.JobGraph)197 Configuration (org.apache.flink.configuration.Configuration)74 JobID (org.apache.flink.api.common.JobID)60 JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID)58 ArrayList (java.util.ArrayList)57 ExecutionConfig (org.apache.flink.api.common.ExecutionConfig)47 StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)44 SlotSharingGroup (org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup)41 SchedulerBase (org.apache.flink.runtime.scheduler.SchedulerBase)35 HashMap (java.util.HashMap)30 ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex)29 IOException (java.io.IOException)24 ExecutionGraph (org.apache.flink.runtime.executiongraph.ExecutionGraph)24 TaskConfig (org.apache.flink.runtime.operators.util.TaskConfig)24 Set (java.util.Set)23 JobException (org.apache.flink.runtime.JobException)23 Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler)23 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)22