Example usage of org.apache.flink.runtime.jobgraph.JobVertex in the Apache Flink project: class SchedulerBenchmarkUtils, method createJobGraph.
/**
 * Builds a streaming {@link JobGraph} from the given vertices and applies the
 * benchmark's job type and execution mode.
 *
 * @param jobVertices the vertices to place into the graph, in order
 * @param jobConfiguration supplies the job type and execution mode to apply
 * @return the configured job graph
 * @throws IOException if setting the execution config fails (it is serialized into the graph)
 */
public static JobGraph createJobGraph(List<JobVertex> jobVertices, JobConfiguration jobConfiguration) throws IOException {
    final JobVertex[] vertexArray = jobVertices.toArray(new JobVertex[0]);
    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(vertexArray);
    jobGraph.setJobType(jobConfiguration.getJobType());

    // Propagate the configured execution mode via a fresh ExecutionConfig.
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setExecutionMode(jobConfiguration.getExecutionMode());
    jobGraph.setExecutionConfig(executionConfig);

    return jobGraph;
}
Example usage of org.apache.flink.runtime.jobgraph.JobVertex in the Apache Flink project: class SchedulerBenchmarkUtils, method createDefaultJobVertices.
/**
 * Creates the default two-vertex topology used by the scheduler benchmarks:
 * a no-op source connected to a no-op sink.
 *
 * @param jobConfiguration supplies the parallelism, distribution pattern and
 *     result partition type for the topology
 * @return a mutable list containing the source followed by the sink
 */
public static List<JobVertex> createDefaultJobVertices(JobConfiguration jobConfiguration) {
    final int parallelism = jobConfiguration.getParallelism();

    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(NoOpInvokable.class);
    source.setParallelism(parallelism);

    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(NoOpInvokable.class);
    sink.setParallelism(parallelism);

    // Wire the sink to the source using the configured edge properties.
    sink.connectNewDataSetAsInput(
            source,
            jobConfiguration.getDistributionPattern(),
            jobConfiguration.getResultPartitionType());

    final List<JobVertex> jobVertices = new ArrayList<>();
    jobVertices.add(source);
    jobVertices.add(sink);
    return jobVertices;
}
Example usage of org.apache.flink.runtime.jobgraph.JobVertex in the Apache Flink project: class ForwardGroupComputeUtilTest, method testIsolatedVertices.
/**
 * Verifies that computing forward groups over a job graph made up solely of
 * disconnected vertices produces no groups.
 *
 * <pre>
 * (v1)
 *
 * (v2)
 *
 * (v3)
 * </pre>
 */
@Test
public void testIsolatedVertices() throws Exception {
    final JobVertex vertex1 = new JobVertex("v1");
    final JobVertex vertex2 = new JobVertex("v2");
    final JobVertex vertex3 = new JobVertex("v3");

    final Set<ForwardGroup> forwardGroups = computeForwardGroups(vertex1, vertex2, vertex3);

    // With no forward edges at all, no forward group should be formed.
    checkGroupSize(forwardGroups, 0);
}
Example usage of org.apache.flink.runtime.jobgraph.JobVertex in the Apache Flink project: class ForwardGroupComputeUtilTest, method testTwoInputsMergesIntoOne.
/**
 * Verifies that two upstream vertices feeding the same downstream vertex via
 * forward edges are merged into a single forward group.
 *
 * <pre>
 *
 * (v1) --
 *        |
 *        --> (v3) -> (v4)
 *        |
 * (v2) --
 *
 * </pre>
 */
@Test
public void testTwoInputsMergesIntoOne() throws Exception {
    final JobVertex vertex1 = new JobVertex("v1");
    final JobVertex vertex2 = new JobVertex("v2");
    final JobVertex vertex3 = new JobVertex("v3");
    final JobVertex vertex4 = new JobVertex("v4");

    // v1 -> v3 and v2 -> v3 are marked as forward edges; v3 -> v4 is not.
    vertex3.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    vertex1.getProducedDataSets().get(0).getConsumer().setForward(true);
    vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    vertex2.getProducedDataSets().get(0).getConsumer().setForward(true);
    vertex4.connectNewDataSetAsInput(vertex3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    final Set<ForwardGroup> forwardGroups = computeForwardGroups(vertex1, vertex2, vertex3, vertex4);

    // Expect exactly one group, containing v1, v2 and v3.
    checkGroupSize(forwardGroups, 1, 3);
}
Example usage of org.apache.flink.runtime.jobgraph.JobVertex in the Apache Flink project: class StreamingJobGraphGeneratorNodeHashTest, method testNodeHashAfterIntermediateUnchaining.
/**
 * Verifies that breaking a chain at an intermediate node changes that node's hash.
 *
 * <pre>
 * A (chained):   [ (src0) -> (map) -> (filter) -> (sink) ]
 * B (unchained): [ (src0) ] -> [ (map) -> (filter) -> (sink) ]
 * </pre>
 *
 * <p>The ID of the map vertex must differ between the two variants.
 */
@Test
public void testNodeHashAfterIntermediateUnchaining() throws Exception {
    // Variant A: the filter (and sink) stay chained to the map vertex.
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);
    env.addSource(new NoOpSourceFunction())
            .map(new NoOpMapFunction())
            .name("map")
            .startNewChain()
            .filter(new NoOpFilterFunction())
            .addSink(new DiscardingSink<>());

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    final JobVertex mapWithChainedFilter = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    assertTrue(mapWithChainedFilter.getName().startsWith("map"));
    final JobVertexID idWithChaining = mapWithChainedFilter.getID();

    // Variant B: the filter starts its own chain, leaving the map vertex alone.
    env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);
    env.addSource(new NoOpSourceFunction())
            .map(new NoOpMapFunction())
            .name("map")
            .startNewChain()
            .filter(new NoOpFilterFunction())
            .startNewChain()
            .addSink(new DiscardingSink<>());

    jobGraph = env.getStreamGraph().getJobGraph();
    final JobVertex standaloneMap = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    assertEquals("map", standaloneMap.getName());
    final JobVertexID idWithoutChaining = standaloneMap.getID();

    // Moving the chain boundary must change the generated vertex hash.
    assertNotEquals(idWithChaining, idWithoutChaining);
}
Aggregations