Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.
The class ZooKeeperSubmittedJobGraphsStoreITCase, method verifyJobGraphs.
protected void verifyJobGraphs(SubmittedJobGraph expected, SubmittedJobGraph actual) throws Exception {
    JobGraph expectedJobGraph = expected.getJobGraph();
    JobGraph actualJobGraph = actual.getJobGraph();
    assertEquals(expectedJobGraph.getName(), actualJobGraph.getName());
    assertEquals(expectedJobGraph.getJobID(), actualJobGraph.getJobID());
    JobInfo expectedJobInfo = expected.getJobInfo();
    JobInfo actualJobInfo = actual.getJobInfo();
    assertEquals(expectedJobInfo, actualJobInfo);
}
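In the test, this helper is called after round-tripping a job graph through the ZooKeeper-backed store. A minimal sketch of such a round trip, assuming an already started SubmittedJobGraphStore; createZooKeeperStore() and createJobInfo() are hypothetical stand-ins for the actual test fixtures:

    // Hedged sketch: persist a job graph, recover it, and compare the two.
    // createZooKeeperStore() and createJobInfo() are hypothetical helpers.
    SubmittedJobGraphStore store = createZooKeeperStore();
    SubmittedJobGraph stored = new SubmittedJobGraph(new JobGraph("TestJob"), createJobInfo());
    store.putJobGraph(stored);
    SubmittedJobGraph recovered = store.recoverJobGraph(stored.getJobId());
    verifyJobGraphs(stored, recovered);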
Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.
The class UnionClosedBranchingTest, method testUnionClosedBranchingTest.
@Test
public void testUnionClosedBranchingTest() throws Exception {
    // -----------------------------------------------------------------------------------------
    // Build test program
    // -----------------------------------------------------------------------------------------
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setExecutionMode(executionMode);
    env.setParallelism(4);
    DataSet<Tuple1<Integer>> src1 = env.fromElements(new Tuple1<>(0), new Tuple1<>(1));
    DataSet<Tuple1<Integer>> src2 = env.fromElements(new Tuple1<>(0), new Tuple1<>(1));
    DataSet<Tuple1<Integer>> union = src1.union(src2);
    DataSet<Tuple2<Integer, Integer>> join = union.join(union).where(0).equalTo(0).projectFirst(0).projectSecond(0);
    join.output(new DiscardingOutputFormat<Tuple2<Integer, Integer>>());
    // -----------------------------------------------------------------------------------------
    // Verify optimized plan
    // -----------------------------------------------------------------------------------------
    OptimizedPlan optimizedPlan = compileNoStats(env.createProgramPlan());
    SinkPlanNode sinkNode = optimizedPlan.getDataSinks().iterator().next();
    DualInputPlanNode joinNode = (DualInputPlanNode) sinkNode.getPredecessor();
    // Verify that the compiler correctly sets the expected data exchange modes.
    for (Channel channel : joinNode.getInputs()) {
        assertEquals("Unexpected data exchange mode between union and join node.", unionToJoin, channel.getDataExchangeMode());
        assertEquals("Unexpected ship strategy between union and join node.", unionToJoinStrategy, channel.getShipStrategy());
    }
    for (SourcePlanNode src : optimizedPlan.getDataSources()) {
        for (Channel channel : src.getOutgoingChannels()) {
            assertEquals("Unexpected data exchange mode between source and union node.", sourceToUnion, channel.getDataExchangeMode());
            assertEquals("Unexpected ship strategy between source and union node.", sourceToUnionStrategy, channel.getShipStrategy());
        }
    }
    // -----------------------------------------------------------------------------------------
    // Verify generated JobGraph
    // -----------------------------------------------------------------------------------------
    JobGraphGenerator jgg = new JobGraphGenerator();
    JobGraph jobGraph = jgg.compileJobGraph(optimizedPlan);
    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    // Sanity check for the test setup
    assertEquals("Unexpected number of vertices created.", 4, vertices.size());
    // Verify all sources
    JobVertex[] sources = new JobVertex[] { vertices.get(0), vertices.get(1) };
    for (JobVertex src : sources) {
        // Sanity check
        assertTrue("Unexpected vertex type. Test setup is broken.", src.isInputVertex());
        // The union is not translated to an extra union task; instead, the join uses a union
        // input gate to read multiple inputs. Each source creates one result per consumer.
        assertEquals("Unexpected number of created results.", 2, src.getNumberOfProducedIntermediateDataSets());
        for (IntermediateDataSet dataSet : src.getProducedDataSets()) {
            ResultPartitionType dsType = dataSet.getResultType();
            // Ensure a batch exchange unless PIPELINED_FORCED is enabled.
            if (!executionMode.equals(ExecutionMode.PIPELINED_FORCED)) {
                assertTrue("Expected batch exchange, but result type is " + dsType + ".", dsType.isBlocking());
            } else {
                assertFalse("Expected non-batch exchange, but result type is " + dsType + ".", dsType.isBlocking());
            }
        }
    }
}
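The fields executionMode, unionToJoin, unionToJoinStrategy, sourceToUnion, and sourceToUnionStrategy come from the test's parameterization: each variant pins the execution mode and the exchange decisions the optimizer is expected to make. A hedged sketch of how one variant could be wired up; the constructor shape is illustrative, not necessarily the exact signature in Flink:

    // Hypothetical parameterized constructor: each test variant fixes the
    // execution mode plus the exchange modes and ship strategies the optimizer
    // should pick for the source->union and union->join channels.
    public UnionClosedBranchingTest(
            ExecutionMode executionMode,
            DataExchangeMode sourceToUnion, ShipStrategyType sourceToUnionStrategy,
            DataExchangeMode unionToJoin, ShipStrategyType unionToJoinStrategy) {
        this.executionMode = executionMode;
        this.sourceToUnion = sourceToUnion;
        this.sourceToUnionStrategy = sourceToUnionStrategy;
        this.unionToJoin = unionToJoin;
        this.unionToJoinStrategy = unionToJoinStrategy;
    }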
Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.
The class JobGraphGeneratorTest, method testResourcesForChainedOperators.
/**
 * Verifies that the resources are merged correctly for chained operators when
 * generating the job graph.
 */
@Test
public void testResourcesForChainedOperators() throws Exception {
    ResourceSpec resource1 = new ResourceSpec(0.1, 100);
    ResourceSpec resource2 = new ResourceSpec(0.2, 200);
    ResourceSpec resource3 = new ResourceSpec(0.3, 300);
    ResourceSpec resource4 = new ResourceSpec(0.4, 400);
    ResourceSpec resource5 = new ResourceSpec(0.5, 500);
    ResourceSpec resource6 = new ResourceSpec(0.6, 600);
    ResourceSpec resource7 = new ResourceSpec(0.7, 700);
    Method opMethod = Operator.class.getDeclaredMethod("setResources", ResourceSpec.class);
    opMethod.setAccessible(true);
    Method sinkMethod = DataSink.class.getDeclaredMethod("setResources", ResourceSpec.class);
    sinkMethod.setAccessible(true);
    MapFunction<Long, Long> mapFunction = new MapFunction<Long, Long>() {
        @Override
        public Long map(Long value) throws Exception {
            return value;
        }
    };
    FilterFunction<Long> filterFunction = new FilterFunction<Long>() {
        @Override
        public boolean filter(Long value) throws Exception {
            return false;
        }
    };
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Long> input = env.fromElements(1L, 2L, 3L);
    opMethod.invoke(input, resource1);
    DataSet<Long> map1 = input.map(mapFunction);
    opMethod.invoke(map1, resource2);
    // CHAIN(Source -> Map -> Filter)
    DataSet<Long> filter1 = map1.filter(filterFunction);
    opMethod.invoke(filter1, resource3);
    IterativeDataSet<Long> startOfIteration = filter1.iterate(10);
    opMethod.invoke(startOfIteration, resource4);
    DataSet<Long> map2 = startOfIteration.map(mapFunction);
    opMethod.invoke(map2, resource5);
    // CHAIN(Map -> Filter)
    DataSet<Long> feedback = map2.filter(filterFunction);
    opMethod.invoke(feedback, resource6);
    DataSink<Long> sink = startOfIteration.closeWith(feedback).output(new DiscardingOutputFormat<Long>());
    sinkMethod.invoke(sink, resource7);
    Plan plan = env.createProgramPlan();
    Optimizer pc = new Optimizer(new Configuration());
    OptimizedPlan op = pc.compile(plan);
    JobGraphGenerator jgg = new JobGraphGenerator();
    JobGraph jobGraph = jgg.compileJobGraph(op);
    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    JobVertex sourceMapFilterVertex = vertices.get(0);
    JobVertex iterationHeadVertex = vertices.get(1);
    JobVertex feedbackVertex = vertices.get(2);
    JobVertex sinkVertex = vertices.get(3);
    JobVertex iterationSyncVertex = vertices.get(4);
    assertEquals(resource1.merge(resource2).merge(resource3), sourceMapFilterVertex.getMinResources());
    assertEquals(resource4, iterationHeadVertex.getPreferredResources());
    assertEquals(resource5.merge(resource6), feedbackVertex.getMinResources());
    assertEquals(resource7, sinkVertex.getPreferredResources());
    assertEquals(resource4, iterationSyncVertex.getMinResources());
}
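The assertions rely on ResourceSpec.merge summing the individual components, so the chained Source -> Map -> Filter vertex carries the totals of resource1 through resource3. A small illustration under that assumption, using the (cpuCores, heapMemoryInMB) constructor from the test:

    // Hedged illustration: merge() is expected to add components, so the chained
    // vertex describes roughly 0.6 CPU cores (0.1 + 0.2 + 0.3, subject to double
    // rounding) and 600 MB of heap (100 + 200 + 300).
    ResourceSpec merged = resource1.merge(resource2).merge(resource3);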
Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.
The class TempInIterationsTest, method testTempInIterationTest.
/*
 * Tests whether temp barriers are correctly set within iterations.
 */
@Test
public void testTempInIterationTest() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple2<Long, Long>> input = env.readCsvFile("file:///does/not/exist").types(Long.class, Long.class);
    DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration = input.iterateDelta(input, 1, 0);
    DataSet<Tuple2<Long, Long>> update = iteration.getWorkset().join(iteration.getSolutionSet()).where(0).equalTo(0).with(new DummyFlatJoinFunction<Tuple2<Long, Long>>());
    iteration.closeWith(update, update).output(new DiscardingOutputFormat<Tuple2<Long, Long>>());
    Plan plan = env.createProgramPlan();
    OptimizedPlan oPlan = (new Optimizer(new Configuration())).compile(plan);
    JobGraphGenerator jgg = new JobGraphGenerator();
    JobGraph jg = jgg.compileJobGraph(oPlan);
    boolean solutionSetUpdateChecked = false;
    for (JobVertex v : jg.getVertices()) {
        if (v.getName().equals("SolutionSet Delta")) {
            // check whether the input of the solution set delta is temped
            TaskConfig tc = new TaskConfig(v.getConfiguration());
            assertTrue(tc.isInputAsynchronouslyMaterialized(0));
            solutionSetUpdateChecked = true;
        }
    }
    assertTrue(solutionSetUpdateChecked);
}
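When the assertion fails, it can help to see which channels were actually temped. A hedged debugging sketch that iterates over every vertex; it assumes the vertex input count lines up with the TaskConfig input indices, which may not hold for every chained task:

    // Hedged sketch: print the materialization flag for each input of every
    // vertex to see which channels the optimizer decided to temp.
    for (JobVertex v : jg.getVertices()) {
        TaskConfig tc = new TaskConfig(v.getConfiguration());
        for (int i = 0; i < v.getNumberOfInputs(); i++) {
            System.out.println(v.getName() + " input " + i + " temped: "
                    + tc.isInputAsynchronouslyMaterialized(i));
        }
    }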
Use of org.apache.flink.runtime.jobgraph.JobGraph in project flink by apache.
The class JMXJobManagerMetricTest, method testJobManagerJMXMetricAccess.
/**
 * Tests that metrics registered on the JobManager are actually accessible via JMX.
 */
@Test
public void testJobManagerJMXMetricAccess() throws Exception {
    Deadline deadline = new FiniteDuration(2, TimeUnit.MINUTES).fromNow();
    Configuration flinkConfiguration = new Configuration();
    flinkConfiguration.setString(ConfigConstants.METRICS_REPORTERS_LIST, "test");
    flinkConfiguration.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, JMXReporter.class.getName());
    flinkConfiguration.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test.port", "9060-9075");
    flinkConfiguration.setString(ConfigConstants.METRICS_SCOPE_NAMING_JM_JOB, "jobmanager.<job_name>");
    TestingCluster flink = new TestingCluster(flinkConfiguration);
    try {
        flink.start();
        JobVertex sourceJobVertex = new JobVertex("Source");
        sourceJobVertex.setInvokableClass(BlockingInvokable.class);
        JobGraph jobGraph = new JobGraph("TestingJob", sourceJobVertex);
        jobGraph.setSnapshotSettings(new JobSnapshottingSettings(Collections.<JobVertexID>emptyList(), Collections.<JobVertexID>emptyList(), Collections.<JobVertexID>emptyList(), 500, 500, 50, 5, ExternalizedCheckpointSettings.none(), null, true));
        flink.waitForActorsToBeAlive();
        flink.submitJobDetached(jobGraph);
        Future<Object> jobRunning = flink.getLeaderGateway(deadline.timeLeft()).ask(new TestingJobManagerMessages.WaitForAllVerticesToBeRunning(jobGraph.getJobID()), deadline.timeLeft());
        Await.ready(jobRunning, deadline.timeLeft());
        MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
        Set<ObjectName> nameSet = mBeanServer.queryNames(new ObjectName("org.apache.flink.jobmanager.job.lastCheckpointSize:job_name=TestingJob,*"), null);
        Assert.assertEquals(1, nameSet.size());
        assertEquals(-1L, mBeanServer.getAttribute(nameSet.iterator().next(), "Value"));
        Future<Object> jobFinished = flink.getLeaderGateway(deadline.timeLeft()).ask(new TestingJobManagerMessages.NotifyWhenJobRemoved(jobGraph.getJobID()), deadline.timeLeft());
        BlockingInvokable.unblock();
        // wait until the job has finished
        Await.ready(jobFinished, deadline.timeLeft());
    } finally {
        flink.stop();
    }
}
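The test depends on a BlockingInvokable that keeps the job in a RUNNING state until the JMX assertions have executed. A hedged sketch of what such an invokable might look like; this is illustrative and not the exact class from the Flink test:

    // Hedged sketch: a task that blocks until the test releases it, so the job
    // stays RUNNING while the JMX metrics are queried. Illustrative only.
    public static class BlockingInvokable extends AbstractInvokable {
        private static final OneShotLatch LATCH = new OneShotLatch();

        @Override
        public void invoke() throws Exception {
            LATCH.await();
        }

        public static void unblock() {
            LATCH.trigger();
        }
    }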