use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
From the class DefaultExecutionGraphDeploymentTest, method testRegistrationOfExecutionsCanceled.
@Test
public void testRegistrationOfExecutionsCanceled() {
    try {
        final JobVertexID jid1 = new JobVertexID();
        final JobVertexID jid2 = new JobVertexID();
        JobVertex v1 = new JobVertex("v1", jid1);
        JobVertex v2 = new JobVertex("v2", jid2);
        SchedulerBase scheduler = setupScheduler(v1, 19, v2, 37);
        Collection<Execution> executions =
                new ArrayList<>(scheduler.getExecutionGraph().getRegisteredExecutions().values());
        for (Execution e : executions) {
            e.cancel();
            e.completeCancelling();
        }
        assertEquals(0, scheduler.getExecutionGraph().getRegisteredExecutions().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
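For reference, the two-argument constructor used above is what lets a test pin a known JobVertexID to a vertex. A minimal, self-contained sketch of that pattern (the names and parallelism are illustrative, not taken from the test):
JobVertexID id = new JobVertexID();
JobVertex vertex = new JobVertex("myVertex", id); // vertex.getID() returns the supplied id
vertex.setParallelism(4); // illustrative parallelism
vertex.setInvokableClass(AbstractInvokable.class);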
use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
From the class EdgeManagerBuildUtilTest, method setupExecutionGraph.
private Pair<ExecutionJobVertex, ExecutionJobVertex> setupExecutionGraph(
        int upstream, int downstream, DistributionPattern pattern) throws Exception {
    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    v1.setParallelism(upstream);
    v2.setParallelism(downstream);
    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);
    v2.connectNewDataSetAsInput(v1, pattern, ResultPartitionType.PIPELINED);
    List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
    ExecutionGraph eg =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setVertexParallelismStore(SchedulerBase.computeVertexParallelismStore(ordered))
                    .build();
    eg.attachJobGraph(ordered);
    return Pair.of(eg.getAllVertices().get(v1.getID()), eg.getAllVertices().get(v2.getID()));
}
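A hedged usage sketch of this helper, assuming Pair is org.apache.commons.lang3.tuple.Pair (consistent with the Pair.of call above); the 2 -> 3 ALL_TO_ALL fan-out is illustrative:
Pair<ExecutionJobVertex, ExecutionJobVertex> vertices =
        setupExecutionGraph(2, 3, DistributionPattern.ALL_TO_ALL);
ExecutionJobVertex producer = vertices.getLeft();  // upstream, parallelism 2
ExecutionJobVertex consumer = vertices.getRight(); // downstream, parallelism 3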
use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
From the class ExecutionGraphPartitionReleaseTest, method testStrategyNotifiedOfUnFinishedVertices.
@Test
public void testStrategyNotifiedOfUnFinishedVertices() throws Exception {
    // setup a pipeline of 2 failover regions (f1 -> f2), where
    // f1 is just a source
    // f2 consists of 3 operators (o1,o2,o3), where o1 consumes f1, and o2/o3 consume o1
    final JobVertex sourceVertex = ExecutionGraphTestUtils.createNoOpVertex("source", 1);
    final JobVertex operator1Vertex = ExecutionGraphTestUtils.createNoOpVertex("operator1", 1);
    final JobVertex operator2Vertex = ExecutionGraphTestUtils.createNoOpVertex("operator2", 1);
    final JobVertex operator3Vertex = ExecutionGraphTestUtils.createNoOpVertex("operator3", 1);
    operator1Vertex.connectNewDataSetAsInput(
            sourceVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    operator2Vertex.connectNewDataSetAsInput(
            operator1Vertex, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    operator3Vertex.connectNewDataSetAsInput(
            operator1Vertex, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    // setup partition tracker to intercept partition release calls
    final TestingJobMasterPartitionTracker partitionTracker =
            new TestingJobMasterPartitionTracker();
    final Queue<ResultPartitionID> releasedPartitions = new ArrayDeque<>();
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(
            partitionIds -> releasedPartitions.add(partitionIds.iterator().next()));
    final SchedulerBase scheduler =
            createScheduler(partitionTracker, sourceVertex, operator1Vertex, operator2Vertex, operator3Vertex);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();
    mainThreadExecutor.execute(() -> {
        final Execution sourceExecution = getCurrentExecution(sourceVertex, executionGraph);
        // finish the source; this should not result in any release calls since the
        // consumer o1 was not finished
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(sourceExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator1Execution = getCurrentExecution(operator1Vertex, executionGraph);
        // finish o1 and make its partitions available; this should not result in any
        // release calls since not all operators of the pipelined region are finished
        for (final IntermediateResultPartitionID partitionId :
                operator1Execution.getVertex().getProducedPartitions().keySet()) {
            scheduler.notifyPartitionDataAvailable(
                    new ResultPartitionID(partitionId, operator1Execution.getAttemptId()));
        }
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(operator1Execution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator2Execution = getCurrentExecution(operator2Vertex, executionGraph);
        // finish o2; this should not result in any release calls since o3 was not
        // finished
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(operator2Execution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator2Execution = getCurrentExecution(operator2Vertex, executionGraph);
        // reset o2
        operator2Execution.getVertex().resetForNewExecution();
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator3Execution = getCurrentExecution(operator3Vertex, executionGraph);
        // finish o3; this should not result in any release calls since o2 was reset
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(operator3Execution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
}
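The test relies on a getCurrentExecution helper that is not shown in this excerpt. A plausible sketch, assuming it returns the current attempt of the vertex's only subtask (every vertex above has parallelism 1):
private static Execution getCurrentExecution(JobVertex jobVertex, ExecutionGraph executionGraph) {
    // Look up the ExecutionJobVertex by ID and take the single subtask's current attempt.
    return executionGraph
            .getJobVertex(jobVertex.getID())
            .getTaskVertices()[0]
            .getCurrentExecutionAttempt();
}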
use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
From the class ExecutionJobVertexTest, method createDynamicExecutionJobVertex.
public static ExecutionJobVertex createDynamicExecutionJobVertex(
        int parallelism, int maxParallelism, int defaultMaxParallelism) throws Exception {
    JobVertex jobVertex = new JobVertex("testVertex");
    jobVertex.setInvokableClass(AbstractInvokable.class);
    jobVertex.createAndAddResultDataSet(new IntermediateDataSetID(), ResultPartitionType.BLOCKING);
    if (maxParallelism > 0) {
        jobVertex.setMaxParallelism(maxParallelism);
    }
    if (parallelism > 0) {
        jobVertex.setParallelism(parallelism);
    }
    final DefaultExecutionGraph eg = TestingDefaultExecutionGraphBuilder.newBuilder().build();
    final VertexParallelismStore vertexParallelismStore =
            AdaptiveBatchScheduler.computeVertexParallelismStoreForDynamicGraph(
                    Collections.singletonList(jobVertex), defaultMaxParallelism);
    final VertexParallelismInformation vertexParallelismInfo =
            vertexParallelismStore.getParallelismInfo(jobVertex.getID());
    return new ExecutionJobVertex(eg, jobVertex, vertexParallelismInfo);
}
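A hedged usage sketch of the factory: with parallelism and max parallelism left unset (-1), the vertex stays undecided until the adaptive batch scheduler picks a value; the -1/128 arguments and the isParallelismDecided check are assumptions for illustration:
ExecutionJobVertex ejv = createDynamicExecutionJobVertex(-1, -1, 128);
// assumption: ExecutionJobVertex exposes isParallelismDecided() for dynamic graphs
assertFalse(ejv.isParallelismDecided());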
use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
From the class JobDispatcherITCase, method generateAndPersistJobGraph.
private JobID generateAndPersistJobGraph(
        Configuration configuration, long checkpointInterval, Path tmpPath) throws Exception {
    final JobVertex jobVertex = new JobVertex("jobVertex");
    jobVertex.setInvokableClass(AtLeastOneCheckpointInvokable.class);
    jobVertex.setParallelism(1);
    final CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration =
            CheckpointCoordinatorConfiguration.builder()
                    .setCheckpointInterval(checkpointInterval)
                    .build();
    final JobCheckpointingSettings checkpointingSettings =
            new JobCheckpointingSettings(checkpointCoordinatorConfiguration, null);
    final JobGraph jobGraph =
            JobGraphBuilder.newStreamingJobGraphBuilder()
                    .addJobVertex(jobVertex)
                    .setJobCheckpointingSettings(checkpointingSettings)
                    .build();
    final Path jobGraphPath = tmpPath.resolve(JOB_GRAPH_FILE_PATH.defaultValue());
    try (ObjectOutputStream objectOutputStream =
            new ObjectOutputStream(Files.newOutputStream(jobGraphPath, CREATE))) {
        objectOutputStream.writeObject(jobGraph);
    }
    configuration.setString(JOB_GRAPH_FILE_PATH.key(), jobGraphPath.toString());
    return jobGraph.getJobID();
}
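Since the write path uses plain Java serialization, the dispatcher side presumably deserializes the file the same way; a minimal mirror-image sketch under that assumption:
try (ObjectInputStream objectInputStream =
        new ObjectInputStream(Files.newInputStream(jobGraphPath))) {
    JobGraph restored = (JobGraph) objectInputStream.readObject();
    // restored.getJobID() should match the ID returned by generateAndPersistJobGraph
}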