Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class IntermediateResultPartitionTest, method createExecutionGraph:
public static ExecutionGraph createExecutionGraph(
        int producerParallelism,
        int consumerParallelism,
        int consumerMaxParallelism,
        DistributionPattern distributionPattern,
        boolean isDynamicGraph)
        throws Exception {
    final JobVertex v1 = new JobVertex("v1");
    v1.setInvokableClass(NoOpInvokable.class);
    v1.setParallelism(producerParallelism);

    final JobVertex v2 = new JobVertex("v2");
    v2.setInvokableClass(NoOpInvokable.class);
    if (consumerParallelism > 0) {
        v2.setParallelism(consumerParallelism);
    }
    if (consumerMaxParallelism > 0) {
        v2.setMaxParallelism(consumerMaxParallelism);
    }
    v2.connectNewDataSetAsInput(v1, distributionPattern, ResultPartitionType.BLOCKING);

    final JobGraph jobGraph =
            JobGraphBuilder.newBatchJobGraphBuilder()
                    .addJobVertices(Arrays.asList(v1, v2))
                    .build();

    final Configuration configuration = new Configuration();
    TestingDefaultExecutionGraphBuilder builder =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setJobGraph(jobGraph)
                    .setJobMasterConfig(configuration)
                    .setVertexParallelismStore(
                            computeVertexParallelismStoreConsideringDynamicGraph(
                                    jobGraph.getVertices(), isDynamicGraph, consumerMaxParallelism));

    if (isDynamicGraph) {
        return builder.buildDynamicGraph();
    } else {
        return builder.build();
    }
}
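A minimal usage sketch of this factory; the argument values below are illustrative assumptions, not taken from the original tests:

// Hypothetical call site: producer parallelism 2, consumer parallelism left
// unset (-1 lets the dynamic graph decide), consumer max parallelism 4.
final ExecutionGraph dynamicGraph =
        createExecutionGraph(2, -1, 4, DistributionPattern.ALL_TO_ALL, true);
// Both job vertices should be registered on the graph, even though a dynamic
// graph initializes its execution vertices lazily.
assertThat(dynamicGraph.getAllVertices()).hasSize(2);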
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class ExecutionVertexTest, method testFindLatestAllocationIgnoresFailedAttempts:
@Test
public void testFindLatestAllocationIgnoresFailedAttempts() throws Exception {
    final JobVertex source = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(source);
    final TestingPhysicalSlotProvider withLimitedAmountOfPhysicalSlots =
            TestingPhysicalSlotProvider.createWithLimitedAmountOfPhysicalSlots(1);
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .setExecutionSlotAllocatorFactory(
                            SchedulerTestingUtils.newSlotSharingExecutionSlotAllocatorFactory(
                                    withLimitedAmountOfPhysicalSlots))
                    .build();
    scheduler.startScheduling();

    final ExecutionJobVertex sourceExecutionJobVertex =
            scheduler.getExecutionJobVertex(source.getID());
    final ExecutionVertex sourceExecutionVertex =
            sourceExecutionJobVertex.getTaskVertices()[0];
    final Execution firstExecution = sourceExecutionVertex.getCurrentExecutionAttempt();

    final TestingPhysicalSlot physicalSlot =
            withLimitedAmountOfPhysicalSlots.getFirstResponseOrFail().join();
    final AllocationID allocationId = physicalSlot.getAllocationId();
    final TaskManagerLocation taskManagerLocation = physicalSlot.getTaskManagerLocation();

    cancelExecution(firstExecution);
    sourceExecutionVertex.resetForNewExecution();
    assertThat(sourceExecutionVertex.findLatestPriorAllocation()).hasValue(allocationId);
    assertThat(sourceExecutionVertex.findLatestPriorLocation()).hasValue(taskManagerLocation);

    final Execution secondExecution = sourceExecutionVertex.getCurrentExecutionAttempt();
    cancelExecution(secondExecution);
    sourceExecutionVertex.resetForNewExecution();
    assertThat(sourceExecutionVertex.findLatestPriorAllocation()).hasValue(allocationId);
    assertThat(sourceExecutionVertex.findLatestPriorLocation()).hasValue(taskManagerLocation);
}
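The cancelExecution helper is referenced but not shown above. A plausible sketch, assuming it drives the attempt through the full cancellation handshake so that resetForNewExecution becomes legal:

private static void cancelExecution(Execution execution) {
    // Request cancellation, then acknowledge it, moving the attempt from
    // CANCELING to CANCELED so the vertex can be reset for a new attempt.
    execution.cancel();
    execution.completeCancelling();
}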
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class PointwisePatternTest, method setUpExecutionGraphAndGetDownstreamVertex:
private ExecutionJobVertex setUpExecutionGraphAndGetDownstreamVertex(
        int upstream, int downstream) throws Exception {
    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    v1.setParallelism(upstream);
    v2.setParallelism(downstream);
    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
    ExecutionGraph eg =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setVertexParallelismStore(
                            SchedulerBase.computeVertexParallelismStore(ordered))
                    .build();
    try {
        eg.attachJobGraph(ordered);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }
    return eg.getAllVertices().get(v2.getID());
}
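A hypothetical call site for this helper; the parallelism values 7 and 3 and the test name are illustrative assumptions:

@Test
public void testHighToLowPointwisePattern() throws Exception {
    // 7 producers feed 3 consumers, so the POINTWISE pattern must spread the
    // 7 partitions across the 3 downstream subtasks.
    ExecutionJobVertex downstream = setUpExecutionGraphAndGetDownstreamVertex(7, 3);
    for (ExecutionVertex ev : downstream.getTaskVertices()) {
        // Each consumer subtask should be wired to at least one input partition.
        assertTrue(ev.getNumberOfInputs() > 0);
    }
}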
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method createSchedulerAndDeploy:
private DefaultScheduler createSchedulerAndDeploy(
        JobID jobId,
        JobVertex v1,
        JobVertex v2,
        DistributionPattern distributionPattern,
        BlobWriter blobWriter)
        throws Exception {
    v2.connectNewDataSetAsInput(v1, distributionPattern, ResultPartitionType.BLOCKING);
    final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
    final DefaultScheduler scheduler =
            createScheduler(jobId, ordered, blobWriter, mainThreadExecutor, ioExecutor);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();
    final TestingLogicalSlotBuilder slotBuilder = new TestingLogicalSlotBuilder();

    CompletableFuture.runAsync(
                    () -> {
                        try {
                            // Deploy upstream source vertices.
                            deployTasks(executionGraph, v1.getID(), slotBuilder);
                            // Transition upstream vertices into FINISHED.
                            transitionTasksToFinished(executionGraph, v1.getID());
                            // Deploy downstream sink vertices.
                            deployTasks(executionGraph, v2.getID(), slotBuilder);
                        } catch (Exception e) {
                            throw new RuntimeException("Exceptions shouldn't happen here.", e);
                        }
                    },
                    mainThreadExecutor)
            .join();
    return scheduler;
}
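deployTasks and transitionTasksToFinished are helpers defined elsewhere in the test class. A sketch of what transitionTasksToFinished plausibly does, assuming it reports a FINISHED state for every execution attempt of the given vertex through the graph's regular state-update path:

private static void transitionTasksToFinished(
        ExecutionGraph executionGraph, JobVertexID jobVertexId) {
    for (ExecutionVertex vertex :
            executionGraph.getJobVertex(jobVertexId).getTaskVertices()) {
        // Report FINISHED for the current attempt, as a TaskExecutor would
        // after the task completed successfully.
        executionGraph.updateState(
                new TaskExecutionStateTransition(
                        new TaskExecutionState(
                                vertex.getCurrentExecutionAttempt().getAttemptId(),
                                ExecutionState.FINISHED)));
    }
}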
Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForAllToAllEdgeAfterFinished:
private void testRemoveCacheForAllToAllEdgeAfterFinished(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();
    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);
    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.ALL_TO_ALL, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment.
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(PARALLELISM, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    // For the all-to-all edge, we transition all downstream tasks to FINISHED.
    CompletableFuture.runAsync(
                    () -> transitionTasksToFinished(executionGraph, v2.getID()), mainThreadExecutor)
            .join();
    ioExecutor.triggerAll();

    // The cache should be removed since the partitions are released.
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2));
    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
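A hypothetical driver for the parameterized helper above. With an offloading threshold of 0, every serialized ShuffleDescriptor goes through the blob store, so one blob would be expected while the deployment is cached and none after the cache is cleared; the constructor argument and the expected counts are assumptions for illustration:

@Test
public void testRemoveOffloadedCacheForAllToAllEdgeAfterFinished() throws Exception {
    // Threshold 0 forces offloading of the cached ShuffleDescriptor blob.
    testRemoveCacheForAllToAllEdgeAfterFinished(new TestingBlobWriter(0), 1, 0);
}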