Use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.
From the class ExecutionGraphPartitionReleaseTest, the method testStrategyNotifiedOfFinishedVerticesAndResultsRespected:
@Test
public void testStrategyNotifiedOfFinishedVerticesAndResultsRespected() throws Exception {
    // setup a simple pipeline of 3 operators with blocking partitions
    final JobVertex sourceVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex operatorVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex sinkVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    operatorVertex.connectNewDataSetAsInput(
            sourceVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    sinkVertex.connectNewDataSetAsInput(
            operatorVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

    // setup partition tracker to intercept partition release calls
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    final Queue<ResultPartitionID> releasedPartitions = new ArrayDeque<>();
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(
            partitionIds -> releasedPartitions.add(partitionIds.iterator().next()));

    final SchedulerBase scheduler =
            createScheduler(partitionTracker, sourceVertex, operatorVertex, sinkVertex);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // finish vertices one after another, and verify that the appropriate partitions are released
    mainThreadExecutor.execute(() -> {
        // finishing the source alone releases nothing; its blocking partition is still needed
        final Execution sourceExecution = getCurrentExecution(sourceVertex, executionGraph);
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(sourceExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        // once the consuming operator finishes, the source's blocking partition is released
        final Execution sourceExecution = getCurrentExecution(sourceVertex, executionGraph);
        final Execution operatorExecution = getCurrentExecution(operatorVertex, executionGraph);
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(operatorExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, hasSize(1));
        assertThat(
                releasedPartitions.remove(),
                equalTo(
                        new ResultPartitionID(
                                sourceExecution.getVertex().getProducedPartitions().keySet().iterator().next(),
                                sourceExecution.getAttemptId())));
    });
    mainThreadExecutor.execute(() -> {
        // finishing the sink releases the operator's partition in the same way
        final Execution operatorExecution = getCurrentExecution(operatorVertex, executionGraph);
        final Execution sinkExecution = getCurrentExecution(sinkVertex, executionGraph);
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(sinkExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, hasSize(1));
        assertThat(
                releasedPartitions.remove(),
                equalTo(
                        new ResultPartitionID(
                                operatorExecution.getVertex().getProducedPartitions().keySet().iterator().next(),
                                operatorExecution.getAttemptId())));
    });
}
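The core of this test is the interception pattern on TestingJobMasterPartitionTracker: whatever the scheduler stops tracking and releases is recorded for later assertions. A minimal sketch of that pattern, using only the setStopTrackingAndReleasePartitionsConsumer setter already shown above (the helper class and method names here are hypothetical):

import java.util.ArrayDeque;
import java.util.Queue;

import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;

class PartitionReleaseCaptureSketch {
    // Returns a tracker whose released partition IDs are collected into the given queue.
    static TestingJobMasterPartitionTracker captureReleasesInto(Queue<ResultPartitionID> releasedPartitions) {
        final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
        // Every stop-tracking-and-release call forwards the affected partition IDs to this consumer.
        partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasedPartitions::addAll);
        return partitionTracker;
    }
}

The queue can then be asserted on after each simulated state transition, exactly as the test above does after every FINISHED update.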
Use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.
From the class ExecutionVertexTest, the method testResetForNewExecutionReleasesPartitions:
@Test
public void testResetForNewExecutionReleasesPartitions() throws Exception {
    final JobVertex producerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex consumerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    consumerJobVertex.connectNewDataSetAsInput(
            producerJobVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

    // intercept partition release calls with a future that completes on the first release
    final CompletableFuture<Collection<ResultPartitionID>> releasePartitionsFuture = new CompletableFuture<>();
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasePartitionsFuture::complete);

    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(producerJobVertex, consumerJobVertex);
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .setPartitionTracker(partitionTracker)
                    .build();
    scheduler.startScheduling();

    final ExecutionJobVertex producerExecutionJobVertex =
            scheduler.getExecutionJobVertex(producerJobVertex.getID());
    final Execution execution =
            producerExecutionJobVertex.getTaskVertices()[0].getCurrentExecutionAttempt();

    // neither scheduling nor finishing the producer releases its blocking partition
    assertFalse(releasePartitionsFuture.isDone());
    execution.markFinished();
    assertFalse(releasePartitionsFuture.isDone());

    // resetting the producer for a new execution must release the partition it produced
    for (ExecutionVertex executionVertex : producerExecutionJobVertex.getTaskVertices()) {
        executionVertex.resetForNewExecution();
    }

    final IntermediateResultPartitionID intermediateResultPartitionID =
            producerExecutionJobVertex.getProducedDataSets()[0].getPartitions()[0].getPartitionId();
    final ResultPartitionID resultPartitionID =
            execution.getResultPartitionDeploymentDescriptor(intermediateResultPartitionID)
                    .get()
                    .getShuffleDescriptor()
                    .getResultPartitionID();
    assertThat(releasePartitionsFuture.get()).contains(resultPartitionID);
}
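This second test uses a CompletableFuture rather than a queue because it only cares about a single release event and wants to block on releasePartitionsFuture.get() at the end. That variant of the interception pattern can be reduced to the following sketch, again assuming only the consumer setter already used above (the helper class and method names are hypothetical):

import java.util.Collection;
import java.util.concurrent.CompletableFuture;

import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;

class PartitionReleaseFutureSketch {
    // Returns a future that completes with the IDs of the first batch of released partitions.
    static CompletableFuture<Collection<ResultPartitionID>> firstRelease(
            TestingJobMasterPartitionTracker partitionTracker) {
        final CompletableFuture<Collection<ResultPartitionID>> releasePartitionsFuture =
                new CompletableFuture<>();
        // Only the first release completes the future; later calls leave it unchanged.
        partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasePartitionsFuture::complete);
        return releasePartitionsFuture;
    }
}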