
Example 6 with TestingJobMasterPartitionTracker

Use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.

The class ExecutionGraphPartitionReleaseTest, method testStrategyNotifiedOfFinishedVerticesAndResultsRespected:

@Test
public void testStrategyNotifiedOfFinishedVerticesAndResultsRespected() throws Exception {
    // setup a simple pipeline of 3 operators with blocking partitions
    final JobVertex sourceVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex operatorVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex sinkVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    operatorVertex.connectNewDataSetAsInput(sourceVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    sinkVertex.connectNewDataSetAsInput(operatorVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    // setup partition tracker to intercept partition release calls
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    final Queue<ResultPartitionID> releasedPartitions = new ArrayDeque<>();
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(partitionIds -> releasedPartitions.add(partitionIds.iterator().next()));
    final SchedulerBase scheduler = createScheduler(partitionTracker, sourceVertex, operatorVertex, sinkVertex);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();
    // finish vertices one after another, and verify that the appropriate partitions are
    // released
    mainThreadExecutor.execute(() -> {
        final Execution sourceExecution = getCurrentExecution(sourceVertex, executionGraph);
        scheduler.updateTaskExecutionState(new TaskExecutionState(sourceExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution sourceExecution = getCurrentExecution(sourceVertex, executionGraph);
        final Execution operatorExecution = getCurrentExecution(operatorVertex, executionGraph);
        scheduler.updateTaskExecutionState(new TaskExecutionState(operatorExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, hasSize(1));
        assertThat(
                releasedPartitions.remove(),
                equalTo(
                        new ResultPartitionID(
                                sourceExecution.getVertex().getProducedPartitions().keySet().iterator().next(),
                                sourceExecution.getAttemptId())));
    });
    mainThreadExecutor.execute(() -> {
        final Execution operatorExecution = getCurrentExecution(operatorVertex, executionGraph);
        final Execution sinkExecution = getCurrentExecution(sinkVertex, executionGraph);
        scheduler.updateTaskExecutionState(new TaskExecutionState(sinkExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, hasSize(1));
        assertThat(
                releasedPartitions.remove(),
                equalTo(
                        new ResultPartitionID(
                                operatorExecution.getVertex().getProducedPartitions().keySet().iterator().next(),
                                operatorExecution.getAttemptId())));
    });
}
Also used: JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), TestingJobMasterPartitionTracker (org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker), SchedulerBase (org.apache.flink.runtime.scheduler.SchedulerBase), IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID), ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID), ArrayDeque (java.util.ArrayDeque), TaskExecutionState (org.apache.flink.runtime.taskmanager.TaskExecutionState), Test (org.junit.Test)
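
Both examples rely on the same interception pattern: register a consumer on the TestingJobMasterPartitionTracker and assert on whatever collection of ResultPartitionIDs the scheduler hands to it. Below is a minimal sketch of that pattern in isolation, not verbatim Flink code; it assumes the types listed above under "Also used" (plus the Hamcrest assertThat/empty static imports used in Example 6) and elides the scheduler wiring.

// a testing tracker that records release requests instead of touching real network resources
final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
final Queue<ResultPartitionID> releasedPartitions = new ArrayDeque<>();

// every stop-tracking-and-release call issued by the scheduler lands in the queue
partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasedPartitions::addAll);

// ... hand partitionTracker to the scheduler under test and drive executions to FINISHED ...

// assertions then run against the recorded queue
assertThat(releasedPartitions, empty());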

Example 7 with TestingJobMasterPartitionTracker

Use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.

The class ExecutionVertexTest, method testResetForNewExecutionReleasesPartitions:

@Test
public void testResetForNewExecutionReleasesPartitions() throws Exception {
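    // set up a producer -> consumer pair connected by a blocking partition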
    final JobVertex producerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex consumerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    consumerJobVertex.connectNewDataSetAsInput(producerJobVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
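    // intercept partition release calls by completing a future with the released partition IDs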
    final CompletableFuture<Collection<ResultPartitionID>> releasePartitionsFuture = new CompletableFuture<>();
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasePartitionsFuture::complete);
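    // build and start a scheduler that uses the testing partition tracker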
    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(producerJobVertex, consumerJobVertex);
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .setPartitionTracker(partitionTracker)
                    .build();
    scheduler.startScheduling();
    final ExecutionJobVertex producerExecutionJobVertex = scheduler.getExecutionJobVertex(producerJobVertex.getID());
    Execution execution = producerExecutionJobVertex.getTaskVertices()[0].getCurrentExecutionAttempt();
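    // the blocking partition must not be released before or immediately after the producer finishes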
    assertFalse(releasePartitionsFuture.isDone());
    execution.markFinished();
    assertFalse(releasePartitionsFuture.isDone());
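    // resetting the producer vertex for a new execution should trigger the release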
    for (ExecutionVertex executionVertex : producerExecutionJobVertex.getTaskVertices()) {
        executionVertex.resetForNewExecution();
    }
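    // the released partition must be the producer's blocking result partition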
    final IntermediateResultPartitionID intermediateResultPartitionID =
            producerExecutionJobVertex.getProducedDataSets()[0].getPartitions()[0].getPartitionId();
    final ResultPartitionID resultPartitionID =
            execution
                    .getResultPartitionDeploymentDescriptor(intermediateResultPartitionID)
                    .get()
                    .getShuffleDescriptor()
                    .getResultPartitionID();
    assertThat(releasePartitionsFuture.get()).contains(resultPartitionID);
}
Also used: TestingJobMasterPartitionTracker (org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker), CompletableFuture (java.util.concurrent.CompletableFuture), JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), Collection (java.util.Collection), SchedulerBase (org.apache.flink.runtime.scheduler.SchedulerBase), IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID), ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID), Test (org.junit.Test)
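
A note on the two capture styles: Example 6 records releases in a Queue because it asserts on several release calls in sequence, while this example completes a CompletableFuture<Collection<ResultPartitionID>> because it expects a single release event and blocks on releasePartitionsFuture.get() until it occurs. Both go through the same setStopTrackingAndReleasePartitionsConsumer hook, which simply receives the collection of released partition IDs.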

Aggregations

TestingJobMasterPartitionTracker (org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) 7
ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID) 6
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) 5
Test (org.junit.Test) 5
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 4
ArrayDeque (java.util.ArrayDeque) 3
Collection (java.util.Collection) 3
CompletableFuture (java.util.concurrent.CompletableFuture) 3
TaskExecutionState (org.apache.flink.runtime.taskmanager.TaskExecutionState) 3
HashMap (java.util.HashMap) 2
Map (java.util.Map) 2
Function (java.util.function.Function) 2
Configuration (org.apache.flink.configuration.Configuration) 2
ResourceID (org.apache.flink.runtime.clusterframework.types.ResourceID) 2
ResultPartitionDeploymentDescriptor (org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor) 2
SchedulerBase (org.apache.flink.runtime.scheduler.SchedulerBase) 2
TestLogger (org.apache.flink.util.TestLogger) 2
Before (org.junit.Before) 2
File (java.io.File) 1
IOException (java.io.IOException) 1