Search in sources :

Example 41 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

the class ExecutionGraphTestUtils method verifyGeneratedExecutionJobVertex.

// ------------------------------------------------------------------------
// graph vertex verifications
// ------------------------------------------------------------------------
/**
 * Verifies the generated {@link ExecutionJobVertex} for a given {@link JobVertex} in an {@link
 * ExecutionGraph}.
 *
 * @param executionGraph the generated execution graph
 * @param originJobVertex the vertex to verify for
 * @param inputJobVertices upstream vertices of the verified vertex, used to check inputs of
 *     generated vertex
 * @param outputJobVertices downstream vertices of the verified vertex, used to check produced
 *     data sets of generated vertex
 */
public static void verifyGeneratedExecutionJobVertex(
        ExecutionGraph executionGraph,
        JobVertex originJobVertex,
        @Nullable List<JobVertex> inputJobVertices,
        @Nullable List<JobVertex> outputJobVertices) {
    ExecutionJobVertex ejv = executionGraph.getAllVertices().get(originJobVertex.getID());
    assertNotNull(ejv);
    // verify basic properties
    assertEquals(originJobVertex.getParallelism(), ejv.getParallelism());
    assertEquals(executionGraph.getJobID(), ejv.getJobId());
    assertEquals(originJobVertex.getID(), ejv.getJobVertexId());
    assertEquals(originJobVertex, ejv.getJobVertex());
    // verify produced data sets
    if (outputJobVertices == null) {
        assertEquals(0, ejv.getProducedDataSets().length);
    } else {
        assertEquals(outputJobVertices.size(), ejv.getProducedDataSets().length);
        for (int i = 0; i < outputJobVertices.size(); i++) {
            assertEquals(originJobVertex.getProducedDataSets().get(i).getId(), ejv.getProducedDataSets()[i].getId());
            assertEquals(originJobVertex.getParallelism(), ejv.getProducedDataSets()[i].getPartitions().length);
        }
    }
    // verify task vertices for their basic properties and their inputs
    assertEquals(originJobVertex.getParallelism(), ejv.getTaskVertices().length);
    int subtaskIndex = 0;
    for (ExecutionVertex ev : ejv.getTaskVertices()) {
        assertEquals(executionGraph.getJobID(), ev.getJobId());
        assertEquals(originJobVertex.getID(), ev.getJobvertexId());
        assertEquals(originJobVertex.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
        assertEquals(subtaskIndex, ev.getParallelSubtaskIndex());
        if (inputJobVertices == null) {
            assertEquals(0, ev.getNumberOfInputs());
        } else {
            assertEquals(inputJobVertices.size(), ev.getNumberOfInputs());
            for (int i = 0; i < inputJobVertices.size(); i++) {
                ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(i);
                assertEquals(inputJobVertices.get(i).getParallelism(), consumedPartitionGroup.size());
                int expectedPartitionNum = 0;
                for (IntermediateResultPartitionID consumedPartitionId : consumedPartitionGroup) {
                    assertEquals(expectedPartitionNum, consumedPartitionId.getPartitionNumber());
                    expectedPartitionNum++;
                }
            }
        }
        subtaskIndex++;
    }
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)
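For context, the partition numbers compared in the loop above come from how an IntermediateResultPartitionID couples an IntermediateDataSetID with a partition index. The following is a minimal standalone sketch, not part of the test above, assuming only the public constructors and getters of the two ID classes:

import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;

public class PartitionIdSketch {
    public static void main(String[] args) {
        // One logical data set produced by a job vertex...
        IntermediateDataSetID dataSetId = new IntermediateDataSetID();
        // ...is split into numbered partitions, one per producing subtask.
        for (int partitionNum = 0; partitionNum < 4; partitionNum++) {
            IntermediateResultPartitionID partitionId =
                    new IntermediateResultPartitionID(dataSetId, partitionNum);
            // getPartitionNumber() returns the index the id was created with,
            // which is what the loop above checks against expectedPartitionNum.
            assert partitionId.getPartitionNumber() == partitionNum;
            assert partitionId.getIntermediateDataSetID().equals(dataSetId);
        }
    }
}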

Example 42 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

the class ExecutionGraphVariousFailuesTest method testFailingNotifyPartitionDataAvailable.

/**
 * Tests that a failing notifyPartitionDataAvailable call with a non-existing execution attempt
 * id will not fail the execution graph.
 */
@Test
public void testFailingNotifyPartitionDataAvailable() throws Exception {
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            JobGraphTestUtils.emptyJobGraph(),
                            ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .build();
    scheduler.startScheduling();
    final ExecutionGraph eg = scheduler.getExecutionGraph();
    assertEquals(JobStatus.RUNNING, eg.getState());
    ExecutionGraphTestUtils.switchAllVerticesToRunning(eg);
    IntermediateResultPartitionID intermediateResultPartitionId = new IntermediateResultPartitionID();
    ExecutionAttemptID producerId = new ExecutionAttemptID();
    ResultPartitionID resultPartitionId = new ResultPartitionID(intermediateResultPartitionId, producerId);
    try {
        scheduler.notifyPartitionDataAvailable(resultPartitionId);
        fail("Error expected.");
    } catch (IllegalStateException e) {
        // we expect this exception to occur
        assertThat(e.getMessage(), containsString("Cannot find execution for execution Id"));
    }
    assertEquals(JobStatus.RUNNING, eg.getState());
}
Also used : SchedulerBase(org.apache.flink.runtime.scheduler.SchedulerBase) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) Test(org.junit.Test)
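For reference, the ResultPartitionID constructed above simply pairs the logical IntermediateResultPartitionID with the ExecutionAttemptID of the producing attempt. A minimal standalone sketch of that composition, not taken from the test and assuming the usual getPartitionId()/getProducerId() accessors:

import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;

public class ResultPartitionIdSketch {
    public static void main(String[] args) {
        // Logical partition id plus the attempt that produces it...
        IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
        ExecutionAttemptID producerId = new ExecutionAttemptID();
        // ...yields the id under which the runtime tracks the physical partition.
        ResultPartitionID resultPartitionId = new ResultPartitionID(partitionId, producerId);
        // Both components can be recovered from the composed id.
        assert resultPartitionId.getPartitionId().equals(partitionId);
        assert resultPartitionId.getProducerId().equals(producerId);
    }
}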

Example 43 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

the class ExecutionVertexTest method testResetForNewExecutionReleasesPartitions.

@Test
public void testResetForNewExecutionReleasesPartitions() throws Exception {
    final JobVertex producerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex consumerJobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    consumerJobVertex.connectNewDataSetAsInput(producerJobVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    final CompletableFuture<Collection<ResultPartitionID>> releasePartitionsFuture = new CompletableFuture<>();
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasePartitionsFuture::complete);
    final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(producerJobVertex, consumerJobVertex);
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .setPartitionTracker(partitionTracker)
                    .build();
    scheduler.startScheduling();
    final ExecutionJobVertex producerExecutionJobVertex = scheduler.getExecutionJobVertex(producerJobVertex.getID());
    Execution execution = producerExecutionJobVertex.getTaskVertices()[0].getCurrentExecutionAttempt();
    assertFalse(releasePartitionsFuture.isDone());
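    // Finishing the producer alone must not release its BLOCKING result partition.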
    execution.markFinished();
    assertFalse(releasePartitionsFuture.isDone());
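    // Resetting the producer for a new execution is what should trigger the release.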
    for (ExecutionVertex executionVertex : producerExecutionJobVertex.getTaskVertices()) {
        executionVertex.resetForNewExecution();
    }
    final IntermediateResultPartitionID intermediateResultPartitionID =
            producerExecutionJobVertex.getProducedDataSets()[0].getPartitions()[0].getPartitionId();
    final ResultPartitionID resultPartitionID =
            execution
                    .getResultPartitionDeploymentDescriptor(intermediateResultPartitionID)
                    .get()
                    .getShuffleDescriptor()
                    .getResultPartitionID();
    assertThat(releasePartitionsFuture.get()).contains(resultPartitionID);
}
Also used : TestingJobMasterPartitionTracker(org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) CompletableFuture(java.util.concurrent.CompletableFuture) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) Collection(java.util.Collection) SchedulerBase(org.apache.flink.runtime.scheduler.SchedulerBase) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) Test(org.junit.Test)

Example 44 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

the class PointwisePatternTest method testHighToLow.

private void testHighToLow(int highDop, int lowDop) throws Exception {
    if (highDop < lowDop) {
        throw new IllegalArgumentException();
    }
    final int factor = highDop / lowDop;
    final int delta = highDop % lowDop == 0 ? 0 : 1;
    ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(highDop, lowDop);
    int[] timesUsed = new int[highDop];
    for (ExecutionVertex ev : target.getTaskVertices()) {
        assertEquals(1, ev.getNumberOfInputs());
        List<IntermediateResultPartitionID> consumedPartitions = new ArrayList<>();
        for (ConsumedPartitionGroup partitionGroup : ev.getAllConsumedPartitionGroups()) {
            for (IntermediateResultPartitionID partitionId : partitionGroup) {
                consumedPartitions.add(partitionId);
            }
        }
        assertTrue(consumedPartitions.size() >= factor && consumedPartitions.size() <= factor + delta);
        for (IntermediateResultPartitionID consumedPartition : consumedPartitions) {
            timesUsed[consumedPartition.getPartitionNumber()]++;
        }
    }
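    // Every upstream partition must be consumed by exactly one downstream subtask.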
    for (int used : timesUsed) {
        assertEquals(1, used);
    }
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) ArrayList(java.util.ArrayList) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)
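To make the factor/delta bound above concrete, take hypothetical values highDop = 7 and lowDop = 3 (not drawn from the test data): factor = 2 and delta = 1, so every consumer subtask must see either 2 or 3 partitions while all 7 are consumed exactly once. A plain-Java sketch of one split that satisfies those assertions, independent of Flink's actual assignment logic:

public class HighToLowSplitSketch {
    public static void main(String[] args) {
        int highDop = 7;
        int lowDop = 3;
        int factor = highDop / lowDop;               // 2
        int delta = highDop % lowDop == 0 ? 0 : 1;   // 1
        // Hand the first (highDop % lowDop) consumers factor + delta partitions and the
        // remaining consumers factor partitions; together they cover every partition once.
        int next = 0;
        for (int consumer = 0; consumer < lowDop; consumer++) {
            int count = consumer < highDop % lowDop ? factor + delta : factor;
            System.out.printf("consumer %d <- partitions [%d, %d)%n", consumer, next, next + count);
            next += count;
        }
    }
}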

Example 45 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

the class PointwisePatternTest method testConnections.

/**
 * Verify the connections between upstream result partitions and downstream vertices.
 */
private void testConnections(int sourceParallelism, int targetParallelism, int[][] expectedConsumedPartitionNumber) throws Exception {
    ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(sourceParallelism, targetParallelism);
    for (int vertexIndex = 0; vertexIndex < target.getTaskVertices().length; vertexIndex++) {
        ExecutionVertex ev = target.getTaskVertices()[vertexIndex];
        ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(0);
        assertEquals(expectedConsumedPartitionNumber[vertexIndex].length, consumedPartitionGroup.size());
        int partitionIndex = 0;
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            assertEquals(expectedConsumedPartitionNumber[vertexIndex][partitionIndex++], partitionId.getPartitionNumber());
        }
    }
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)
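A hypothetical invocation (not copied from the test class) illustrates the shape of the expected array for an evenly divisible 4-to-2 pointwise connection, where each consumer subtask is assumed to read two consecutive upstream partitions:

testConnections(4, 2, new int[][] {{0, 1}, {2, 3}});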

Aggregations

IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) 66
Test (org.junit.Test) 41
IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID) 18
ConsumedPartitionGroup (org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) 14
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID) 13
JobID (org.apache.flink.api.common.JobID) 12
ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) 12
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 11
ArrayList (java.util.ArrayList) 10
TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) 10
ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID) 10
InputGateDeploymentDescriptor (org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor) 9
ResultPartitionDeploymentDescriptor (org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor) 9
CompletableFuture (java.util.concurrent.CompletableFuture) 8
Configuration (org.apache.flink.configuration.Configuration) 8
ShuffleDescriptor (org.apache.flink.runtime.shuffle.ShuffleDescriptor) 8
IOException (java.io.IOException) 7
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 7
Collection (java.util.Collection) 6
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig) 6