Search in sources:

Example 51 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

The class DefaultSchedulingPipelinedRegionTest defines the method gettingUnknownVertexThrowsException.

@Test
public void gettingUnknownVertexThrowsException() {
    // Build a pipelined region that contains no vertices and no result partitions.
    final Map<IntermediateResultPartitionID, DefaultResultPartition> partitionLookup = Collections.emptyMap();
    final DefaultSchedulingPipelinedRegion region =
            new DefaultSchedulingPipelinedRegion(Collections.emptySet(), partitionLookup::get);

    final ExecutionVertexID missingVertexId = new ExecutionVertexID(new JobVertexID(), 0);

    // Looking up a vertex that is not part of the region must fail with a
    // message that names the offending vertex id.
    try {
        region.getVertex(missingVertexId);
        fail("Expected exception not thrown");
    } catch (IllegalArgumentException e) {
        assertThat(e.getMessage(), containsString(missingVertexId + " not found"));
    }
}
Also used : ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Test(org.junit.Test)

Example 52 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

The class DefaultSchedulingPipelinedRegionTest defines the method returnsVertices.

@Test
public void returnsVertices() {
    // Single vertex in state CREATED; its partition retriever must never be invoked.
    final DefaultExecutionVertex singleVertex =
            new DefaultExecutionVertex(
                    new ExecutionVertexID(new JobVertexID(), 0),
                    Collections.emptyList(),
                    () -> ExecutionState.CREATED,
                    Collections.emptyList(),
                    partitionID -> {
                        throw new UnsupportedOperationException();
                    });

    final Map<IntermediateResultPartitionID, DefaultResultPartition> partitionLookup = Collections.emptyMap();
    final DefaultSchedulingPipelinedRegion region =
            new DefaultSchedulingPipelinedRegion(Collections.singleton(singleVertex), partitionLookup::get);

    // The region must expose exactly the one vertex it was constructed with.
    final Iterator<DefaultExecutionVertex> it = region.getVertices().iterator();
    assertThat(it.hasNext(), is(true));
    assertThat(it.next(), is(sameInstance(singleVertex)));
    assertThat(it.hasNext(), is(false));
}
Also used : ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Test(org.junit.Test)

Example 53 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

The class DefaultExecutionTopologyTest defines the method testResultPartitionOrThrow.

@Test
public void testResultPartitionOrThrow() {
    // A freshly generated id cannot be part of the topology under test.
    final IntermediateResultPartitionID unknownPartitionId = new IntermediateResultPartitionID();
    try {
        adapter.getResultPartition(unknownPartitionId);
        fail("get not exist result partition");
    } catch (IllegalArgumentException expected) {
        // expected: the requested partition does not exist in the topology
    }
}
Also used : IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Test(org.junit.Test)

Example 54 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

The class SchedulingPipelinedRegionComputeUtil defines the method getNonReconnectableConsumedResults.

private static Iterable<SchedulingResultPartition> getNonReconnectableConsumedResults(SchedulingExecutionVertex vertex, Function<IntermediateResultPartitionID, ? extends SchedulingResultPartition> resultPartitionRetriever) {
    // Collects every consumed result partition of the given vertex whose
    // result type is NOT reconnectable.
    final List<SchedulingResultPartition> collected = new ArrayList<>();
    for (ConsumedPartitionGroup group : vertex.getConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID id : group) {
            final SchedulingResultPartition partition = resultPartitionRetriever.apply(id);
            if (partition.getResultType().isReconnectable()) {
                // All partitions within one ConsumedPartitionGroup share the
                // same result type, so the rest of this group can be skipped.
                break;
            }
            collected.add(partition);
        }
    }
    return collected;
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) SchedulingResultPartition(org.apache.flink.runtime.scheduler.strategy.SchedulingResultPartition) ArrayList(java.util.ArrayList) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)

Example 55 with IntermediateResultPartitionID

use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.

The class TaskDeploymentDescriptorFactoryTest defines the method testCacheShuffleDescriptor.

private void testCacheShuffleDescriptor(TestingBlobWriter blobWriter) throws Exception {
    final JobID jobId = new JobID();
    final ExecutionJobVertex jobVertex = setupExecutionGraphAndGetVertex(jobId, blobWriter);
    final ExecutionVertex firstTaskVertex = jobVertex.getTaskVertices()[0];

    // Creating the deployment descriptor should populate the shuffle-descriptor cache.
    createTaskDeploymentDescriptor(firstTaskVertex);

    final IntermediateResult consumedResult = jobVertex.getInputs().get(0);
    final MaybeOffloaded<ShuffleDescriptor[]> maybeOffloaded =
            consumedResult.getCachedShuffleDescriptors(firstTaskVertex.getConsumedPartitionGroup(0));
    final ShuffleDescriptor[] cachedDescriptors = deserializeShuffleDescriptors(maybeOffloaded, jobId, blobWriter);

    // One cached descriptor per consumed partition, in the same order as the group.
    assertEquals(firstTaskVertex.getConsumedPartitionGroup(0).size(), cachedDescriptors.length);
    int position = 0;
    for (IntermediateResultPartitionID consumedPartitionId : firstTaskVertex.getConsumedPartitionGroup(0)) {
        assertEquals(consumedPartitionId, cachedDescriptors[position++].getResultPartitionID().getPartitionId());
    }
}
Also used : IntermediateResult(org.apache.flink.runtime.executiongraph.IntermediateResult) ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ShuffleDescriptor(org.apache.flink.runtime.shuffle.ShuffleDescriptor) JobID(org.apache.flink.api.common.JobID) ExecutionVertex(org.apache.flink.runtime.executiongraph.ExecutionVertex) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)

Aggregations

IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)66 Test (org.junit.Test)41 IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID)18 ConsumedPartitionGroup (org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup)14 ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID)13 JobID (org.apache.flink.api.common.JobID)12 ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID)12 JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID)11 ArrayList (java.util.ArrayList)10 TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor)10 ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID)10 InputGateDeploymentDescriptor (org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor)9 ResultPartitionDeploymentDescriptor (org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor)9 CompletableFuture (java.util.concurrent.CompletableFuture)8 Configuration (org.apache.flink.configuration.Configuration)8 ShuffleDescriptor (org.apache.flink.runtime.shuffle.ShuffleDescriptor)8 IOException (java.io.IOException)7 JobVertex (org.apache.flink.runtime.jobgraph.JobVertex)7 Collection (java.util.Collection)6 ExecutionConfig (org.apache.flink.api.common.ExecutionConfig)6