Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
From class DefaultSchedulingPipelinedRegionTest, method gettingUnknownVertexThrowsException:
@Test
public void gettingUnknownVertexThrowsException() {
    final Map<IntermediateResultPartitionID, DefaultResultPartition> resultPartitionById =
            Collections.emptyMap();
    final DefaultSchedulingPipelinedRegion pipelinedRegion =
            new DefaultSchedulingPipelinedRegion(Collections.emptySet(), resultPartitionById::get);
    final ExecutionVertexID unknownVertexId = new ExecutionVertexID(new JobVertexID(), 0);
    try {
        pipelinedRegion.getVertex(unknownVertexId);
        fail("Expected exception not thrown");
    } catch (IllegalArgumentException e) {
        assertThat(e.getMessage(), containsString(unknownVertexId + " not found"));
    }
}
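The try/fail/catch idiom above predates JUnit's assertThrows; a minimal sketch of the same assertion written with a static import of org.junit.Assert.assertThrows (available since JUnit 4.13), assuming the same fixtures:

@Test
public void gettingUnknownVertexThrowsException() {
    final Map<IntermediateResultPartitionID, DefaultResultPartition> resultPartitionById =
            Collections.emptyMap();
    final DefaultSchedulingPipelinedRegion pipelinedRegion =
            new DefaultSchedulingPipelinedRegion(Collections.emptySet(), resultPartitionById::get);
    final ExecutionVertexID unknownVertexId = new ExecutionVertexID(new JobVertexID(), 0);

    // assertThrows returns the thrown exception, so its message can still be inspected.
    final IllegalArgumentException e =
            assertThrows(
                    IllegalArgumentException.class,
                    () -> pipelinedRegion.getVertex(unknownVertexId));
    assertThat(e.getMessage(), containsString(unknownVertexId + " not found"));
}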
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
From class DefaultSchedulingPipelinedRegionTest, method returnsVertices:
@Test
public void returnsVertices() {
    final DefaultExecutionVertex vertex =
            new DefaultExecutionVertex(
                    new ExecutionVertexID(new JobVertexID(), 0),
                    Collections.emptyList(),
                    () -> ExecutionState.CREATED,
                    Collections.emptyList(),
                    partitionID -> {
                        throw new UnsupportedOperationException();
                    });
    final Set<DefaultExecutionVertex> vertices = Collections.singleton(vertex);
    final Map<IntermediateResultPartitionID, DefaultResultPartition> resultPartitionById =
            Collections.emptyMap();
    final DefaultSchedulingPipelinedRegion pipelinedRegion =
            new DefaultSchedulingPipelinedRegion(vertices, resultPartitionById::get);
    final Iterator<DefaultExecutionVertex> vertexIterator =
            pipelinedRegion.getVertices().iterator();
    assertThat(vertexIterator.hasNext(), is(true));
    assertThat(vertexIterator.next(), is(sameInstance(vertex)));
    assertThat(vertexIterator.hasNext(), is(false));
}
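The three iterator assertions can be collapsed with Hamcrest's contains matcher, which checks that the iterable holds exactly the given items in order; a one-line sketch, assuming a static import of org.hamcrest.Matchers.contains:

assertThat(pipelinedRegion.getVertices(), contains(sameInstance(vertex)));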
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
From class DefaultExecutionTopologyTest, method testResultPartitionOrThrow:
@Test
public void testResultPartitionOrThrow() {
    try {
        adapter.getResultPartition(new IntermediateResultPartitionID());
        fail("Expected IllegalArgumentException for a non-existent result partition");
    } catch (IllegalArgumentException exception) {
        // expected
    }
}
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
From class SchedulingPipelinedRegionComputeUtil, method getNonReconnectableConsumedResults:
private static Iterable<SchedulingResultPartition> getNonReconnectableConsumedResults(
        SchedulingExecutionVertex vertex,
        Function<IntermediateResultPartitionID, ? extends SchedulingResultPartition>
                resultPartitionRetriever) {
    List<SchedulingResultPartition> nonReconnectableConsumedResults = new ArrayList<>();
    for (ConsumedPartitionGroup consumedPartitionGroup : vertex.getConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            SchedulingResultPartition consumedResult =
                    resultPartitionRetriever.apply(partitionId);
            if (consumedResult.getResultType().isReconnectable()) {
                // The result types of all partitions in one ConsumedPartitionGroup are the
                // same, so the remaining partitions in this group can be skipped as well.
                break;
            }
            nonReconnectableConsumedResults.add(consumedResult);
        }
    }
    return nonReconnectableConsumedResults;
}
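The early break works because result types are uniform within a ConsumedPartitionGroup: probing any one partition decides the whole group. A sketch of an equivalent loop body that makes this group-level decision explicit, under the same uniformity invariant:

for (ConsumedPartitionGroup consumedPartitionGroup : vertex.getConsumedPartitionGroups()) {
    final Iterator<IntermediateResultPartitionID> ids = consumedPartitionGroup.iterator();
    if (!ids.hasNext()) {
        continue; // empty group, nothing to probe
    }
    final SchedulingResultPartition first = resultPartitionRetriever.apply(ids.next());
    if (first.getResultType().isReconnectable()) {
        continue; // uniform types: the whole group is reconnectable
    }
    nonReconnectableConsumedResults.add(first);
    ids.forEachRemaining(
            id -> nonReconnectableConsumedResults.add(resultPartitionRetriever.apply(id)));
}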
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
From class TaskDeploymentDescriptorFactoryTest, method testCacheShuffleDescriptor:
private void testCacheShuffleDescriptor(TestingBlobWriter blobWriter) throws Exception {
    final JobID jobId = new JobID();
    final ExecutionJobVertex ejv = setupExecutionGraphAndGetVertex(jobId, blobWriter);
    final ExecutionVertex ev21 = ejv.getTaskVertices()[0];
    createTaskDeploymentDescriptor(ev21);

    // The ShuffleDescriptors should be cached
    final IntermediateResult consumedResult = ejv.getInputs().get(0);
    final MaybeOffloaded<ShuffleDescriptor[]> maybeOffloaded =
            consumedResult.getCachedShuffleDescriptors(ev21.getConsumedPartitionGroup(0));
    final ShuffleDescriptor[] cachedShuffleDescriptors =
            deserializeShuffleDescriptors(maybeOffloaded, jobId, blobWriter);

    // Check if the ShuffleDescriptors are cached correctly
    assertEquals(ev21.getConsumedPartitionGroup(0).size(), cachedShuffleDescriptors.length);
    int idx = 0;
    for (IntermediateResultPartitionID consumedPartitionId : ev21.getConsumedPartitionGroup(0)) {
        assertEquals(
                consumedPartitionId,
                cachedShuffleDescriptors[idx++].getResultPartitionID().getPartitionId());
    }
}
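The index-based loop above relies on the cached array following the group's iteration order; the same check can be phrased as a single list comparison. A sketch, assuming java.util.Arrays and java.util.stream.Collectors are imported:

final List<IntermediateResultPartitionID> expectedIds = new ArrayList<>();
ev21.getConsumedPartitionGroup(0).forEach(expectedIds::add);

final List<IntermediateResultPartitionID> actualIds =
        Arrays.stream(cachedShuffleDescriptors)
                .map(descriptor -> descriptor.getResultPartitionID().getPartitionId())
                .collect(Collectors.toList());

// A list comparison checks both the count and the order in one assertion.
assertEquals(expectedIds, actualIds);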