use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.
From class DefaultSchedulingPipelinedRegionTest, method returnsVertices.
@Test
public void returnsVertices() {
    // A single vertex whose partition-lookup callback is never expected to be invoked.
    final DefaultExecutionVertex executionVertex =
            new DefaultExecutionVertex(
                    new ExecutionVertexID(new JobVertexID(), 0),
                    Collections.emptyList(),
                    () -> ExecutionState.CREATED,
                    Collections.emptyList(),
                    partitionId -> {
                        throw new UnsupportedOperationException();
                    });

    final Set<DefaultExecutionVertex> regionVertices = Collections.singleton(executionVertex);
    final Map<IntermediateResultPartitionID, DefaultResultPartition> partitionsById =
            Collections.emptyMap();
    final DefaultSchedulingPipelinedRegion pipelinedRegion =
            new DefaultSchedulingPipelinedRegion(regionVertices, partitionsById::get);

    // The region must expose exactly the one vertex it was constructed with.
    final Iterator<DefaultExecutionVertex> iterator = pipelinedRegion.getVertices().iterator();
    assertThat(iterator.hasNext(), is(true));
    assertThat(iterator.next(), is(sameInstance(executionVertex)));
    assertThat(iterator.hasNext(), is(false));
}
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.
From class TestExecutionSlotAllocator, method createSlotVertexAssignments.
/**
 * Creates one slot assignment per requested execution vertex, each backed by a fresh,
 * still-uncompleted logical-slot future.
 */
private List<SlotExecutionVertexAssignment> createSlotVertexAssignments(final Collection<ExecutionVertexID> executionVertexIds) {
    final List<SlotExecutionVertexAssignment> assignments =
            new ArrayList<>(executionVertexIds.size());
    executionVertexIds.forEach(
            vertexId ->
                    assignments.add(
                            new SlotExecutionVertexAssignment(
                                    vertexId, new CompletableFuture<LogicalSlot>())));
    return assignments;
}
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.
From class DefaultExecutionTopologyTest, method testExistingRegionsAreNotAffectedDuringTopologyUpdate.
@Test
public void testExistingRegionsAreNotAffectedDuringTopologyUpdate() throws Exception {
    final JobVertex[] jobVertices = createJobVertices(BLOCKING);
    executionGraph = createDynamicGraph(jobVertices);
    adapter = DefaultExecutionTopology.fromExecutionGraph(executionGraph);

    final ExecutionJobVertex jobVertex1 = executionGraph.getJobVertex(jobVertices[0].getID());
    final ExecutionJobVertex jobVertex2 = executionGraph.getJobVertex(jobVertices[1].getID());

    // Initialize only the first job vertex and remember its pipelined region.
    executionGraph.initializeJobVertex(jobVertex1, 0L);
    adapter.notifyExecutionGraphUpdated(executionGraph, Collections.singletonList(jobVertex1));
    final SchedulingPipelinedRegion regionBeforeUpdate =
            adapter.getPipelinedRegionOfVertex(
                    new ExecutionVertexID(jobVertex1.getJobVertexId(), 0));

    // Initializing the second job vertex must leave the first vertex's region instance intact.
    executionGraph.initializeJobVertex(jobVertex2, 0L);
    adapter.notifyExecutionGraphUpdated(executionGraph, Collections.singletonList(jobVertex2));
    final SchedulingPipelinedRegion regionAfterUpdate =
            adapter.getPipelinedRegionOfVertex(
                    new ExecutionVertexID(jobVertex1.getJobVertexId(), 0));

    assertSame(regionBeforeUpdate, regionAfterUpdate);
}
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.
From class DefaultExecutionTopologyTest, method testGetVertexOrThrow.
@Test
public void testGetVertexOrThrow() {
    // Looking up an id that was never registered must raise IllegalArgumentException.
    boolean caughtExpectedException = false;
    try {
        adapter.getVertex(new ExecutionVertexID(new JobVertexID(), 0));
    } catch (IllegalArgumentException expected) {
        caughtExpectedException = true;
    }
    if (!caughtExpectedException) {
        fail("get not exist vertex");
    }
}
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.
From class DefaultResultPartitionTest, method testGetConsumerVertexGroup.
@Test
public void testGetConsumerVertexGroup() {
    // Before any consumer is registered the partition reports no group.
    assertFalse(resultPartition.getConsumerVertexGroup().isPresent());

    // Registering a single-vertex group makes it visible through the partition.
    final ExecutionVertexID consumerVertexId = new ExecutionVertexID(new JobVertexID(), 0);
    consumerVertexGroups.put(
            resultPartition.getId(), ConsumerVertexGroup.fromSingleVertex(consumerVertexId));

    assertTrue(resultPartition.getConsumerVertexGroup().isPresent());
    assertThat(resultPartition.getConsumerVertexGroup().get(), contains(consumerVertexId));
}
Aggregations