Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
The class MergingSharedSlotProfileRetrieverTest, method testGetEmptySlotProfile.
@Test
public void testGetEmptySlotProfile() throws ExecutionException, InterruptedException {
    SharedSlotProfileRetriever sharedSlotProfileRetriever =
            new MergingSharedSlotProfileRetrieverFactory(
                            EMPTY_PREFERRED_LOCATIONS_RETRIEVER,
                            executionVertexID -> Optional.of(new AllocationID()),
                            () -> Collections.emptySet())
                    .createFromBulk(Collections.emptySet());
    SlotProfile slotProfile =
            sharedSlotProfileRetriever.getSlotProfile(
                    new ExecutionSlotSharingGroup(), ResourceProfile.ZERO);
    assertThat(slotProfile.getTaskResourceProfile(), is(ResourceProfile.ZERO));
    assertThat(slotProfile.getPhysicalSlotResourceProfile(), is(ResourceProfile.ZERO));
    assertThat(slotProfile.getPreferredLocations(), hasSize(0));
    assertThat(slotProfile.getPreferredAllocations(), hasSize(0));
    assertThat(slotProfile.getReservedAllocations(), hasSize(0));
}
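A minimal standalone sketch (not part of the Flink sources) of the per-vertex allocation lookup the factory above receives as a lambda. It assumes only the ExecutionVertexID constructor and map-key usage visible in these tests; the HashMap name and the stored AllocationID are illustrative stand-ins.
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;

public class AllocationLookupSketch {
    public static void main(String[] args) {
        // Previous allocation per execution vertex, as a scheduler might remember it.
        Map<ExecutionVertexID, AllocationID> priorAllocations = new HashMap<>();
        ExecutionVertexID vertex = new ExecutionVertexID(new JobVertexID(), 0);
        priorAllocations.put(vertex, new AllocationID());

        // Same shape as the executionVertexID -> Optional.of(new AllocationID()) lambda
        // above, but backed by the map instead of always returning a fresh id.
        Optional<AllocationID> prior = Optional.ofNullable(priorAllocations.get(vertex));
        System.out.println("prior allocation present: " + prior.isPresent());
    }
}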
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
The class MergingSharedSlotProfileRetrieverTest, method testPreferredLocationsOfSlotProfile.
@Test
public void testPreferredLocationsOfSlotProfile() throws ExecutionException, InterruptedException {
    // preferred locations
    List<ExecutionVertexID> executions =
            IntStream.range(0, 3)
                    .mapToObj(i -> new ExecutionVertexID(new JobVertexID(), 0))
                    .collect(Collectors.toList());
    List<TaskManagerLocation> allLocations =
            executions.stream().map(e -> createTaskManagerLocation()).collect(Collectors.toList());
    Map<ExecutionVertexID, Collection<TaskManagerLocation>> locations = new HashMap<>();
    locations.put(executions.get(0), Arrays.asList(allLocations.get(0), allLocations.get(1)));
    locations.put(executions.get(1), Arrays.asList(allLocations.get(1), allLocations.get(2)));
    List<AllocationID> prevAllocationIds = Collections.nCopies(3, new AllocationID());
    SlotProfile slotProfile =
            getSlotProfile(
                    (executionVertexId, producersToIgnore) -> {
                        assertThat(producersToIgnore, containsInAnyOrder(executions.toArray()));
                        return locations.get(executionVertexId);
                    },
                    executions,
                    ResourceProfile.ZERO,
                    prevAllocationIds,
                    prevAllocationIds,
                    2);
    assertThat(slotProfile.getPreferredLocations().stream().filter(allLocations.get(0)::equals).count(), is(1L));
    assertThat(slotProfile.getPreferredLocations().stream().filter(allLocations.get(1)::equals).count(), is(2L));
    assertThat(slotProfile.getPreferredLocations().stream().filter(allLocations.get(2)::equals).count(), is(1L));
}
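The assertions above rely on the merged preferred locations containing each location once per execution that prefers it. A JDK-only sketch of that frequency count, with plain strings standing in for the TaskManagerLocations created by createTaskManagerLocation():
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class LocationFrequencySketch {
    public static void main(String[] args) {
        // loc1 is preferred by two executions, loc0 and loc2 by one each,
        // matching the 2L / 1L / 1L counts the test asserts.
        List<String> mergedPreferredLocations = Arrays.asList("loc0", "loc1", "loc1", "loc2");
        Map<String, Long> countsPerLocation = mergedPreferredLocations.stream()
                .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
        System.out.println(countsPerLocation); // loc0=1, loc1=2, loc2=1 (map order may vary)
    }
}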
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
The class DefaultExecutionVertexTest, method setUp.
@Before
public void setUp() throws Exception {
    intermediateResultPartitionId = new IntermediateResultPartitionID();
    DefaultResultPartition schedulingResultPartition =
            new DefaultResultPartition(
                    intermediateResultPartitionId,
                    new IntermediateDataSetID(),
                    BLOCKING,
                    () -> ResultPartitionState.CREATED,
                    () -> { throw new UnsupportedOperationException(); },
                    () -> { throw new UnsupportedOperationException(); });
    producerVertex =
            new DefaultExecutionVertex(
                    new ExecutionVertexID(new JobVertexID(), 0),
                    Collections.singletonList(schedulingResultPartition),
                    stateSupplier,
                    Collections.emptyList(),
                    partitionID -> { throw new UnsupportedOperationException(); });
    schedulingResultPartition.setProducer(producerVertex);
    List<ConsumedPartitionGroup> consumedPartitionGroups =
            Collections.singletonList(
                    ConsumedPartitionGroup.fromSinglePartition(intermediateResultPartitionId));
    Map<IntermediateResultPartitionID, DefaultResultPartition> resultPartitionById =
            Collections.singletonMap(intermediateResultPartitionId, schedulingResultPartition);
    consumerVertex =
            new DefaultExecutionVertex(
                    new ExecutionVertexID(new JobVertexID(), 0),
                    Collections.emptyList(),
                    stateSupplier,
                    consumedPartitionGroups,
                    resultPartitionById::get);
}
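A short sketch, outside the test, of why the two vertices built in setUp are distinct: each gets its own JobVertexID, so their ExecutionVertexIDs differ even with the same subtask index (these examples rely on that identity when using the ids as map keys). The class name is hypothetical.
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;

public class VertexIdentitySketch {
    public static void main(String[] args) {
        // Mirrors setUp: producer and consumer get distinct JobVertexIDs.
        ExecutionVertexID producerId = new ExecutionVertexID(new JobVertexID(), 0);
        ExecutionVertexID consumerId = new ExecutionVertexID(new JobVertexID(), 0);
        // Same subtask index, different job vertices, so the ids are not equal.
        System.out.println("producer id equals consumer id: " + producerId.equals(consumerId));
    }
}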
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
The class DefaultExecutionGraph, method createResultPartitionId.
ResultPartitionID createResultPartitionId(final IntermediateResultPartitionID resultPartitionId) {
    final SchedulingResultPartition schedulingResultPartition =
            getSchedulingTopology().getResultPartition(resultPartitionId);
    final SchedulingExecutionVertex producer = schedulingResultPartition.getProducer();
    final ExecutionVertexID producerId = producer.getId();
    final JobVertexID jobVertexId = producerId.getJobVertexId();
    final ExecutionJobVertex jobVertex = getJobVertex(jobVertexId);
    checkNotNull(jobVertex, "Unknown job vertex %s", jobVertexId);
    final ExecutionVertex[] taskVertices = jobVertex.getTaskVertices();
    final int subtaskIndex = producerId.getSubtaskIndex();
    checkState(
            subtaskIndex < taskVertices.length,
            "Invalid subtask index %d for job vertex %s",
            subtaskIndex,
            jobVertexId);
    final ExecutionVertex taskVertex = taskVertices[subtaskIndex];
    final Execution execution = taskVertex.getCurrentExecutionAttempt();
    return new ResultPartitionID(resultPartitionId, execution.getAttemptId());
}
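createResultPartitionId navigates from a partition to its producing task purely through the producer's ExecutionVertexID. A minimal sketch of that decomposition, using only the two accessors the method itself calls; the concrete subtask index is made up for illustration.
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;

public class ProducerLookupSketch {
    public static void main(String[] args) {
        // Hypothetical producer: third subtask (index 2) of some job vertex.
        ExecutionVertexID producerId = new ExecutionVertexID(new JobVertexID(), 2);
        // createResultPartitionId splits the id the same way: the JobVertexID selects
        // the ExecutionJobVertex, the subtask index selects the task vertex within it.
        System.out.println("job vertex:    " + producerId.getJobVertexId());
        System.out.println("subtask index: " + producerId.getSubtaskIndex());
    }
}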
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
The class SlotSharingSlotAllocator, method tryReserveResources.
@Override
public Optional<ReservedSlots> tryReserveResources(VertexParallelism vertexParallelism) {
    Preconditions.checkArgument(
            vertexParallelism instanceof VertexParallelismWithSlotSharing,
            String.format(
                    "%s expects %s as argument.",
                    SlotSharingSlotAllocator.class.getSimpleName(),
                    VertexParallelismWithSlotSharing.class.getSimpleName()));
    final VertexParallelismWithSlotSharing vertexParallelismWithSlotSharing =
            (VertexParallelismWithSlotSharing) vertexParallelism;
    final Collection<AllocationID> expectedSlots =
            calculateExpectedSlots(vertexParallelismWithSlotSharing.getAssignments());
    if (areAllExpectedSlotsAvailableAndFree(expectedSlots)) {
        final Map<ExecutionVertexID, LogicalSlot> assignedSlots = new HashMap<>();
        for (ExecutionSlotSharingGroupAndSlot executionSlotSharingGroup :
                vertexParallelismWithSlotSharing.getAssignments()) {
            final SharedSlot sharedSlot = reserveSharedSlot(executionSlotSharingGroup.getSlotInfo());
            for (ExecutionVertexID executionVertexId :
                    executionSlotSharingGroup.getExecutionSlotSharingGroup().getContainedExecutionVertices()) {
                final LogicalSlot logicalSlot = sharedSlot.allocateLogicalSlot();
                assignedSlots.put(executionVertexId, logicalSlot);
            }
        }
        return Optional.of(ReservedSlots.create(assignedSlots));
    } else {
        return Optional.empty();
    }
}
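A small sketch of the inner assignment loop, with a hypothetical placeholder string in place of the LogicalSlot that tryReserveResources allocates from each SharedSlot; it only illustrates that every vertex in a sharing group is keyed individually in the assignedSlots map.
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;

public class SharedSlotAssignmentSketch {
    public static void main(String[] args) {
        // One sharing group containing a source and a sink subtask.
        List<ExecutionVertexID> group = Arrays.asList(
                new ExecutionVertexID(new JobVertexID(), 0),
                new ExecutionVertexID(new JobVertexID(), 0));

        // "shared-slot-1" stands in for the logical slot carved out of the shared slot.
        Map<ExecutionVertexID, String> assignedSlots = new HashMap<>();
        for (ExecutionVertexID executionVertexId : group) {
            assignedSlots.put(executionVertexId, "shared-slot-1");
        }
        System.out.println("assigned vertices: " + assignedSlots.size()); // 2
    }
}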