Search in sources :

Example 41 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project.

From the class MergingSharedSlotProfileRetrieverTest, the method testGetEmptySlotProfile:

@Test
public void testGetEmptySlotProfile() throws ExecutionException, InterruptedException {
    // Build a retriever from an empty bulk: no preferred locations, no prior
    // allocations, and no reserved allocations should surface in the profile.
    final SharedSlotProfileRetriever retriever =
            new MergingSharedSlotProfileRetrieverFactory(
                            EMPTY_PREFERRED_LOCATIONS_RETRIEVER,
                            vertexId -> Optional.of(new AllocationID()),
                            Collections::emptySet)
                    .createFromBulk(Collections.emptySet());

    final SlotProfile profile =
            retriever.getSlotProfile(new ExecutionSlotSharingGroup(), ResourceProfile.ZERO);

    // Resource demands pass straight through; every preference list is empty.
    assertThat(profile.getTaskResourceProfile(), is(ResourceProfile.ZERO));
    assertThat(profile.getPhysicalSlotResourceProfile(), is(ResourceProfile.ZERO));
    assertThat(profile.getPreferredLocations(), hasSize(0));
    assertThat(profile.getPreferredAllocations(), hasSize(0));
    assertThat(profile.getReservedAllocations(), hasSize(0));
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) HashMap(java.util.HashMap) SlotProfile(org.apache.flink.runtime.clusterframework.types.SlotProfile) MemorySize(org.apache.flink.configuration.MemorySize) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) InetAddress(java.net.InetAddress) HashSet(java.util.HashSet) Assert.assertThat(org.junit.Assert.assertThat) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) Matchers.hasSize(org.hamcrest.Matchers.hasSize) ResourceID(org.apache.flink.runtime.clusterframework.types.ResourceID) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) Collection(java.util.Collection) Test(org.junit.Test) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) UnknownHostException(java.net.UnknownHostException) Collectors(java.util.stream.Collectors) ResourceProfile(org.apache.flink.runtime.clusterframework.types.ResourceProfile) ExecutionException(java.util.concurrent.ExecutionException) List(java.util.List) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder) Optional(java.util.Optional) Matchers.is(org.hamcrest.Matchers.is) Collections(java.util.Collections) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) SlotProfile(org.apache.flink.runtime.clusterframework.types.SlotProfile) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) Test(org.junit.Test)

Example 42 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project.

From the class MergingSharedSlotProfileRetrieverTest, the method testPreferredLocationsOfSlotProfile:

@Test
public void testPreferredLocationsOfSlotProfile() throws ExecutionException, InterruptedException {
    // Three executions; vertex 0 prefers locations {0,1} and vertex 1 prefers
    // {1,2}, so the merged profile should see location 1 twice and 0/2 once each.
    final List<ExecutionVertexID> executions =
            IntStream.range(0, 3)
                    .mapToObj(unused -> new ExecutionVertexID(new JobVertexID(), 0))
                    .collect(Collectors.toList());
    final List<TaskManagerLocation> allLocations =
            executions.stream()
                    .map(unused -> createTaskManagerLocation())
                    .collect(Collectors.toList());

    final Map<ExecutionVertexID, Collection<TaskManagerLocation>> locationsByVertex = new HashMap<>();
    locationsByVertex.put(executions.get(0), Arrays.asList(allLocations.get(0), allLocations.get(1)));
    locationsByVertex.put(executions.get(1), Arrays.asList(allLocations.get(1), allLocations.get(2)));

    final List<AllocationID> prevAllocationIds = Collections.nCopies(3, new AllocationID());
    final SlotProfile slotProfile =
            getSlotProfile(
                    (executionVertexId, producersToIgnore) -> {
                        // All executions in the bulk are excluded as producers.
                        assertThat(producersToIgnore, containsInAnyOrder(executions.toArray()));
                        return locationsByVertex.get(executionVertexId);
                    },
                    executions,
                    ResourceProfile.ZERO,
                    prevAllocationIds,
                    prevAllocationIds,
                    2);

    // Location 1 is preferred by two executions, locations 0 and 2 by one each.
    assertThat(countPreferred(slotProfile, allLocations.get(0)), is(1L));
    assertThat(countPreferred(slotProfile, allLocations.get(1)), is(2L));
    assertThat(countPreferred(slotProfile, allLocations.get(2)), is(1L));
}

/** Counts how often {@code location} occurs among the profile's preferred locations. */
private static long countPreferred(SlotProfile slotProfile, TaskManagerLocation location) {
    return slotProfile.getPreferredLocations().stream()
            .filter(candidate -> candidate.equals(location))
            .count();
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) HashMap(java.util.HashMap) SlotProfile(org.apache.flink.runtime.clusterframework.types.SlotProfile) MemorySize(org.apache.flink.configuration.MemorySize) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) InetAddress(java.net.InetAddress) HashSet(java.util.HashSet) Assert.assertThat(org.junit.Assert.assertThat) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) Matchers.hasSize(org.hamcrest.Matchers.hasSize) ResourceID(org.apache.flink.runtime.clusterframework.types.ResourceID) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) Collection(java.util.Collection) Test(org.junit.Test) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) UnknownHostException(java.net.UnknownHostException) Collectors(java.util.stream.Collectors) ResourceProfile(org.apache.flink.runtime.clusterframework.types.ResourceProfile) ExecutionException(java.util.concurrent.ExecutionException) List(java.util.List) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder) Optional(java.util.Optional) Matchers.is(org.hamcrest.Matchers.is) Collections(java.util.Collections) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) HashMap(java.util.HashMap) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) SlotProfile(org.apache.flink.runtime.clusterframework.types.SlotProfile) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) Collection(java.util.Collection) Test(org.junit.Test)

Example 43 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project.

From the class DefaultExecutionVertexTest, the method setUp:

@Before
public void setUp() throws Exception {
    // Wire a single BLOCKING result partition between a producer and a consumer
    // vertex. Callbacks that must not be invoked by these tests throw eagerly.
    intermediateResultPartitionId = new IntermediateResultPartitionID();

    final DefaultResultPartition resultPartition =
            new DefaultResultPartition(
                    intermediateResultPartitionId,
                    new IntermediateDataSetID(),
                    BLOCKING,
                    () -> ResultPartitionState.CREATED,
                    () -> {
                        throw new UnsupportedOperationException();
                    },
                    () -> {
                        throw new UnsupportedOperationException();
                    });

    producerVertex =
            new DefaultExecutionVertex(
                    new ExecutionVertexID(new JobVertexID(), 0),
                    Collections.singletonList(resultPartition),
                    stateSupplier,
                    Collections.emptyList(),
                    partitionId -> {
                        throw new UnsupportedOperationException();
                    });
    resultPartition.setProducer(producerVertex);

    // The consumer resolves the produced partition through a lookup map.
    final Map<IntermediateResultPartitionID, DefaultResultPartition> partitionsById =
            Collections.singletonMap(intermediateResultPartitionId, resultPartition);
    consumerVertex =
            new DefaultExecutionVertex(
                    new ExecutionVertexID(new JobVertexID(), 0),
                    Collections.emptyList(),
                    stateSupplier,
                    Collections.singletonList(
                            ConsumedPartitionGroup.fromSinglePartition(intermediateResultPartitionId)),
                    partitionsById::get);
}
Also used : IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ExecutionState(org.apache.flink.runtime.execution.ExecutionState) Test(org.junit.Test) IterableUtils(org.apache.flink.util.IterableUtils) IntermediateDataSetID(org.apache.flink.runtime.jobgraph.IntermediateDataSetID) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) Supplier(java.util.function.Supplier) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ResultPartitionState(org.apache.flink.runtime.scheduler.strategy.ResultPartitionState) SchedulingResultPartition(org.apache.flink.runtime.scheduler.strategy.SchedulingResultPartition) List(java.util.List) ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) BLOCKING(org.apache.flink.runtime.io.network.partition.ResultPartitionType.BLOCKING) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) Before(org.junit.Before) ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) IntermediateDataSetID(org.apache.flink.runtime.jobgraph.IntermediateDataSetID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Before(org.junit.Before)

Example 44 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project.

From the class DefaultExecutionGraph, the method createResultPartitionId:

/**
 * Builds the runtime {@code ResultPartitionID} for the given intermediate result partition by
 * pairing it with the current execution attempt of its producer vertex.
 */
ResultPartitionID createResultPartitionId(final IntermediateResultPartitionID resultPartitionId) {
    // Resolve the producing execution vertex via the scheduling topology.
    final SchedulingExecutionVertex producer =
            getSchedulingTopology().getResultPartition(resultPartitionId).getProducer();
    final ExecutionVertexID producerId = producer.getId();
    final JobVertexID jobVertexId = producerId.getJobVertexId();

    final ExecutionJobVertex jobVertex = getJobVertex(jobVertexId);
    checkNotNull(jobVertex, "Unknown job vertex %s", jobVertexId);

    // Guard against a stale/out-of-range subtask index before dereferencing.
    final ExecutionVertex[] taskVertices = jobVertex.getTaskVertices();
    final int subtaskIndex = producerId.getSubtaskIndex();
    checkState(
            subtaskIndex < taskVertices.length,
            "Invalid subtask index %d for job vertex %s",
            subtaskIndex,
            jobVertexId);

    final Execution currentAttempt = taskVertices[subtaskIndex].getCurrentExecutionAttempt();
    return new ResultPartitionID(resultPartitionId, currentAttempt.getAttemptId());
}
Also used : SchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex) SchedulingResultPartition(org.apache.flink.runtime.scheduler.strategy.SchedulingResultPartition) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) SchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex)

Example 45 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in the Apache Flink project.

From the class SlotSharingSlotAllocator, the method tryReserveResources:

@Override
public Optional<ReservedSlots> tryReserveResources(VertexParallelism vertexParallelism) {
    Preconditions.checkArgument(
            vertexParallelism instanceof VertexParallelismWithSlotSharing,
            String.format(
                    "%s expects %s as argument.",
                    SlotSharingSlotAllocator.class.getSimpleName(),
                    VertexParallelismWithSlotSharing.class.getSimpleName()));
    final VertexParallelismWithSlotSharing slotSharingParallelism =
            (VertexParallelismWithSlotSharing) vertexParallelism;

    // Reserve nothing unless every expected slot can be claimed atomically.
    final Collection<AllocationID> requiredSlots =
            calculateExpectedSlots(slotSharingParallelism.getAssignments());
    if (!areAllExpectedSlotsAvailableAndFree(requiredSlots)) {
        return Optional.empty();
    }

    // Each sharing group gets one shared slot; every contained execution vertex
    // receives its own logical slot carved out of that shared slot.
    final Map<ExecutionVertexID, LogicalSlot> slotByVertex = new HashMap<>();
    for (ExecutionSlotSharingGroupAndSlot assignment : slotSharingParallelism.getAssignments()) {
        final SharedSlot sharedSlot = reserveSharedSlot(assignment.getSlotInfo());
        for (ExecutionVertexID vertexId :
                assignment.getExecutionSlotSharingGroup().getContainedExecutionVertices()) {
            slotByVertex.put(vertexId, sharedSlot.allocateLogicalSlot());
        }
    }
    return Optional.of(ReservedSlots.create(slotByVertex));
}
Also used : HashMap(java.util.HashMap) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) LogicalSlot(org.apache.flink.runtime.jobmaster.LogicalSlot)

Aggregations

ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID)77 Test (org.junit.Test)55 JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID)21 JobGraph (org.apache.flink.runtime.jobgraph.JobGraph)19 JobVertex (org.apache.flink.runtime.jobgraph.JobVertex)18 SchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex)17 Set (java.util.Set)16 IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)15 AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest)15 TestingSchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex)15 Collection (java.util.Collection)11 TestingSchedulingTopology (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology)11 HashSet (java.util.HashSet)10 ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex)10 ArrayList (java.util.ArrayList)9 Map (java.util.Map)9 HashMap (java.util.HashMap)8 List (java.util.List)8 CompletableFuture (java.util.concurrent.CompletableFuture)8 ArchivedExecutionVertex (org.apache.flink.runtime.executiongraph.ArchivedExecutionVertex)7