
Example 81 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultPreferredLocationsRetrieverTest, method testInputLocationsIgnoresExcludedProducers.

@Test
public void testInputLocationsIgnoresExcludedProducers() {
    final TestingInputsLocationsRetriever.Builder locationRetrieverBuilder = new TestingInputsLocationsRetriever.Builder();
    final ExecutionVertexID consumerId = new ExecutionVertexID(new JobVertexID(), 0);
    final JobVertexID producerJobVertexId = new JobVertexID();
    final ExecutionVertexID producerId1 = new ExecutionVertexID(producerJobVertexId, 0);
    locationRetrieverBuilder.connectConsumerToProducer(consumerId, producerId1);
    final ExecutionVertexID producerId2 = new ExecutionVertexID(producerJobVertexId, 1);
    locationRetrieverBuilder.connectConsumerToProducer(consumerId, producerId2);
    final TestingInputsLocationsRetriever inputsLocationsRetriever = locationRetrieverBuilder.build();
    inputsLocationsRetriever.markScheduled(producerId1);
    inputsLocationsRetriever.markScheduled(producerId2);
    inputsLocationsRetriever.assignTaskManagerLocation(producerId1);
    inputsLocationsRetriever.assignTaskManagerLocation(producerId2);
    final PreferredLocationsRetriever locationsRetriever = new DefaultPreferredLocationsRetriever(id -> Optional.empty(), inputsLocationsRetriever);
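    // producerId1 is passed as the set of producers to ignore, so only producerId2's location should be preferred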
    final CompletableFuture<Collection<TaskManagerLocation>> preferredLocations = locationsRetriever.getPreferredLocations(consumerId, Collections.singleton(producerId1));
    assertThat(preferredLocations.getNow(null), hasSize(1));
    final TaskManagerLocation producerLocation2 = inputsLocationsRetriever.getTaskManagerLocation(producerId2).get().getNow(null);
    assertThat(preferredLocations.getNow(null), contains(producerLocation2));
}
Also used : ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) LocalTaskManagerLocation(org.apache.flink.runtime.taskmanager.LocalTaskManagerLocation) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) Collection(java.util.Collection) Test(org.junit.Test)
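
For orientation, here is a minimal sketch (not taken from the test above, and assuming the usual getJobVertexId() and getSubtaskIndex() accessors) of what an ExecutionVertexID is: a JobVertexID paired with a subtask index, compared by value.

JobVertexID jobVertexId = new JobVertexID();
ExecutionVertexID first = new ExecutionVertexID(jobVertexId, 0);
ExecutionVertexID second = new ExecutionVertexID(jobVertexId, 0);
// Equality is value-based on (JobVertexID, subtask index), so both IDs address the same subtask.
assertEquals(first, second);
assertEquals(jobVertexId, first.getJobVertexId());
assertEquals(0, first.getSubtaskIndex());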

Example 82 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class MergingSharedSlotProfileRetrieverTest, method testPreferredLocationsOfSlotProfile.

@Test
public void testPreferredLocationsOfSlotProfile() throws ExecutionException, InterruptedException {
    // preferred locations
    List<ExecutionVertexID> executions = IntStream.range(0, 3).mapToObj(i -> new ExecutionVertexID(new JobVertexID(), 0)).collect(Collectors.toList());
    List<TaskManagerLocation> allLocations = executions.stream().map(e -> createTaskManagerLocation()).collect(Collectors.toList());
    Map<ExecutionVertexID, Collection<TaskManagerLocation>> locations = new HashMap<>();
    locations.put(executions.get(0), Arrays.asList(allLocations.get(0), allLocations.get(1)));
    locations.put(executions.get(1), Arrays.asList(allLocations.get(1), allLocations.get(2)));
    List<AllocationID> prevAllocationIds = Collections.nCopies(3, new AllocationID());
    SlotProfile slotProfile = getSlotProfile((executionVertexId, producersToIgnore) -> {
        assertThat(producersToIgnore, containsInAnyOrder(executions.toArray()));
        return locations.get(executionVertexId);
    }, executions, ResourceProfile.ZERO, prevAllocationIds, prevAllocationIds, 2);
    assertThat(slotProfile.getPreferredLocations().stream().filter(allLocations.get(0)::equals).count(), is(1L));
    assertThat(slotProfile.getPreferredLocations().stream().filter(allLocations.get(1)::equals).count(), is(2L));
    assertThat(slotProfile.getPreferredLocations().stream().filter(allLocations.get(2)::equals).count(), is(1L));
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) HashMap(java.util.HashMap) SlotProfile(org.apache.flink.runtime.clusterframework.types.SlotProfile) MemorySize(org.apache.flink.configuration.MemorySize) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) InetAddress(java.net.InetAddress) HashSet(java.util.HashSet) Assert.assertThat(org.junit.Assert.assertThat) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) Matchers.hasSize(org.hamcrest.Matchers.hasSize) ResourceID(org.apache.flink.runtime.clusterframework.types.ResourceID) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) Collection(java.util.Collection) Test(org.junit.Test) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) UnknownHostException(java.net.UnknownHostException) Collectors(java.util.stream.Collectors) ResourceProfile(org.apache.flink.runtime.clusterframework.types.ResourceProfile) ExecutionException(java.util.concurrent.ExecutionException) List(java.util.List) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder) Optional(java.util.Optional) Matchers.is(org.hamcrest.Matchers.is) Collections(java.util.Collections) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID)
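
To see what the three assertions above check, note that a location preferred by two of the sharing executions shows up twice among the slot profile's preferred locations. A minimal counting sketch (hypothetical helper code, reusing the imports of the test above):

Map<TaskManagerLocation, Long> occurrences = slotProfile.getPreferredLocations().stream()
        .collect(Collectors.groupingBy(location -> location, Collectors.counting()));
// allLocations.get(1) is preferred by executions 0 and 1, so it is counted twice;
// allLocations.get(0) and allLocations.get(2) are each preferred by a single execution.
assertThat(occurrences.get(allLocations.get(1)), is(2L));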

Example 83 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultSchedulerTest, method suspendJobWillIncrementVertexVersions.

@Test
public void suspendJobWillIncrementVertexVersions() throws Exception {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    final ExecutionVertexID onlyExecutionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
    final ExecutionVertexVersion executionVertexVersion = executionVertexVersioner.getExecutionVertexVersion(onlyExecutionVertexId);
    scheduler.close();
    assertTrue(executionVertexVersioner.isModified(executionVertexVersion));
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) AdaptiveSchedulerTest(org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest) Test(org.junit.Test)
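
The assertion works because suspending the job on scheduler.close() records a new modification for every execution vertex, which invalidates previously obtained versions. A minimal sketch of that record-and-check pattern (a hypothetical, standalone use of ExecutionVertexVersioner, assuming recordModification and isModified behave as described):

ExecutionVertexVersioner versioner = new ExecutionVertexVersioner();
ExecutionVertexVersion before = versioner.recordModification(onlyExecutionVertexId);
// Any later modification of the same vertex makes the earlier version stale.
versioner.recordModification(onlyExecutionVertexId);
assertTrue(versioner.isModified(before));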

Example 84 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class ExecutionGraphToInputsLocationsRetrieverAdapterTest, method testGetConsumedResultPartitionsProducers.

/**
 * Tests that the producers of consumed result partitions can be retrieved.
 */
@Test
public void testGetConsumedResultPartitionsProducers() throws Exception {
    final JobVertex producer1 = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex producer2 = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex consumer = ExecutionGraphTestUtils.createNoOpVertex(1);
    consumer.connectNewDataSetAsInput(producer1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    consumer.connectNewDataSetAsInput(producer2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    final ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(producer1, producer2, consumer);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever = new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
    ExecutionVertexID evIdOfProducer1 = new ExecutionVertexID(producer1.getID(), 0);
    ExecutionVertexID evIdOfProducer2 = new ExecutionVertexID(producer2.getID(), 0);
    ExecutionVertexID evIdOfConsumer = new ExecutionVertexID(consumer.getID(), 0);
    Collection<Collection<ExecutionVertexID>> producersOfProducer1 = inputsLocationsRetriever.getConsumedResultPartitionsProducers(evIdOfProducer1);
    Collection<Collection<ExecutionVertexID>> producersOfProducer2 = inputsLocationsRetriever.getConsumedResultPartitionsProducers(evIdOfProducer2);
    Collection<Collection<ExecutionVertexID>> producersOfConsumer = inputsLocationsRetriever.getConsumedResultPartitionsProducers(evIdOfConsumer);
    assertThat(producersOfProducer1, is(empty()));
    assertThat(producersOfProducer2, is(empty()));
    assertThat(producersOfConsumer, hasSize(2));
    assertThat(producersOfConsumer, hasItem(Collections.singletonList(evIdOfProducer1)));
    assertThat(producersOfConsumer, hasItem(Collections.singletonList(evIdOfProducer2)));
}
Also used : JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) Collection(java.util.Collection) Test(org.junit.Test)
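
To make the shape of the returned value concrete: getConsumedResultPartitionsProducers yields one inner collection per consumed result, each holding the ExecutionVertexIDs of that result's producer subtasks. A small illustrative loop (not part of the original test):

for (Collection<ExecutionVertexID> producersOfOneConsumedResult : producersOfConsumer) {
    for (ExecutionVertexID producerId : producersOfOneConsumedResult) {
        // With parallelism 1 for both producers, each inner collection holds exactly one ID.
        System.out.println(producerId.getJobVertexId() + " / subtask " + producerId.getSubtaskIndex());
    }
}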

Example 85 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class SlotSharingSlotAllocatorTest, method testReserveAvailableResources.

@Test
public void testReserveAvailableResources() {
    final SlotSharingSlotAllocator slotAllocator = SlotSharingSlotAllocator.createSlotSharingSlotAllocator(TEST_RESERVE_SLOT_FUNCTION, TEST_FREE_SLOT_FUNCTION, TEST_IS_SLOT_FREE_FUNCTION);
    final JobInformation jobInformation = new TestJobInformation(Arrays.asList(vertex1, vertex2, vertex3));
    final VertexParallelismWithSlotSharing slotAssignments = slotAllocator.determineParallelism(jobInformation, getSlots(50)).get();
    final ReservedSlots reservedSlots = slotAllocator.tryReserveResources(slotAssignments).orElseThrow(() -> new RuntimeException("Expected that reservation succeeds."));
    final Map<ExecutionVertexID, SlotInfo> expectedAssignments = new HashMap<>();
    for (SlotSharingSlotAllocator.ExecutionSlotSharingGroupAndSlot assignment : slotAssignments.getAssignments()) {
        for (ExecutionVertexID containedExecutionVertex : assignment.getExecutionSlotSharingGroup().getContainedExecutionVertices()) {
            expectedAssignments.put(containedExecutionVertex, assignment.getSlotInfo());
        }
    }
    for (Map.Entry<ExecutionVertexID, SlotInfo> expectedAssignment : expectedAssignments.entrySet()) {
        final LogicalSlot assignedSlot = reservedSlots.getSlotFor(expectedAssignment.getKey());
        final SlotInfo backingSlot = expectedAssignment.getValue();
        assertThat(assignedSlot.getAllocationId(), is(backingSlot.getAllocationId()));
    }
}
Also used : HashMap(java.util.HashMap) LogicalSlot(org.apache.flink.runtime.jobmaster.LogicalSlot) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) SlotInfo(org.apache.flink.runtime.jobmaster.SlotInfo) Map(java.util.Map) Test(org.junit.Test)
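
The expectedAssignments map above relies on ExecutionVertexID defining equals and hashCode over its JobVertexID and subtask index, which makes it a reliable HashMap key. A minimal sketch of that property (hypothetical values, same imports as the tests on this page):

Map<ExecutionVertexID, String> assignmentsByVertex = new HashMap<>();
ExecutionVertexID key = new ExecutionVertexID(new JobVertexID(), 0);
assignmentsByVertex.put(key, "slot-1");
// A freshly constructed ID with the same JobVertexID and subtask index finds the same entry.
assertThat(assignmentsByVertex.get(new ExecutionVertexID(key.getJobVertexId(), 0)), is("slot-1"));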

Aggregations

ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID): 231 usages
Test (org.junit.Test): 165 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 63 usages
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 57 usages
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 54 usages
SchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex): 51 usages
Set (java.util.Set): 48 usages
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID): 45 usages
AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest): 45 usages
TestingSchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex): 45 usages
Collection (java.util.Collection): 33 usages
TestingSchedulingTopology (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology): 33 usages
HashSet (java.util.HashSet): 30 usages
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex): 30 usages
ArrayList (java.util.ArrayList): 27 usages
Map (java.util.Map): 27 usages
HashMap (java.util.HashMap): 24 usages
List (java.util.List): 24 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 24 usages
TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation): 24 usages