Example 61 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class SchedulerBase, method getInvolvedExecutionJobVertices.

private Set<ExecutionJobVertex> getInvolvedExecutionJobVertices(final Set<ExecutionVertexID> executionVertices) {
    final Set<ExecutionJobVertex> tasks = new HashSet<>();
    for (ExecutionVertexID executionVertexID : executionVertices) {
        final ExecutionVertex executionVertex = getExecutionVertex(executionVertexID);
        // Several execution vertices (subtasks) can share one job vertex; the
        // Set keeps each ExecutionJobVertex only once.
        tasks.add(executionVertex.getJobVertex());
    }
    return tasks;
}
Also used : ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) ExecutionVertex(org.apache.flink.runtime.executiongraph.ExecutionVertex) SchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex) HashSet(java.util.HashSet)
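
The loop above resolves each ExecutionVertexID to its ExecutionVertex and collects the owning job vertices into a Set, so each ExecutionJobVertex appears once even when several of its subtasks are in the input. The same logic can be written as a stream pipeline; this is a sketch assuming the surrounding SchedulerBase context and an import of java.util.stream.Collectors, not the method as it actually appears in Flink:

private Set<ExecutionJobVertex> getInvolvedExecutionJobVertices(final Set<ExecutionVertexID> executionVertices) {
    // Resolve each ID to its vertex, take the owning job vertex, and let the
    // Set collector drop duplicates.
    return executionVertices.stream()
            .map(this::getExecutionVertex)
            .map(ExecutionVertex::getJobVertex)
            .collect(Collectors.toSet());
}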

Example 62 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class SharedSlot, method allocateNonExistentLogicalSlot.

private CompletableFuture<SingleLogicalSlot> allocateNonExistentLogicalSlot(ExecutionVertexID executionVertexId) {
    CompletableFuture<SingleLogicalSlot> logicalSlotFuture;
    SlotRequestId logicalSlotRequestId = new SlotRequestId();
    String logMessageBase = getLogicalSlotString(logicalSlotRequestId, executionVertexId);
    LOG.debug("Request a {}", logMessageBase);
    logicalSlotFuture = slotContextFuture.thenApply(physicalSlot -> {
        LOG.debug("Allocated {}", logMessageBase);
        return createLogicalSlot(physicalSlot, logicalSlotRequestId);
    });
    requestedLogicalSlots.put(executionVertexId, logicalSlotRequestId, logicalSlotFuture);
    // If the physical slot request (slotContextFuture) fails, it also fails the
    // logicalSlotFuture. The `exceptionally` callback below then calls
    // removeLogicalSlotRequest, which cleans up requestedLogicalSlots and
    // eventually sharedSlots.
    logicalSlotFuture.exceptionally(cause -> {
        LOG.debug("Failed {}", logMessageBase, cause);
        removeLogicalSlotRequest(logicalSlotRequestId);
        return null;
    });
    return logicalSlotFuture;
}
Also used : SlotRequestId(org.apache.flink.runtime.jobmaster.SlotRequestId) DualKeyLinkedMap(org.apache.flink.runtime.util.DualKeyLinkedMap) Logger(org.slf4j.Logger) LoggerFactory(org.slf4j.LoggerFactory) Locality(org.apache.flink.runtime.jobmanager.scheduler.Locality) LogicalSlot(org.apache.flink.runtime.jobmaster.LogicalSlot) SlotOwner(org.apache.flink.runtime.jobmaster.SlotOwner) CompletableFuture(java.util.concurrent.CompletableFuture) SingleLogicalSlot(org.apache.flink.runtime.jobmaster.slotpool.SingleLogicalSlot) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) Preconditions(org.apache.flink.util.Preconditions) Function(java.util.function.Function) Collectors(java.util.stream.Collectors) ResourceProfile(org.apache.flink.runtime.clusterframework.types.ResourceProfile) Execution(org.apache.flink.runtime.executiongraph.Execution) Consumer(java.util.function.Consumer) PhysicalSlot(org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlot) Map(java.util.Map) Nullable(javax.annotation.Nullable)
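
The cleanup in allocateNonExistentLogicalSlot is plain CompletableFuture chaining: a future derived via thenApply inherits its parent's failure, so one `exceptionally` callback on the derived future covers both the physical slot request and the mapping step. A minimal standalone sketch of the pattern, with hypothetical names (the map and string payloads stand in for Flink's DualKeyLinkedMap and slot types):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class CleanupOnFailureSketch {
    private final Map<String, CompletableFuture<String>> requested = new ConcurrentHashMap<>();

    CompletableFuture<String> allocate(String requestId, CompletableFuture<String> physicalSlot) {
        // thenApply propagates a failure of physicalSlot into logicalSlot.
        CompletableFuture<String> logicalSlot =
                physicalSlot.thenApply(slot -> slot + "/logical-" + requestId);
        requested.put(requestId, logicalSlot);
        // A single exceptionally callback covers failures from either stage and
        // undoes the bookkeeping entry added above.
        logicalSlot.exceptionally(cause -> {
            requested.remove(requestId);
            return null;
        });
        return logicalSlot;
    }

    public static void main(String[] args) {
        CleanupOnFailureSketch sketch = new CleanupOnFailureSketch();
        CompletableFuture<String> physical = new CompletableFuture<>();
        CompletableFuture<String> logical = sketch.allocate("r1", physical);
        physical.completeExceptionally(new RuntimeException("no resources"));
        System.out.println(logical.isCompletedExceptionally()); // true
        System.out.println(sketch.requested.containsKey("r1")); // false
    }
}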

Example 63 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class UpdatePartitionConsumersTest, method testUpdatePartitionConsumers.

/**
 * Tests that BLOCKING partition information is properly updated to consumers when the
 * producer finishes.
 */
@Test
public void testUpdatePartitionConsumers() throws Exception {
    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    final SchedulerBase scheduler = SchedulerTestingUtils.newSchedulerBuilder(jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread()).setExecutionSlotAllocatorFactory(new TestExecutionSlotAllocatorFactory(taskManagerGateway)).build();
    final ExecutionVertex ev1 = scheduler.getExecutionVertex(new ExecutionVertexID(v1.getID(), 0));
    final ExecutionVertex ev2 = scheduler.getExecutionVertex(new ExecutionVertexID(v2.getID(), 0));
    final ExecutionVertex ev3 = scheduler.getExecutionVertex(new ExecutionVertexID(v3.getID(), 0));
    final ExecutionVertex ev4 = scheduler.getExecutionVertex(new ExecutionVertexID(v4.getID(), 0));
    final CompletableFuture<TaskDeploymentDescriptor> ev4TddFuture = new CompletableFuture<>();
    taskManagerGateway.setSubmitConsumer(tdd -> {
        if (tdd.getExecutionAttemptId().equals(ev4.getCurrentExecutionAttempt().getAttemptId())) {
            ev4TddFuture.complete(tdd);
        }
    });
    scheduler.startScheduling();
    // All four vertices are deployed immediately when scheduling starts.
    assertThat(ev1.getExecutionState(), is(ExecutionState.DEPLOYING));
    assertThat(ev2.getExecutionState(), is(ExecutionState.DEPLOYING));
    assertThat(ev3.getExecutionState(), is(ExecutionState.DEPLOYING));
    assertThat(ev4.getExecutionState(), is(ExecutionState.DEPLOYING));
    updateState(scheduler, ev1, ExecutionState.INITIALIZING);
    updateState(scheduler, ev1, ExecutionState.RUNNING);
    updateState(scheduler, ev2, ExecutionState.INITIALIZING);
    updateState(scheduler, ev2, ExecutionState.RUNNING);
    updateState(scheduler, ev3, ExecutionState.INITIALIZING);
    updateState(scheduler, ev3, ExecutionState.RUNNING);
    updateState(scheduler, ev4, ExecutionState.INITIALIZING);
    updateState(scheduler, ev4, ExecutionState.RUNNING);
    // Until the producer finishes, ev4's BLOCKING input gate (index 1) carries
    // an UnknownShuffleDescriptor for the not-yet-available partition.
    final InputGateDeploymentDescriptor ev4Igdd2 = ev4TddFuture.get(TIMEOUT, TimeUnit.MILLISECONDS).getInputGates().get(1);
    assertThat(ev4Igdd2.getShuffleDescriptors()[0], instanceOf(UnknownShuffleDescriptor.class));
    final CompletableFuture<Void> updatePartitionFuture = new CompletableFuture<>();
    taskManagerGateway.setUpdatePartitionsConsumer((attemptId, partitionInfos, time) -> {
        assertThat(attemptId, equalTo(ev4.getCurrentExecutionAttempt().getAttemptId()));
        final List<PartitionInfo> partitionInfoList = IterableUtils.toStream(partitionInfos).collect(Collectors.toList());
        assertThat(partitionInfoList, hasSize(1));
        final PartitionInfo partitionInfo = partitionInfoList.get(0);
        assertThat(partitionInfo.getIntermediateDataSetID(), equalTo(v3.getProducedDataSets().get(0).getId()));
        assertThat(partitionInfo.getShuffleDescriptor(), instanceOf(NettyShuffleDescriptor.class));
        updatePartitionFuture.complete(null);
    });
    // Finishing the producers triggers the partition info update for ev4.
    updateState(scheduler, ev1, ExecutionState.FINISHED);
    updateState(scheduler, ev3, ExecutionState.FINISHED);
    updatePartitionFuture.get(TIMEOUT, TimeUnit.MILLISECONDS);
}
Also used : NettyShuffleDescriptor(org.apache.flink.runtime.shuffle.NettyShuffleDescriptor) TestExecutionSlotAllocatorFactory(org.apache.flink.runtime.scheduler.TestExecutionSlotAllocatorFactory) InputGateDeploymentDescriptor(org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor) UnknownShuffleDescriptor(org.apache.flink.runtime.shuffle.UnknownShuffleDescriptor) ExecutionVertex(org.apache.flink.runtime.executiongraph.ExecutionVertex) SimpleAckingTaskManagerGateway(org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway) CompletableFuture(java.util.concurrent.CompletableFuture) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) SchedulerBase(org.apache.flink.runtime.scheduler.SchedulerBase) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) PartitionInfo(org.apache.flink.runtime.executiongraph.PartitionInfo) Test(org.junit.Test)
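
The test synchronizes with the scheduler through gateway callbacks that complete a CompletableFuture, then blocks with get(timeout) so a missed event fails the test instead of hanging it. A stripped-down sketch of that synchronization pattern, with hypothetical names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

public class CallbackFutureSketch {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> ev4Deployed = new CompletableFuture<>();
        // Stand-in for taskManagerGateway.setSubmitConsumer(...): the callback
        // completes the future only for the deployment we care about.
        Consumer<String> submitConsumer = taskName -> {
            if (taskName.equals("ev4")) {
                ev4Deployed.complete(taskName);
            }
        };
        // Simulate the scheduler submitting tasks to the gateway.
        submitConsumer.accept("ev1");
        submitConsumer.accept("ev4");
        // get(timeout) fails the test instead of hanging if the event never arrives.
        System.out.println(ev4Deployed.get(100, TimeUnit.MILLISECONDS)); // ev4
    }
}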

Example 64 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultExecutionVertexTest, method setUp.

@Before
public void setUp() throws Exception {
    intermediateResultPartitionId = new IntermediateResultPartitionID();
    // The throwing suppliers stub out lookups this test is not expected to
    // exercise; touching them fails the test loudly.
    DefaultResultPartition schedulingResultPartition = new DefaultResultPartition(intermediateResultPartitionId, new IntermediateDataSetID(), BLOCKING, () -> ResultPartitionState.CREATED, () -> {
        throw new UnsupportedOperationException();
    }, () -> {
        throw new UnsupportedOperationException();
    });
    producerVertex = new DefaultExecutionVertex(new ExecutionVertexID(new JobVertexID(), 0), Collections.singletonList(schedulingResultPartition), stateSupplier, Collections.emptyList(), partitionID -> {
        throw new UnsupportedOperationException();
    });
    schedulingResultPartition.setProducer(producerVertex);
    List<ConsumedPartitionGroup> consumedPartitionGroups = Collections.singletonList(ConsumedPartitionGroup.fromSinglePartition(intermediateResultPartitionId));
    Map<IntermediateResultPartitionID, DefaultResultPartition> resultPartitionById = Collections.singletonMap(intermediateResultPartitionId, schedulingResultPartition);
    consumerVertex = new DefaultExecutionVertex(new ExecutionVertexID(new JobVertexID(), 0), Collections.emptyList(), stateSupplier, consumedPartitionGroups, resultPartitionById::get);
}
Also used : IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ExecutionState(org.apache.flink.runtime.execution.ExecutionState) Test(org.junit.Test) IterableUtils(org.apache.flink.util.IterableUtils) IntermediateDataSetID(org.apache.flink.runtime.jobgraph.IntermediateDataSetID) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) Supplier(java.util.function.Supplier) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ResultPartitionState(org.apache.flink.runtime.scheduler.strategy.ResultPartitionState) SchedulingResultPartition(org.apache.flink.runtime.scheduler.strategy.SchedulingResultPartition) List(java.util.List) ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) BLOCKING(org.apache.flink.runtime.io.network.partition.ResultPartitionType.BLOCKING) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) Before(org.junit.Before)
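
The suppliers that throw UnsupportedOperationException act as "must not be called" stubs: if the code under test unexpectedly touches a dependency the test does not exercise, the test fails loudly instead of silently working with a bogus value. A minimal sketch of the idiom, with all names hypothetical:

import java.util.function.Supplier;

public class FailingStubSketch {
    static <T> Supplier<T> mustNotBeCalled() {
        return () -> {
            throw new UnsupportedOperationException("not expected in this test");
        };
    }

    public static void main(String[] args) {
        Supplier<String> state = () -> "CREATED";    // exercised by the test
        Supplier<String> unused = mustNotBeCalled(); // guards an unexercised path
        System.out.println(state.get());
        try {
            unused.get();
        } catch (UnsupportedOperationException e) {
            System.out.println("stub tripped: " + e.getMessage());
        }
    }
}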

Example 65 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultSchedulingPipelinedRegionTest, method returnsIncidentBlockingPartitions.

/**
 * Tests that the consumed inputs of the pipelined regions are computed correctly using the
 * job graph below.
 *
 * <pre>
 *          c
 *        /  X
 * a -+- b   e
 *       \  /
 *        d
 * </pre>
 *
 * <p>Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
    final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);
    b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    final DefaultExecutionGraph simpleTestGraph = ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
    final DefaultExecutionTopology topology = DefaultExecutionTopology.fromExecutionGraph(simpleTestGraph);
    final DefaultSchedulingPipelinedRegion firstPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
    final DefaultSchedulingPipelinedRegion secondPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));
    final DefaultExecutionVertex vertexB0 = topology.getVertex(new ExecutionVertexID(b.getID(), 0));
    final IntermediateResultPartitionID b0ConsumedResultPartition = Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();
    final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = new HashSet<>();
    // Keep only the consumed partitions whose producer lies outside the region,
    // i.e. the region's external blocking inputs.
    for (ConsumedPartitionGroup consumedPartitionGroup : secondPipelinedRegion.getAllBlockingConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            if (!secondPipelinedRegion.contains(topology.getResultPartition(partitionId).getProducer().getId())) {
                secondPipelinedRegionConsumedResults.add(partitionId);
            }
        }
    }
    // Region {a} consumes no blocking partitions; region {b, c, d, e} consumes only a's partition.
    assertThat(firstPipelinedRegion.getAllBlockingConsumedPartitionGroups().iterator().hasNext(), is(false));
    assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) DefaultExecutionGraph(org.apache.flink.runtime.executiongraph.DefaultExecutionGraph) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) HashSet(java.util.HashSet) Test(org.junit.Test)
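
The nested loop reduces each blocking consumed partition group to the partitions whose producer lies outside the region, which is exactly the region's set of external blocking inputs. A generic sketch of that filter, with hypothetical string IDs in place of the topology types:

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ExternalInputsSketch {
    // consumedPartitionGroups: partition IDs the region consumes, grouped as in the topology.
    // producerOf: partition ID -> producing vertex ID. regionVertices: vertices in the region.
    static Set<String> externalInputs(
            List<List<String>> consumedPartitionGroups,
            Map<String, String> producerOf,
            Set<String> regionVertices) {
        Set<String> result = new HashSet<>();
        for (List<String> group : consumedPartitionGroups) {
            for (String partitionId : group) {
                // Keep only partitions produced outside the region.
                if (!regionVertices.contains(producerOf.get(partitionId))) {
                    result.add(partitionId);
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Mirrors the test: region {b0, c0, d0, e0} consumes a0's partition (external)
        // and c0's partition (internal, filtered out).
        System.out.println(externalInputs(
                List.of(List.of("p-a0"), List.of("p-c0")),
                Map.of("p-a0", "a0", "p-c0", "c0"),
                Set.of("b0", "c0", "d0", "e0"))); // [p-a0]
    }
}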

Aggregations

ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) 231
Test (org.junit.Test) 165
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 63
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph) 57
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 54
SchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex) 51
Set (java.util.Set) 48
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) 45
AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest) 45
TestingSchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) 45
Collection (java.util.Collection) 33
TestingSchedulingTopology (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology) 33
HashSet (java.util.HashSet) 30
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex) 30
ArrayList (java.util.ArrayList) 27
Map (java.util.Map) 27
HashMap (java.util.HashMap) 24
List (java.util.List) 24
CompletableFuture (java.util.concurrent.CompletableFuture) 24
TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation) 24