
Example 91 with JobVertex

Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

Class FileBufferReaderITCase, method createJobGraph.

private static JobGraph createJobGraph() {
    final SlotSharingGroup group1 = new SlotSharingGroup();
    final SlotSharingGroup group2 = new SlotSharingGroup();
    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(TestSourceInvokable.class);
    source.setParallelism(parallelism);
    source.setSlotSharingGroup(group1);
    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(TestSinkInvokable.class);
    sink.setParallelism(parallelism);
    sink.setSlotSharingGroup(group2);
    sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    return JobGraphTestUtils.batchJobGraph(source, sink);
}
Also used: JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), SlotSharingGroup (org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup)
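
The method above relies on the ITCase's parallelism field and its TestSourceInvokable/TestSinkInvokable task classes, which are not shown here. A minimal self-contained sketch of the same wiring, assuming NoOpInvokable from flink-runtime's test tasks is available and building the graph with JobGraphBuilder (as in Example 95 below) instead of JobGraphTestUtils:

private static JobGraph twoVertexBlockingJobGraph(int parallelism) {
    final JobVertex source = new JobVertex("source");
    // NoOpInvokable stands in for TestSourceInvokable so the sketch compiles on its own.
    source.setInvokableClass(NoOpInvokable.class);
    source.setParallelism(parallelism);
    source.setSlotSharingGroup(new SlotSharingGroup());
    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(NoOpInvokable.class);
    sink.setParallelism(parallelism);
    sink.setSlotSharingGroup(new SlotSharingGroup());
    // ALL_TO_ALL + BLOCKING: every sink subtask reads every source partition, and sink
    // tasks only become schedulable once the source has produced its blocking result.
    sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    return JobGraphBuilder.newBatchJobGraphBuilder()
            .addJobVertices(Arrays.asList(source, sink))
            .build();
}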

Example 92 with JobVertex

Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

Class AdaptiveBatchSchedulerTest, method testAdaptiveBatchScheduler.

@Test
public void testAdaptiveBatchScheduler() throws Exception {
    JobGraph jobGraph = createJobGraph(false);
    Iterator<JobVertex> jobVertexIterator = jobGraph.getVertices().iterator();
    JobVertex source1 = jobVertexIterator.next();
    JobVertex source2 = jobVertexIterator.next();
    JobVertex sink = jobVertexIterator.next();
    SchedulerBase scheduler = createScheduler(jobGraph);
    final DefaultExecutionGraph graph = (DefaultExecutionGraph) scheduler.getExecutionGraph();
    final ExecutionJobVertex sinkExecutionJobVertex = graph.getJobVertex(sink.getID());
    scheduler.startScheduling();
    assertThat(sinkExecutionJobVertex.getParallelism(), is(-1));
    // trigger source1 finished.
    transitionExecutionsState(scheduler, ExecutionState.FINISHED, source1);
    assertThat(sinkExecutionJobVertex.getParallelism(), is(-1));
    // trigger source2 finished.
    transitionExecutionsState(scheduler, ExecutionState.FINISHED, source2);
    assertThat(sinkExecutionJobVertex.getParallelism(), is(10));
    // check that the jobGraph is updated
    assertThat(sink.getParallelism(), is(10));
}
Also used: JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex), SchedulerBase (org.apache.flink.runtime.scheduler.SchedulerBase), DefaultExecutionGraph (org.apache.flink.runtime.executiongraph.DefaultExecutionGraph), Test (org.junit.Test)
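
The point of the test is that the sink's parallelism stays at -1 (undecided) until all upstream vertices finish, and is only then derived from the size of the produced data. A rough sketch of how such a setup might look; the option names and the use of setMaxParallelism are assumptions and should be checked against the Flink version in use:

// Select the adaptive batch scheduler (assumed option names).
final Configuration configuration = new Configuration();
configuration.set(JobManagerOptions.SCHEDULER, JobManagerOptions.SchedulerType.AdaptiveBatch);

// Leave the vertex parallelism unset: it stays at ExecutionConfig.PARALLELISM_DEFAULT (-1)
// until the scheduler decides it, bounded above by the configured max parallelism.
final JobVertex sink = new JobVertex("sink");
sink.setMaxParallelism(10);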

Example 93 with JobVertex

Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

Class DeployingDownstreamTasksInBatchJobBenchmark, method setup.

@Override
public void setup(JobConfiguration jobConfiguration) throws Exception {
    super.setup(jobConfiguration);
    // Deploy every source execution up front so that only the downstream (sink) tasks
    // remain to be deployed by the benchmark itself.
    final JobVertex source = jobVertices.get(0);
    for (ExecutionVertex ev : executionGraph.getJobVertex(source.getID()).getTaskVertices()) {
        Execution execution = ev.getCurrentExecutionAttempt();
        execution.transitionState(ExecutionState.SCHEDULED);
        execution.deploy();
    }
    // Cache the sink's execution vertices for the measured deployment step.
    final JobVertex sink = jobVertices.get(1);
    vertices = executionGraph.getJobVertex(sink.getID()).getTaskVertices();
}
Also used: JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), Execution (org.apache.flink.runtime.executiongraph.Execution), ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex)
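
The measured step of the benchmark presumably deploys the cached sink vertices in the same way the setup deploys the sources; a sketch under that assumption (the method name deployDownstreamTasks is illustrative, not the actual benchmark API):

private void deployDownstreamTasks() throws Exception {
    // Deploy every downstream (sink) execution; the upstream tasks were already deployed in setup().
    for (ExecutionVertex ev : vertices) {
        final Execution execution = ev.getCurrentExecutionAttempt();
        execution.transitionState(ExecutionState.SCHEDULED);
        execution.deploy();
    }
}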

Example 94 with JobVertex

Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

Class PipelinedRegionSchedulingStrategyTest, method createJobVertex.

private static JobVertex createJobVertex(String vertexName, int parallelism) {
    JobVertex jobVertex = new JobVertex(vertexName);
    jobVertex.setParallelism(parallelism);
    jobVertex.setInvokableClass(AbstractInvokable.class);
    return jobVertex;
}
Also used: JobVertex (org.apache.flink.runtime.jobgraph.JobVertex)
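
The helper only builds an unconnected vertex; the tests wire the vertices up themselves. A small illustrative usage (vertex names and edge types are arbitrary):

final JobVertex producer = createJobVertex("producer", 4);
final JobVertex consumer = createJobVertex("consumer", 2);
consumer.connectNewDataSetAsInput(producer, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);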

Example 95 with JobVertex

Use of org.apache.flink.runtime.jobgraph.JobVertex in project flink by apache.

Class PipelinedRegionSchedulingStrategyTest, method testSchedulingTopologyWithCrossRegionConsumedPartitionGroups.

@Test
public void testSchedulingTopologyWithCrossRegionConsumedPartitionGroups() throws Exception {
    final JobVertex v1 = createJobVertex("v1", 4);
    final JobVertex v2 = createJobVertex("v2", 3);
    final JobVertex v3 = createJobVertex("v3", 2);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    v3.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));
    final JobGraph jobGraph = JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
    final ExecutionGraph executionGraph = TestingDefaultExecutionGraphBuilder.newBuilder().setJobGraph(jobGraph).build();
    final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
    // Test whether the topology is built correctly
    final List<SchedulingPipelinedRegion> regions = new ArrayList<>();
    schedulingTopology.getAllPipelinedRegions().forEach(regions::add);
    assertEquals(2, regions.size());
    final ExecutionVertex v31 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[0];
    final Set<ExecutionVertexID> region1 = new HashSet<>();
    schedulingTopology.getPipelinedRegionOfVertex(v31.getID()).getVertices().forEach(vertex -> region1.add(vertex.getId()));
    assertEquals(5, region1.size());
    final ExecutionVertex v32 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[1];
    final Set<ExecutionVertexID> region2 = new HashSet<>();
    schedulingTopology.getPipelinedRegionOfVertex(v32.getID()).getVertices().forEach(vertex -> region2.add(vertex.getId()));
    assertEquals(4, region2.size());
    // Test whether region 1 is scheduled correctly
    PipelinedRegionSchedulingStrategy schedulingStrategy = startScheduling(schedulingTopology);
    assertEquals(1, testingSchedulerOperation.getScheduledVertices().size());
    final List<ExecutionVertexDeploymentOption> deploymentOptions1 = testingSchedulerOperation.getScheduledVertices().get(0);
    assertEquals(5, deploymentOptions1.size());
    for (ExecutionVertexDeploymentOption deploymentOption : deploymentOptions1) {
        assertTrue(region1.contains(deploymentOption.getExecutionVertexId()));
    }
    // Test whether the region 2 is scheduled correctly when region 1 is finished
    final ExecutionVertex v22 = executionGraph.getJobVertex(v2.getID()).getTaskVertices()[1];
    v22.finishAllBlockingPartitions();
    schedulingStrategy.onExecutionStateChange(v22.getID(), ExecutionState.FINISHED);
    assertEquals(2, testingSchedulerOperation.getScheduledVertices().size());
    final List<ExecutionVertexDeploymentOption> deploymentOptions2 = testingSchedulerOperation.getScheduledVertices().get(1);
    assertEquals(4, deploymentOptions2.size());
    for (ExecutionVertexDeploymentOption deploymentOption : deploymentOptions2) {
        assertTrue(region2.contains(deploymentOption.getExecutionVertexId()));
    }
}
Also used: ArrayList (java.util.ArrayList), ExecutionVertexDeploymentOption (org.apache.flink.runtime.scheduler.ExecutionVertexDeploymentOption), ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex), JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), ExecutionGraph (org.apache.flink.runtime.executiongraph.ExecutionGraph), HashSet (java.util.HashSet), Test (org.junit.Test)
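
When a region assertion like the ones above fails, it can help to dump the computed pipelined regions. A small sketch using only the methods already exercised by the test:

for (SchedulingPipelinedRegion region : schedulingTopology.getAllPipelinedRegions()) {
    final StringBuilder members = new StringBuilder("pipelined region:");
    // Collect the execution vertex IDs belonging to this region.
    region.getVertices().forEach(vertex -> members.append(' ').append(vertex.getId()));
    System.out.println(members);
}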

Aggregations

JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 378
Test (org.junit.Test) 230
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph) 197
Configuration (org.apache.flink.configuration.Configuration) 74
JobID (org.apache.flink.api.common.JobID) 60
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 58
ArrayList (java.util.ArrayList) 57
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig) 47
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 44
SlotSharingGroup (org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) 41
SchedulerBase (org.apache.flink.runtime.scheduler.SchedulerBase) 35
HashMap (java.util.HashMap) 30
ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex) 29
IOException (java.io.IOException) 24
ExecutionGraph (org.apache.flink.runtime.executiongraph.ExecutionGraph) 24
TaskConfig (org.apache.flink.runtime.operators.util.TaskConfig) 24
Set (java.util.Set) 23
JobException (org.apache.flink.runtime.JobException) 23
Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler) 23
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 22