
Example 21 with Scheduler

Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.

The class ExecutionGraphConstructionTest, method testAttachViaIds.

@Test
public void testAttachViaIds() throws Exception {
    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";
    final Configuration cfg = new Configuration();
    // construct part one of the execution graph
    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    JobVertex v3 = new JobVertex("vertex3");
    v1.setParallelism(5);
    v2.setParallelism(7);
    v3.setParallelism(2);
    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);
    v3.setInvokableClass(AbstractInvokable.class);
    // this creates an intermediate result for v1
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    // create results for v2 and v3
    IntermediateDataSet v2result = v2.createAndAddResultDataSet(ResultPartitionType.PIPELINED);
    IntermediateDataSet v3result_1 = v3.createAndAddResultDataSet(ResultPartitionType.PIPELINED);
    IntermediateDataSet v3result_2 = v3.createAndAddResultDataSet(ResultPartitionType.PIPELINED);
    List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3));
    ExecutionGraph eg = new ExecutionGraph(TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), jobId, jobName, cfg, new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(), new NoRestartStrategy(), new Scheduler(TestingUtils.defaultExecutionContext()));
    try {
        eg.attachJobGraph(ordered);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }
    // attach the second part of the graph
    JobVertex v4 = new JobVertex("vertex4");
    JobVertex v5 = new JobVertex("vertex5");
    v4.setParallelism(11);
    v5.setParallelism(4);
    v4.setInvokableClass(AbstractInvokable.class);
    v5.setInvokableClass(AbstractInvokable.class);
    v4.connectIdInput(v2result.getId(), DistributionPattern.ALL_TO_ALL);
    v4.connectIdInput(v3result_1.getId(), DistributionPattern.ALL_TO_ALL);
    v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v5.connectIdInput(v3result_2.getId(), DistributionPattern.ALL_TO_ALL);
    List<JobVertex> ordered2 = new ArrayList<JobVertex>(Arrays.asList(v4, v5));
    try {
        eg.attachJobGraph(ordered2);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }
    // verify
    verifyTestGraph(eg, jobId, v1, v2, v3, v4, v5);
}
Also used: IntermediateDataSet (org.apache.flink.runtime.jobgraph.IntermediateDataSet), Configuration (org.apache.flink.configuration.Configuration), Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler), ArrayList (java.util.ArrayList), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), NoRestartStrategy (org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy), JobException (org.apache.flink.runtime.JobException), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
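
The construction boilerplate above recurs in most of the examples on this page. The following hypothetical helper (createSchedulerBackedGraph is not part of the Flink test suite) condenses that pattern as a sketch; it mirrors the constructor call in testAttachViaIds, assumes the same Flink 1.2-era internal APIs and test utilities used above, and expects the vertices to be fully configured (parallelism, invokable class, inputs) before they are passed in.

// Hypothetical helper, shown only as a sketch of the shared boilerplate.
private static ExecutionGraph createSchedulerBackedGraph(List<JobVertex> vertices) throws Exception {
    ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(),                   // future executor
        TestingUtils.defaultExecutor(),                   // I/O executor
        new JobID(),
        "sketch job",                                     // illustrative job name
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new Scheduler(TestingUtils.defaultExecutionContext()));
    eg.attachJobGraph(vertices);                          // same call the tests make for each graph part
    return eg;
}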

Example 22 with Scheduler

Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.

The class ExecutionGraphDeploymentTest, method testBuildDeploymentDescriptor.

@Test
public void testBuildDeploymentDescriptor() {
    try {
        final JobID jobId = new JobID();
        final JobVertexID jid1 = new JobVertexID();
        final JobVertexID jid2 = new JobVertexID();
        final JobVertexID jid3 = new JobVertexID();
        final JobVertexID jid4 = new JobVertexID();
        JobVertex v1 = new JobVertex("v1", jid1);
        JobVertex v2 = new JobVertex("v2", jid2);
        JobVertex v3 = new JobVertex("v3", jid3);
        JobVertex v4 = new JobVertex("v4", jid4);
        v1.setParallelism(10);
        v2.setParallelism(10);
        v3.setParallelism(10);
        v4.setParallelism(10);
        v1.setInvokableClass(BatchTask.class);
        v2.setInvokableClass(BatchTask.class);
        v3.setInvokableClass(BatchTask.class);
        v4.setInvokableClass(BatchTask.class);
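        // wire v1 -> v2 -> {v3, v4} with all-to-all pipelined edges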
        v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
        ExecutionGraph eg = new ExecutionGraph(TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), jobId, "some job", new Configuration(), new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(), new NoRestartStrategy(), new Scheduler(TestingUtils.defaultExecutionContext()));
        List<JobVertex> ordered = Arrays.asList(v1, v2, v3, v4);
        eg.attachJobGraph(ordered);
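        // pick the fourth subtask of v2 (index 3) and deploy it into a slot offered by a mocked task manager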
        ExecutionJobVertex ejv = eg.getAllVertices().get(jid2);
        ExecutionVertex vertex = ejv.getTaskVertices()[3];
        ExecutionGraphTestUtils.SimpleActorGateway instanceGateway = new ExecutionGraphTestUtils.SimpleActorGateway(TestingUtils.directExecutionContext());
        final Instance instance = getInstance(new ActorTaskManagerGateway(instanceGateway));
        final SimpleSlot slot = instance.allocateSimpleSlot(jobId);
        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
        vertex.deployToSlot(slot);
        assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
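        // the test gateway records the TaskDeploymentDescriptor shipped during deployment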
        TaskDeploymentDescriptor descr = instanceGateway.lastTDD;
        assertNotNull(descr);
        JobInformation jobInformation = descr.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
        TaskInformation taskInformation = descr.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());
        assertEquals(jobId, jobInformation.getJobId());
        assertEquals(jid2, taskInformation.getJobVertexId());
        assertEquals(3, descr.getSubtaskIndex());
        assertEquals(10, taskInformation.getNumberOfSubtasks());
        assertEquals(BatchTask.class.getName(), taskInformation.getInvokableClassName());
        assertEquals("v2", taskInformation.getTaskName());
        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = descr.getProducedPartitions();
        Collection<InputGateDeploymentDescriptor> consumedPartitions = descr.getInputGates();
        assertEquals(2, producedPartitions.size());
        assertEquals(1, consumedPartitions.size());
        Iterator<ResultPartitionDeploymentDescriptor> iteratorProducedPartitions = producedPartitions.iterator();
        Iterator<InputGateDeploymentDescriptor> iteratorConsumedPartitions = consumedPartitions.iterator();
        assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
        assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
        assertEquals(10, iteratorConsumedPartitions.next().getInputChannelDeploymentDescriptors().length);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: ResultPartitionDeploymentDescriptor (org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor), Configuration (org.apache.flink.configuration.Configuration), Instance (org.apache.flink.runtime.instance.Instance), ExecutionGraphTestUtils.getInstance (org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.getInstance), Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), SimpleSlot (org.apache.flink.runtime.instance.SimpleSlot), ActorTaskManagerGateway (org.apache.flink.runtime.jobmanager.slots.ActorTaskManagerGateway), TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor), BatchTask (org.apache.flink.runtime.operators.BatchTask), InputGateDeploymentDescriptor (org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor), NoRestartStrategy (org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
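
The deployment half of this test can be read as a small, self-contained pattern. The helper below is hypothetical (inspectDeployment is not a Flink method) and only rearranges calls that already appear above: the SimpleActorGateway records the last TaskDeploymentDescriptor it receives, which is what makes the later assertions on descr possible.

// Hypothetical helper: deploy one ExecutionVertex into a slot backed by the test gateway
// and return the TaskDeploymentDescriptor that deployToSlot() shipped to it.
private static TaskDeploymentDescriptor inspectDeployment(ExecutionVertex vertex, JobID jobId) throws Exception {
    ExecutionGraphTestUtils.SimpleActorGateway gateway =
        new ExecutionGraphTestUtils.SimpleActorGateway(TestingUtils.directExecutionContext());
    Instance instance = ExecutionGraphTestUtils.getInstance(new ActorTaskManagerGateway(gateway));
    SimpleSlot slot = instance.allocateSimpleSlot(jobId);
    vertex.deployToSlot(slot);       // CREATED -> DEPLOYING; sends the descriptor to the gateway
    return gateway.lastTDD;          // captured by SimpleActorGateway
}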

Example 23 with Scheduler

Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.

The class ExecutionGraphCheckpointCoordinatorTest, method createExecutionGraphAndEnableCheckpointing.

private ExecutionGraph createExecutionGraphAndEnableCheckpointing(CheckpointIDCounter counter, CompletedCheckpointStore store) throws Exception {
    ExecutionGraph executionGraph = new ExecutionGraph(TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), new JobID(), "test", new Configuration(), new SerializedValue<>(new ExecutionConfig()), Time.days(1L), new NoRestartStrategy(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList(), new Scheduler(TestingUtils.defaultExecutionContext()), ClassLoader.getSystemClassLoader(), new UnregisteredMetricsGroup());
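    // enable checkpointing (100 ms interval/timeout/min pause, at most one concurrent checkpoint);
    // the empty vertex lists mean no tasks trigger, acknowledge or commit checkpoints here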
    executionGraph.enableCheckpointing(100, 100, 100, 1, ExternalizedCheckpointSettings.none(), Collections.<ExecutionJobVertex>emptyList(), Collections.<ExecutionJobVertex>emptyList(), Collections.<ExecutionJobVertex>emptyList(), counter, store, null, null, CheckpointStatsTrackerTest.createTestTracker());
    JobVertex jobVertex = new JobVertex("MockVertex");
    jobVertex.setInvokableClass(AbstractInvokable.class);
    executionGraph.attachJobGraph(Collections.singletonList(jobVertex));
    return executionGraph;
}
Also used: UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex), Configuration (org.apache.flink.configuration.Configuration), Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler), ExecutionGraph (org.apache.flink.runtime.executiongraph.ExecutionGraph), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), NoRestartStrategy (org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy), JobID (org.apache.flink.api.common.JobID)
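
A sketch of how this helper might be exercised, assuming Mockito is on the test classpath; sketchCoordinatorIsInstalled is a hypothetical test name, and the assertion only checks that enableCheckpointing() installed a CheckpointCoordinator on the graph.

// Hedged sketch, not an actual test from ExecutionGraphCheckpointCoordinatorTest.
@Test
public void sketchCoordinatorIsInstalled() throws Exception {
    CheckpointIDCounter counter = mock(CheckpointIDCounter.class);
    CompletedCheckpointStore store = mock(CompletedCheckpointStore.class);
    ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);
    // enableCheckpointing(...) is expected to wire a CheckpointCoordinator into the graph
    assertNotNull(graph.getCheckpointCoordinator());
}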

Example 24 with Scheduler

Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.

The class ExecutionGraphRestartTest, method testFailExecutionAfterCancel.

/**
 * Tests that a graph is not restarted after cancellation via a call to
 * {@link ExecutionGraph#fail(Throwable)}. This can happen when a slot is
 * released concurrently with cancellation.
 */
@Test
public void testFailExecutionAfterCancel() throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(new ActorTaskManagerGateway(new SimpleActorGateway(TestingUtils.directExecutionContext())), 2);
    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);
    JobVertex vertex = newJobVertex("Test Vertex", 1, NoOpInvokable.class);
    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);
    ExecutionGraph eg = newExecutionGraph(new InfiniteDelayRestartStrategy(), scheduler);
    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, eg.getState());
    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());
    // Fail right after cancel (for example with concurrent slot release)
    eg.cancel();
    for (ExecutionVertex v : eg.getAllExecutionVertices()) {
        v.getCurrentExecutionAttempt().fail(new Exception("Test Exception"));
    }
    assertEquals(JobStatus.CANCELED, eg.getState());
    Execution execution = eg.getAllExecutionVertices().iterator().next().getCurrentExecutionAttempt();
    execution.cancelingComplete();
    assertEquals(JobStatus.CANCELED, eg.getState());
}
Also used: JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), InfiniteDelayRestartStrategy (org.apache.flink.runtime.executiongraph.restart.InfiniteDelayRestartStrategy), Instance (org.apache.flink.runtime.instance.Instance), Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), SimpleActorGateway (org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.SimpleActorGateway), SuppressRestartsException (org.apache.flink.runtime.execution.SuppressRestartsException), IOException (java.io.IOException), ActorTaskManagerGateway (org.apache.flink.runtime.jobmanager.slots.ActorTaskManagerGateway), Test (org.junit.Test)
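
One detail worth noting: a Scheduler only hands out slots from Instances that were registered through newInstanceAvailable(), which is why the test creates a two-slot instance before calling scheduleForExecution(). A hypothetical helper (createSchedulerWithSlots is not part of the test class) condensing that setup:

// Hypothetical helper: a Scheduler pre-loaded with one fake task manager instance offering
// the requested number of slots. Without newInstanceAvailable(), scheduleForExecution()
// would find no resources to deploy into.
private static Scheduler createSchedulerWithSlots(int numSlots) {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(new SimpleActorGateway(TestingUtils.directExecutionContext())),
        numSlots);
    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);
    return scheduler;
}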

Example 25 with Scheduler

Use of org.apache.flink.runtime.jobmanager.scheduler.Scheduler in project flink by apache.

The class ExecutionGraphSignalsTest, method prepare.

@Before
public void prepare() throws Exception {
    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";
    final Configuration cfg = new Configuration();
    assert (mockEJV.length == 5);
    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    JobVertex v3 = new JobVertex("vertex3");
    JobVertex v4 = new JobVertex("vertex4");
    JobVertex v5 = new JobVertex("vertex5");
    for (int i = 0; i < mockEJV.length; ++i) {
        mockEJV[i] = mock(ExecutionJobVertex.class);
        this.mockEV[i] = new ExecutionVertex[dop[i]];
        for (int j = 0; j < dop[i]; ++j) {
            this.mockEV[i][j] = mock(ExecutionVertex.class);
        }
        when(mockEJV[i].getProducedDataSets()).thenReturn(new IntermediateResult[0]);
        when(mockEJV[i].getTaskVertices()).thenReturn(this.mockEV[i]);
    }
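    // intercept ExecutionJobVertex construction so that attachJobGraph() below wires in the mocks prepared above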
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(any(ExecutionGraph.class), same(v1), any(Integer.class).intValue(), any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[0]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(any(ExecutionGraph.class), same(v2), any(Integer.class).intValue(), any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[1]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(any(ExecutionGraph.class), same(v3), any(Integer.class).intValue(), any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[2]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(any(ExecutionGraph.class), same(v4), any(Integer.class).intValue(), any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[3]);
    PowerMockito.whenNew(ExecutionJobVertex.class).withArguments(any(ExecutionGraph.class), same(v5), any(Integer.class).intValue(), any(FiniteDuration.class), any(Long.class).longValue()).thenReturn(mockEJV[4]);
    v1.setParallelism(dop[0]);
    v2.setParallelism(dop[1]);
    v3.setParallelism(dop[2]);
    v4.setParallelism(dop[3]);
    v5.setParallelism(dop[4]);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(1, 0);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(3, 1);
    v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(3, 2);
    v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(4, 3);
    v5.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    mockNumberOfInputs(4, 2);
    List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));
    eg = new ExecutionGraph(TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), jobId, jobName, cfg, new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(), new NoRestartStrategy(), new Scheduler(TestingUtils.defaultExecutionContext()));
    eg.attachJobGraph(ordered);
    f = eg.getClass().getDeclaredField("state");
    f.setAccessible(true);
}
Also used: Configuration (org.apache.flink.configuration.Configuration), Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler), ArrayList (java.util.ArrayList), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), SerializedValue (org.apache.flink.util.SerializedValue), NoRestartStrategy (org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), JobID (org.apache.flink.api.common.JobID), Before (org.junit.Before)
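
A sketch of how a test in this class might use the prepared graph together with the reflective state field; sketchCancelFromRunning is a hypothetical test name, and the assertion relies on cancel() transitioning a RUNNING graph to CANCELLING (the mocked ExecutionJobVertex cancel() calls are no-ops, so the graph stays in that state).

// Hedged sketch, not an actual test from ExecutionGraphSignalsTest.
@Test
public void sketchCancelFromRunning() throws Exception {
    f.set(eg, JobStatus.RUNNING);      // force the graph into RUNNING via the reflective field
    eg.cancel();                       // signal under test
    assertEquals(JobStatus.CANCELLING, eg.getState());
}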

Aggregations

Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler): 40 usages
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 34 usages
Test (org.junit.Test): 32 usages
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 29 usages
NoRestartStrategy (org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy): 25 usages
JobID (org.apache.flink.api.common.JobID): 22 usages
Configuration (org.apache.flink.configuration.Configuration): 21 usages
ArrayList (java.util.ArrayList): 17 usages
JobException (org.apache.flink.runtime.JobException): 17 usages
ActorTaskManagerGateway (org.apache.flink.runtime.jobmanager.slots.ActorTaskManagerGateway): 16 usages
Instance (org.apache.flink.runtime.instance.Instance): 14 usages
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 11 usages
IOException (java.io.IOException): 9 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 9 usages
SimpleSlot (org.apache.flink.runtime.instance.SimpleSlot): 8 usages
SimpleActorGateway (org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.SimpleActorGateway): 7 usages
SuppressRestartsException (org.apache.flink.runtime.execution.SuppressRestartsException): 6 usages
FlinkCompletableFuture (org.apache.flink.runtime.concurrent.impl.FlinkCompletableFuture): 4 usages
ExecutionGraphTestUtils.getInstance (org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.getInstance): 4 usages
IntermediateDataSet (org.apache.flink.runtime.jobgraph.IntermediateDataSet): 4 usages