
Example 56 with JobClient

use of org.apache.flink.core.execution.JobClient in project flink by apache.

the class LocalExecutorITCase method testLocalExecutorWithWordCount.

@Test(timeout = 60_000)
public void testLocalExecutorWithWordCount() throws InterruptedException {
    try {
        // set up the files
        File inFile = File.createTempFile("wctext", ".in");
        File outFile = File.createTempFile("wctext", ".out");
        inFile.deleteOnExit();
        outFile.deleteOnExit();
        try (FileWriter fw = new FileWriter(inFile)) {
            fw.write(WordCountData.TEXT);
        }
        final Configuration config = new Configuration();
        config.setBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE, true);
        config.setBoolean(DeploymentOptions.ATTACHED, true);
        Plan wcPlan = getWordCountPlan(inFile, outFile, parallelism);
        wcPlan.setExecutionConfig(new ExecutionConfig());
        JobClient jobClient = executor.execute(wcPlan, config, ClassLoader.getSystemClassLoader()).get();
        jobClient.getJobExecutionResult().get();
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
    assertThat(miniCluster.isRunning(), is(false));
}
Also used : Configuration(org.apache.flink.configuration.Configuration) FileWriter(java.io.FileWriter) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) Plan(org.apache.flink.api.common.Plan) File(java.io.File) JobClient(org.apache.flink.core.execution.JobClient) Test(org.junit.Test)

Example 57 with JobClient

use of org.apache.flink.core.execution.JobClient in project flink by apache.

the class RestoreUpgradedJobITCase method runOriginalJob.

@NotNull
private String runOriginalJob() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    env.getCheckpointConfig().enableUnalignedCheckpoints(false);
    env.getCheckpointConfig().setCheckpointStorage("file://" + temporaryFolder.getRoot().getAbsolutePath());
    env.setParallelism(PARALLELISM);
    env.enableCheckpointing(Integer.MAX_VALUE);
    // Different order of maps before and after savepoint.
    env.addSource(new IntSource(allDataEmittedLatch))
            .map(new IntMap(MAP_5.id())).uid(MAP_5.name())
            .forward()
            .map(new IntMap(MAP_1.id())).uid(MAP_1.name()).slotSharingGroup("anotherSharingGroup")
            .keyBy((key) -> key)
            .map(new IntMap(MAP_6.id())).uid(MAP_6.name())
            .rebalance()
            .map(new IntMap(MAP_4.id())).uid(MAP_4.name())
            .broadcast()
            .map(new IntMap(MAP_2.id())).uid(MAP_2.name())
            .rescale()
            .map(new IntMap(MAP_3.id())).uid(MAP_3.name())
            .addSink(new IntSink(result)).setParallelism(1);
    // when: Job is executed.
    JobClient jobClient = env.executeAsync("Total sum");
    waitForAllTaskRunning(CLUSTER.getMiniCluster(), jobClient.getJobID(), false);
    allDataEmittedLatch.get().await();
    allDataEmittedLatch.get().reset();
    return stopWithSnapshot(jobClient);
}
Also used : OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) SavepointFormatType(org.apache.flink.core.execution.SavepointFormatType) SharedObjects(org.apache.flink.testutils.junit.SharedObjects) MAP_1(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_1) RunWith(org.junit.runner.RunWith) MAP_2(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_2) MAP_3(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_3) SavepointConfigOptions(org.apache.flink.runtime.jobgraph.SavepointConfigOptions) MAP_4(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_4) MAP_5(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_5) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) MAP_6(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_6) MiniClusterResourceConfiguration(org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration) ListState(org.apache.flink.api.common.state.ListState) CANONICAL_SAVEPOINT(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.TestCheckpointType.CANONICAL_SAVEPOINT) CommonTestUtils.waitForAllTaskRunning(org.apache.flink.runtime.testutils.CommonTestUtils.waitForAllTaskRunning) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) TestLogger(org.apache.flink.util.TestLogger) ALIGNED_CHECKPOINT(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.TestCheckpointType.ALIGNED_CHECKPOINT) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) ClassRule(org.junit.ClassRule) SharedReference(org.apache.flink.testutils.junit.SharedReference) Parameterized(org.junit.runners.Parameterized) Types(org.apache.flink.api.common.typeinfo.Types) MiniClusterWithClientResource(org.apache.flink.test.util.MiniClusterWithClientResource) Preconditions.checkState(org.apache.flink.util.Preconditions.checkState) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) Iterator(java.util.Iterator) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Configuration(org.apache.flink.configuration.Configuration) Test(org.junit.Test) JobClient(org.apache.flink.core.execution.JobClient) ExecutionException(java.util.concurrent.ExecutionException) LockSupport(java.util.concurrent.locks.LockSupport) AtomicLong(java.util.concurrent.atomic.AtomicLong) Rule(org.junit.Rule) CheckpointConfig(org.apache.flink.streaming.api.environment.CheckpointConfig) NATIVE_SAVEPOINT(org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.TestCheckpointType.NATIVE_SAVEPOINT) Matchers.is(org.hamcrest.Matchers.is) NotNull(org.jetbrains.annotations.NotNull) TemporaryFolder(org.junit.rules.TemporaryFolder) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
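
The stopWithSnapshot call at the end of this method is not shown in the excerpt. A minimal sketch of how such a helper might be written against the JobClient API, assuming a canonical savepoint into a given directory (the method name and parameters here are illustrative, not the actual RestoreUpgradedJobITCase code):

private static String stopWithCanonicalSavepoint(JobClient jobClient, String savepointDir) throws Exception {
    // Stop the job (without advancing to end of event time), take a canonical savepoint,
    // and block until the savepoint path is available.
    return jobClient.stopWithSavepoint(false, savepointDir, SavepointFormatType.CANONICAL).get();
}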

Example 58 with JobClient

use of org.apache.flink.core.execution.JobClient in project flink by apache.

the class UnalignedCheckpointCompatibilityITCase method runAndTakeSavepoint.

private Tuple2<String, Map<String, Object>> runAndTakeSavepoint() throws Exception {
    JobClient jobClient = submitJobInitially(env(startAligned, 0));
    waitForAllTaskRunning(() -> miniCluster.getMiniCluster().getExecutionGraph(jobClient.getJobID()).get(), false);
    // wait for some backpressure from sink
    Thread.sleep(FIRST_RUN_BACKPRESSURE_MS);
    Future<Map<String, Object>> accFuture = jobClient.getJobExecutionResult().thenApply(JobExecutionResult::getAllAccumulatorResults);
    Future<String> savepointFuture = jobClient.stopWithSavepoint(false, tempFolder().toURI().toString(), SavepointFormatType.CANONICAL);
    return new Tuple2<>(savepointFuture.get(), accFuture.get());
}
Also used : JobExecutionResult(org.apache.flink.api.common.JobExecutionResult) Tuple2(org.apache.flink.api.java.tuple.Tuple2) JobClient(org.apache.flink.core.execution.JobClient) Map(java.util.Map) Collections.emptyMap(java.util.Collections.emptyMap)

Example 59 with JobClient

use of org.apache.flink.core.execution.JobClient in project flink by apache.

the class CheckpointStoreITCase method testJobClientRemainsResponsiveDuringCompletedCheckpointStoreRecovery.

@Test
public void testJobClientRemainsResponsiveDuringCompletedCheckpointStoreRecovery() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(10);
    env.setRestartStrategy(fixedDelayRestart(2, /* failure on processing + on recovery */ 0));
    env.addSource(emitUntil(() -> FailingMapper.failedAndProcessed)).map(new FailingMapper()).addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync();
    BlockingHighAvailabilityServiceFactory.fetchRemoteCheckpointsStart.await();
    for (int i = 0; i < 10; i++) {
        final JobStatus jobStatus = jobClient.getJobStatus().get();
        assertEquals(JobStatus.INITIALIZING, jobStatus);
    }
    BlockingHighAvailabilityServiceFactory.fetchRemoteCheckpointsFinished.countDown();
    // Wait for the job to finish.
    jobClient.getJobExecutionResult().get();
    checkState(FailingMapper.failedAndProcessed);
}
Also used : JobStatus(org.apache.flink.api.common.JobStatus) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JobClient(org.apache.flink.core.execution.JobClient) Test(org.junit.Test)
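
The loop above depends on JobClient.getJobStatus() remaining responsive while the completed checkpoint store is still recovering. As a complement, a minimal sketch of polling a job submitted with executeAsync until it leaves INITIALIZING (the helper name, poll interval, and use of java.time.Duration are assumptions for illustration):

private static JobStatus awaitNotInitializing(JobClient jobClient, Duration timeout) throws Exception {
    long deadlineNanos = System.nanoTime() + timeout.toNanos();
    // getJobStatus() returns a CompletableFuture<JobStatus>; polling it does not block recovery.
    JobStatus status = jobClient.getJobStatus().get();
    while (status == JobStatus.INITIALIZING && System.nanoTime() < deadlineNanos) {
        Thread.sleep(50);
        status = jobClient.getJobStatus().get();
    }
    return status;
}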

Example 60 with JobClient

use of org.apache.flink.core.execution.JobClient in project flink by apache.

the class TestUtils method tryExecute.

/**
 * Execute the job and wait for the job result synchronously.
 *
 * @throws Exception If executing the environment throws an exception which does not have {@link
 *     SuccessException} as a cause.
 */
public static void tryExecute(StreamExecutionEnvironment see, String name) throws Exception {
    JobClient jobClient = null;
    try {
        StreamGraph graph = see.getStreamGraph();
        graph.setJobName(name);
        jobClient = see.executeAsync(graph);
        jobClient.getJobExecutionResult().get();
    } catch (Throwable root) {
        if (jobClient != null) {
            try {
                jobClient.cancel().get();
            } catch (Exception e) {
                // Exception could be thrown if the job has already finished.
                // Ignore the exception.
            }
        }
        Optional<SuccessException> successAsCause = ExceptionUtils.findThrowable(root, SuccessException.class);
        if (!successAsCause.isPresent()) {
            root.printStackTrace();
            fail("Test failed: " + root.getMessage());
        }
    }
}
Also used : Optional(java.util.Optional) StreamGraph(org.apache.flink.streaming.api.graph.StreamGraph) JobClient(org.apache.flink.core.execution.JobClient) IOException(java.io.IOException) JobInitializationException(org.apache.flink.runtime.client.JobInitializationException)
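
A minimal usage sketch for tryExecute (the pipeline below is a placeholder; a real test would wire in a source and a sink that throws SuccessException once the expected output has been observed):

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromElements(1, 2, 3).print();
// Builds the stream graph, submits it via executeAsync, waits on the returned JobClient,
// and fails the test only if the failure is not caused by a SuccessException.
TestUtils.tryExecute(env, "tryExecute usage sketch");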

Aggregations

JobClient (org.apache.flink.core.execution.JobClient)70
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)36
Test (org.junit.Test)32
JobExecutionResult (org.apache.flink.api.common.JobExecutionResult)16
Configuration (org.apache.flink.configuration.Configuration)16
JobListener (org.apache.flink.core.execution.JobListener)14
ArrayList (java.util.ArrayList)12
List (java.util.List)10
JobID (org.apache.flink.api.common.JobID)10
ExecutionException (java.util.concurrent.ExecutionException)9
AtomicReference (java.util.concurrent.atomic.AtomicReference)8
DEFAULT_COLLECT_DATA_TIMEOUT (org.apache.flink.connector.testframe.utils.ConnectorTestConstants.DEFAULT_COLLECT_DATA_TIMEOUT)8
DEFAULT_JOB_STATUS_CHANGE_TIMEOUT (org.apache.flink.connector.testframe.utils.ConnectorTestConstants.DEFAULT_JOB_STATUS_CHANGE_TIMEOUT)8
IOException (java.io.IOException)7
DisplayName (org.junit.jupiter.api.DisplayName)7
TestTemplate (org.junit.jupiter.api.TestTemplate)7
Iterator (java.util.Iterator)6
CompletableFuture (java.util.concurrent.CompletableFuture)6
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment)6
Preconditions.checkNotNull (org.apache.flink.util.Preconditions.checkNotNull)6
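
Taken together, the aggregations reflect the lifecycle these examples share: build a pipeline on a StreamExecutionEnvironment, submit it with executeAsync, and drive the job through the returned JobClient. A minimal end-to-end sketch of that pattern (job name and printed output are placeholders):

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromElements("a", "b", "c").print();
// Non-blocking submission; the JobClient is the handle for status, cancellation, and results.
JobClient jobClient = env.executeAsync("job-client-pattern");
System.out.println("Submitted job " + jobClient.getJobID());
// Block until the job finishes, then read accumulators from the JobExecutionResult.
JobExecutionResult result = jobClient.getJobExecutionResult().get();
System.out.println("Accumulators: " + result.getAllAccumulatorResults());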