
Example 11 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class WritableSavepointITCase method validateModification.

private void validateModification(StateBackend backend, String savepointPath) throws Exception {
    StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    sEnv.setStateBackend(backend);
    DataStream<Account> stream =
            sEnv.fromCollection(accounts)
                    .keyBy(acc -> acc.id)
                    .flatMap(new UpdateAndGetAccount())
                    .uid(ACCOUNT_UID);
    CompletableFuture<Collection<Account>> results = collector.collect(stream);
    stream.map(acc -> acc.id)
            .map(new StatefulOperator())
            .uid(MODIFY_UID)
            .addSink(new DiscardingSink<>());
    JobGraph jobGraph = sEnv.getStreamGraph().getJobGraph();
    jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, false));
    ClusterClient<?> client = MINI_CLUSTER_RESOURCE.getClusterClient();
    Optional<SerializedThrowable> serializedThrowable = client.submitJob(jobGraph).thenCompose(client::requestJobResult).get().getSerializedThrowable();
    Assert.assertFalse(serializedThrowable.isPresent());
    Assert.assertEquals("Unexpected output", 3, results.get().size());
}
Also used : RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) Arrays(java.util.Arrays) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) CompletableFuture(java.util.concurrent.CompletableFuture) MapStateDescriptor(org.apache.flink.api.common.state.MapStateDescriptor) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) ArrayList(java.util.ArrayList) StateBootstrapFunction(org.apache.flink.state.api.functions.StateBootstrapFunction) HashSet(java.util.HashSet) ListState(org.apache.flink.api.common.state.ListState) DataSet(org.apache.flink.api.java.DataSet) StateBackend(org.apache.flink.runtime.state.StateBackend) StreamCollector(org.apache.flink.streaming.util.StreamCollector) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) Collector(org.apache.flink.util.Collector) KeyedStateBootstrapFunction(org.apache.flink.state.api.functions.KeyedStateBootstrapFunction) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) Types(org.apache.flink.api.common.typeinfo.Types) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) DiscardingSink(org.apache.flink.streaming.api.functions.sink.DiscardingSink) AbstractID(org.apache.flink.util.AbstractID) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Collection(java.util.Collection) Configuration(org.apache.flink.configuration.Configuration) Set(java.util.Set) Test(org.junit.Test) FsStateBackend(org.apache.flink.runtime.state.filesystem.FsStateBackend) RocksDBStateBackend(org.apache.flink.contrib.streaming.state.RocksDBStateBackend) DataStream(org.apache.flink.streaming.api.datastream.DataStream) Objects(java.util.Objects) List(java.util.List) Rule(org.junit.Rule) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) ValueState(org.apache.flink.api.common.state.ValueState) ClusterClient(org.apache.flink.client.program.ClusterClient) BroadcastProcessFunction(org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction) HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend) BroadcastStateBootstrapFunction(org.apache.flink.state.api.functions.BroadcastStateBootstrapFunction) SerializedThrowable(org.apache.flink.util.SerializedThrowable) Optional(java.util.Optional) Assert(org.junit.Assert) SavepointRestoreSettings(org.apache.flink.runtime.jobgraph.SavepointRestoreSettings) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) Collection(java.util.Collection) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) SerializedThrowable(org.apache.flink.util.SerializedThrowable)
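The second argument of SavepointRestoreSettings.forPath above is the allowNonRestoredState flag; the test passes false, so the restore fails if the savepoint contains state that no operator in the job claims. A minimal sketch of the opposite setting, useful when stateful operators were deliberately removed from the job. The class name and toy pipeline are placeholders, not part of the test:

import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RestoreWithNonRestoredState {

    public static JobGraph buildRestoredJobGraph(String savepointPath) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Toy pipeline standing in for the modified job.
        env.fromElements(1, 2, 3).map(i -> i * 2).uid("doubler").print();
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        // true = allowNonRestoredState: savepoint state whose uid has no match is skipped
        // instead of failing the submission.
        jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, true));
        return jobGraph;
    }
}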

Example 12 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class SavepointWriterITCase method validateModification.

private void validateModification(StateBackend backend, String savepointPath) throws Exception {
    StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    if (backend != null) {
        sEnv.setStateBackend(backend);
    }
    DataStream<Account> stream =
            sEnv.fromCollection(accounts)
                    .keyBy(acc -> acc.id)
                    .flatMap(new UpdateAndGetAccount())
                    .uid(ACCOUNT_UID);
    CompletableFuture<Collection<Account>> results = collector.collect(stream);
    stream.map(acc -> acc.id)
            .map(new StatefulOperator())
            .uid(MODIFY_UID)
            .addSink(new DiscardingSink<>());
    JobGraph jobGraph = sEnv.getStreamGraph().getJobGraph();
    jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, false));
    ClusterClient<?> client = MINI_CLUSTER_RESOURCE.getClusterClient();
    Optional<SerializedThrowable> serializedThrowable = client.submitJob(jobGraph).thenCompose(client::requestJobResult).get().getSerializedThrowable();
    Assert.assertFalse(serializedThrowable.isPresent());
    Assert.assertEquals("Unexpected output", 3, results.get().size());
}
Also used : RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) Arrays(java.util.Arrays) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) CompletableFuture(java.util.concurrent.CompletableFuture) MapStateDescriptor(org.apache.flink.api.common.state.MapStateDescriptor) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) ArrayList(java.util.ArrayList) StateBootstrapFunction(org.apache.flink.state.api.functions.StateBootstrapFunction) HashSet(java.util.HashSet) ListState(org.apache.flink.api.common.state.ListState) StateBackend(org.apache.flink.runtime.state.StateBackend) StreamCollector(org.apache.flink.streaming.util.StreamCollector) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) Collector(org.apache.flink.util.Collector) KeyedStateBootstrapFunction(org.apache.flink.state.api.functions.KeyedStateBootstrapFunction) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) Types(org.apache.flink.api.common.typeinfo.Types) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) DiscardingSink(org.apache.flink.streaming.api.functions.sink.DiscardingSink) AbstractID(org.apache.flink.util.AbstractID) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Collection(java.util.Collection) Configuration(org.apache.flink.configuration.Configuration) Set(java.util.Set) Test(org.junit.Test) DataStream(org.apache.flink.streaming.api.datastream.DataStream) Objects(java.util.Objects) List(java.util.List) Rule(org.junit.Rule) ValueState(org.apache.flink.api.common.state.ValueState) ClusterClient(org.apache.flink.client.program.ClusterClient) BroadcastProcessFunction(org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction) HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend) BroadcastStateBootstrapFunction(org.apache.flink.state.api.functions.BroadcastStateBootstrapFunction) SerializedThrowable(org.apache.flink.util.SerializedThrowable) Optional(java.util.Optional) Assert(org.junit.Assert) RuntimeExecutionMode(org.apache.flink.api.common.RuntimeExecutionMode) SavepointRestoreSettings(org.apache.flink.runtime.jobgraph.SavepointRestoreSettings) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) Collection(java.util.Collection) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) SerializedThrowable(org.apache.flink.util.SerializedThrowable)
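Unlike Example 11, this variant only calls setStateBackend when a backend instance is passed in; with null the job relies on whatever the environment or cluster configuration specifies. A minimal sketch of selecting the backend through Configuration instead of the programmatic setter; the "hashmap" value and checkpoint directory are illustrative placeholders:

import org.apache.flink.configuration.CheckpointingOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.StateBackendOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ConfiguredBackendEnv {

    public static StreamExecutionEnvironment create() {
        Configuration config = new Configuration();
        // Equivalent of "state.backend: hashmap" in flink-conf.yaml.
        config.set(StateBackendOptions.STATE_BACKEND, "hashmap");
        config.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "file:///tmp/checkpoints");
        // When the test is given a null backend it skips setStateBackend, so the
        // job picks up a configuration-driven backend like this one.
        return StreamExecutionEnvironment.getExecutionEnvironment(config);
    }
}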

Example 13 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class SavepointITCase method testCanRestoreWithModifiedStatelessOperators.

/**
 * FLINK-5985
 *
 * <p>This test ensures we can restore from a savepoint under modifications to the job graph
 * that only concern stateless operators.
 */
@Test
public void testCanRestoreWithModifiedStatelessOperators() throws Exception {
    // Config
    int numTaskManagers = 2;
    int numSlotsPerTaskManager = 2;
    int parallelism = 2;
    // Test deadline
    final Deadline deadline = Deadline.now().plus(Duration.ofMinutes(5));
    // Flink configuration
    final Configuration config = new Configuration();
    config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir.toURI().toString());
    String savepointPath;
    LOG.info("Flink configuration: " + config + ".");
    // Start Flink
    MiniClusterWithClientResource cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setConfiguration(config)
                            .setNumberTaskManagers(numTaskManagers)
                            .setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
                            .build());
    LOG.info("Starting Flink cluster.");
    cluster.before();
    ClusterClient<?> client = cluster.getClusterClient();
    try {
        final StatefulCounter statefulCounter = new StatefulCounter();
        StatefulCounter.resetForTest(parallelism);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);
        env.addSource(new InfiniteTestSource())
                .shuffle()
                .map(value -> 4 * value)
                .shuffle()
                .map(statefulCounter)
                .uid("statefulCounter")
                .shuffle()
                .map(value -> 2 * value)
                .addSink(new DiscardingSink<>());
        JobGraph originalJobGraph = env.getStreamGraph().getJobGraph();
        JobID jobID = client.submitJob(originalJobGraph).get();
        // wait for the Tasks to be ready
        waitForAllTaskRunning(cluster.getMiniCluster(), jobID, false);
        assertTrue(StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
        savepointPath = client.triggerSavepoint(jobID, null, SavepointFormatType.CANONICAL).get();
        LOG.info("Retrieved savepoint: " + savepointPath + ".");
    } finally {
        // Shut down the Flink cluster (thereby canceling the job)
        LOG.info("Shutting down Flink cluster.");
        cluster.after();
    }
    // create a new MiniCluster to make sure we start with completely
    // new resources
    cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setConfiguration(config)
                            .setNumberTaskManagers(numTaskManagers)
                            .setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
                            .build());
    LOG.info("Restarting Flink cluster.");
    cluster.before();
    client = cluster.getClusterClient();
    try {
        // Reset static test helpers
        StatefulCounter.resetForTest(parallelism);
        // Build a modified job graph to resubmit from the savepoint
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);
        // generate a modified job graph that adds a stateless op
        env.addSource(new InfiniteTestSource())
                .shuffle()
                .map(new StatefulCounter())
                .uid("statefulCounter")
                .shuffle()
                .map(value -> value)
                .addSink(new DiscardingSink<>());
        JobGraph modifiedJobGraph = env.getStreamGraph().getJobGraph();
        // Set the savepoint path
        modifiedJobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));
        LOG.info("Resubmitting job " + modifiedJobGraph.getJobID() + " with " + "savepoint path " + savepointPath + " in detached mode.");
        // Submit the job
        client.submitJob(modifiedJobGraph).get();
        // Await state is restored
        assertTrue(StatefulCounter.getRestoreLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
        // Await some progress after restore
        assertTrue(StatefulCounter.getProgressLatch().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
    } finally {
        cluster.after();
    }
}
Also used : Arrays(java.util.Arrays) SharedObjects(org.apache.flink.testutils.junit.SharedObjects) MemorySize(org.apache.flink.configuration.MemorySize) EmptyRequestBody(org.apache.flink.runtime.rest.messages.EmptyRequestBody) MiniClusterResourceConfiguration(org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration) ExceptionUtils.findThrowable(org.apache.flink.util.ExceptionUtils.findThrowable) CheckpointException(org.apache.flink.runtime.checkpoint.CheckpointException) TestUtils.submitJobAndWaitForResult(org.apache.flink.test.util.TestUtils.submitJobAndWaitForResult) FSDataOutputStream(org.apache.flink.core.fs.FSDataOutputStream) CheckpointListener(org.apache.flink.api.common.state.CheckpointListener) Duration(java.time.Duration) Map(java.util.Map) StreamGraph(org.apache.flink.streaming.api.graph.StreamGraph) ExceptionUtils.assertThrowable(org.apache.flink.util.ExceptionUtils.assertThrowable) RichSourceFunction(org.apache.flink.streaming.api.functions.source.RichSourceFunction) Path(java.nio.file.Path) StateSnapshotContext(org.apache.flink.runtime.state.StateSnapshotContext) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) BoundedOneInput(org.apache.flink.streaming.api.operators.BoundedOneInput) FileSystemFactory(org.apache.flink.core.fs.FileSystemFactory) CountDownLatch(java.util.concurrent.CountDownLatch) JobMessageParameters(org.apache.flink.runtime.rest.messages.JobMessageParameters) Stream(java.util.stream.Stream) ValueState(org.apache.flink.api.common.state.ValueState) ClusterClient(org.apache.flink.client.program.ClusterClient) Assert.assertFalse(org.junit.Assert.assertFalse) OneInputStreamOperator(org.apache.flink.streaming.api.operators.OneInputStreamOperator) Time(org.apache.flink.api.common.time.Time) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) FlinkException(org.apache.flink.util.FlinkException) LocalFileSystem(org.apache.flink.core.fs.local.LocalFileSystem) JobStatus(org.apache.flink.api.common.JobStatus) KeyedProcessFunction(org.apache.flink.streaming.api.functions.KeyedProcessFunction) TypeSafeDiagnosingMatcher(org.hamcrest.TypeSafeDiagnosingMatcher) TaskManagerOptions(org.apache.flink.configuration.TaskManagerOptions) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) Collector(org.apache.flink.util.Collector) JobExecutionException(org.apache.flink.runtime.client.JobExecutionException) Before(org.junit.Before) MiniClusterWithClientResource(org.apache.flink.test.util.MiniClusterWithClientResource) Files(java.nio.file.Files) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) ExecutionState(org.apache.flink.runtime.execution.ExecutionState) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) FSDataInputStream(org.apache.flink.core.fs.FSDataInputStream) File(java.io.File) AbstractStreamOperator(org.apache.flink.streaming.api.operators.AbstractStreamOperator) ExecutionException(java.util.concurrent.ExecutionException) JobID(org.apache.flink.api.common.JobID) Paths(java.nio.file.Paths) Matcher(org.hamcrest.Matcher) Assert(org.junit.Assert) SavepointRestoreSettings(org.apache.flink.runtime.jobgraph.SavepointRestoreSettings) Assert.assertEquals(org.junit.Assert.assertEquals) StateBackendOptions(org.apache.flink.configuration.StateBackendOptions) 
EntropyInjectingTestFileSystem(org.apache.flink.testutils.EntropyInjectingTestFileSystem) Deadline(org.apache.flink.api.common.time.Deadline) ExceptionUtils.findThrowableWithMessage(org.apache.flink.util.ExceptionUtils.findThrowableWithMessage) ClusterOptions(org.apache.flink.configuration.ClusterOptions) FileUtils(org.apache.flink.util.FileUtils) URISyntaxException(java.net.URISyntaxException) BiFunction(java.util.function.BiFunction) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) LoggerFactory(org.slf4j.LoggerFactory) BlockingNoOpInvokable(org.apache.flink.runtime.testtasks.BlockingNoOpInvokable) Random(java.util.Random) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) MapFunction(org.apache.flink.api.common.functions.MapFunction) BasicTypeInfo(org.apache.flink.api.common.typeinfo.BasicTypeInfo) Assert.assertThat(org.junit.Assert.assertThat) ListState(org.apache.flink.api.common.state.ListState) CommonTestUtils.waitForAllTaskRunning(org.apache.flink.runtime.testutils.CommonTestUtils.waitForAllTaskRunning) ChainingStrategy(org.apache.flink.streaming.api.operators.ChainingStrategy) TestLogger(org.apache.flink.util.TestLogger) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) Assert.fail(org.junit.Assert.fail) URI(java.net.URI) KeySelector(org.apache.flink.api.java.functions.KeySelector) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Collection(java.util.Collection) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) CheckpointingOptions(org.apache.flink.configuration.CheckpointingOptions) Objects(java.util.Objects) TestingUtils(org.apache.flink.testutils.TestingUtils) List(java.util.List) FileSystem(org.apache.flink.core.fs.FileSystem) FlinkJobNotFoundException(org.apache.flink.runtime.messages.FlinkJobNotFoundException) Optional(java.util.Optional) CheckpointConfig(org.apache.flink.streaming.api.environment.CheckpointConfig) ParallelSourceFunction(org.apache.flink.streaming.api.functions.source.ParallelSourceFunction) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) SavepointFormatType(org.apache.flink.core.execution.SavepointFormatType) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) IterativeStream(org.apache.flink.streaming.api.datastream.IterativeStream) CompletableFuture(java.util.concurrent.CompletableFuture) RestartStrategies(org.apache.flink.api.common.restartstrategy.RestartStrategies) RestClusterClient(org.apache.flink.client.program.rest.RestClusterClient) RestoreMode(org.apache.flink.runtime.jobgraph.RestoreMode) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) CompletableFuture.allOf(java.util.concurrent.CompletableFuture.allOf) JobGraphTestUtils(org.apache.flink.runtime.jobgraph.JobGraphTestUtils) JobDetailsHeaders(org.apache.flink.runtime.rest.messages.job.JobDetailsHeaders) SharedReference(org.apache.flink.testutils.junit.SharedReference) Description(org.hamcrest.Description) Logger(org.slf4j.Logger) LocalRecoverableWriter(org.apache.flink.core.fs.local.LocalRecoverableWriter) 
DiscardingSink(org.apache.flink.streaming.api.functions.sink.DiscardingSink) Assert.assertNotNull(org.junit.Assert.assertNotNull) Configuration(org.apache.flink.configuration.Configuration) ExceptionUtils.assertThrowableWithMessage(org.apache.flink.util.ExceptionUtils.assertThrowableWithMessage) DataStream(org.apache.flink.streaming.api.datastream.DataStream) TimeUnit(java.util.concurrent.TimeUnit) Rule(org.junit.Rule) Ignore(org.junit.Ignore) ListCheckpointed(org.apache.flink.streaming.api.checkpoint.ListCheckpointed) FileVisitOption(java.nio.file.FileVisitOption) CommonTestUtils(org.apache.flink.runtime.testutils.CommonTestUtils) Collections(java.util.Collections) TemporaryFolder(org.junit.rules.TemporaryFolder) MiniClusterResourceConfiguration(org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration) Configuration(org.apache.flink.configuration.Configuration) Deadline(org.apache.flink.api.common.time.Deadline) MiniClusterWithClientResource(org.apache.flink.test.util.MiniClusterWithClientResource) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) MiniClusterResourceConfiguration(org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
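The test follows a common trigger-and-resubmit cycle: take a savepoint of the running job, stop it, and then submit a modified graph that restores from the savepoint path. A condensed sketch of that cycle using only the ClusterClient calls already shown above; the helper class and method names are placeholders:

import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.core.execution.SavepointFormatType;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;

public class SavepointCycle {

    /** Takes a savepoint of a running job and resubmits a (possibly modified) graph from it. */
    public static JobID savepointAndResubmit(
            ClusterClient<?> client, JobID runningJob, JobGraph modifiedGraph, String savepointDir)
            throws Exception {
        // Trigger a canonical-format savepoint and wait for its path.
        String savepointPath =
                client.triggerSavepoint(runningJob, savepointDir, SavepointFormatType.CANONICAL).get();
        // Stop the original job (the test instead tears down the whole MiniCluster).
        client.cancel(runningJob).get();
        // Point the modified graph at the savepoint and submit it.
        modifiedGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));
        return client.submitJob(modifiedGraph).get();
    }
}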

Example 14 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class NotifyCheckpointAbortedITCase method testNotifyCheckpointAborted.

/**
 * Verify operators would be notified as checkpoint aborted.
 *
 * <p>The job runs with at least two checkpoints. The first checkpoint fails while being added to
 * the checkpoint store, and the second checkpoint is declined by the asynchronous checkpoint
 * phase of 'DeclineSink'.
 *
 * <p>The job graph looks like: NormalSource --> keyBy --> NormalMap --> DeclineSink
 */
@Test(timeout = TEST_TIMEOUT)
public void testNotifyCheckpointAborted() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(200, CheckpointingMode.EXACTLY_ONCE);
    env.getCheckpointConfig().enableUnalignedCheckpoints(unalignedCheckpointEnabled);
    env.getCheckpointConfig().setTolerableCheckpointFailureNumber(1);
    env.disableOperatorChaining();
    env.setParallelism(1);
    final StateBackend failingStateBackend = new DeclineSinkFailingStateBackend(checkpointPath);
    env.setStateBackend(failingStateBackend);
    env.addSource(new NormalSource())
            .name("NormalSource")
            .keyBy((KeySelector<Tuple2<Integer, Integer>, Integer>) value -> value.f0)
            .transform("NormalMap", TypeInformation.of(Integer.class), new NormalMap())
            .transform(DECLINE_SINK_NAME, TypeInformation.of(Object.class), new DeclineSink());
    final ClusterClient<?> clusterClient = cluster.getClusterClient();
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    JobID jobID = jobGraph.getJobID();
    clusterClient.submitJob(jobGraph).get();
    TestingCompletedCheckpointStore.addCheckpointLatch.await();
    log.info("The checkpoint to abort is ready to add to checkpoint store.");
    TestingCompletedCheckpointStore.abortCheckpointLatch.trigger();
    log.info("Verifying whether all operators have been notified of checkpoint-1 aborted.");
    verifyAllOperatorsNotifyAborted();
    log.info("Verified that all operators have been notified of checkpoint-1 aborted.");
    resetAllOperatorsNotifyAbortedLatches();
    verifyAllOperatorsNotifyAbortedTimes(1);
    NormalSource.waitLatch.trigger();
    log.info("Verifying whether all operators have been notified of checkpoint-2 aborted.");
    verifyAllOperatorsNotifyAborted();
    log.info("Verified that all operators have been notified of checkpoint-2 aborted.");
    verifyAllOperatorsNotifyAbortedTimes(2);
    clusterClient.cancel(jobID).get();
    log.info("Test is verified successfully as expected.");
}
Also used : Arrays(java.util.Arrays) Tuple2(org.apache.flink.api.java.tuple.Tuple2) EmbeddedHaServicesWithLeadershipControl(org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedHaServicesWithLeadershipControl) ASYNCHRONOUS(org.apache.flink.runtime.state.SnapshotExecutionType.ASYNCHRONOUS) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) CheckpointingMode(org.apache.flink.streaming.api.CheckpointingMode) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) SnapshotResources(org.apache.flink.runtime.state.SnapshotResources) MapFunction(org.apache.flink.api.common.functions.MapFunction) MiniClusterResourceConfiguration(org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration) StateBackend(org.apache.flink.runtime.state.StateBackend) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) PerJobCheckpointRecoveryFactory(org.apache.flink.runtime.checkpoint.PerJobCheckpointRecoveryFactory) Path(org.apache.flink.core.fs.Path) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) After(org.junit.After) TestLogger(org.apache.flink.util.TestLogger) OperatorStateBackend(org.apache.flink.runtime.state.OperatorStateBackend) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) CompletedCheckpoint(org.apache.flink.runtime.checkpoint.CompletedCheckpoint) DefaultOperatorStateBackend(org.apache.flink.runtime.state.DefaultOperatorStateBackend) ClassRule(org.junit.ClassRule) SnapshotStrategyRunner(org.apache.flink.runtime.state.SnapshotStrategyRunner) Parameterized(org.junit.runners.Parameterized) KeySelector(org.apache.flink.api.java.functions.KeySelector) HighAvailabilityServices(org.apache.flink.runtime.highavailability.HighAvailabilityServices) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) ExpectedTestException(org.apache.flink.runtime.operators.testutils.ExpectedTestException) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Collection(java.util.Collection) SnapshotStrategy(org.apache.flink.runtime.state.SnapshotStrategy) CheckpointOptions(org.apache.flink.runtime.checkpoint.CheckpointOptions) FsStateBackend(org.apache.flink.runtime.state.filesystem.FsStateBackend) CheckpointingOptions(org.apache.flink.configuration.CheckpointingOptions) ValueState(org.apache.flink.api.common.state.ValueState) ClusterClient(org.apache.flink.client.program.ClusterClient) DefaultOperatorStateBackendBuilder(org.apache.flink.runtime.state.DefaultOperatorStateBackendBuilder) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) Environment(org.apache.flink.runtime.execution.Environment) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) BackendBuildingException(org.apache.flink.runtime.state.BackendBuildingException) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) ReadableConfig(org.apache.flink.configuration.ReadableConfig) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) StreamMap(org.apache.flink.streaming.api.operators.StreamMap) Nonnull(javax.annotation.Nonnull) StandaloneCompletedCheckpointStore(org.apache.flink.runtime.checkpoint.StandaloneCompletedCheckpointStore) 
StreamSink(org.apache.flink.streaming.api.operators.StreamSink) Before(org.junit.Before) MiniClusterWithClientResource(org.apache.flink.test.util.MiniClusterWithClientResource) SnapshotResult(org.apache.flink.runtime.state.SnapshotResult) Executor(java.util.concurrent.Executor) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) Configuration(org.apache.flink.configuration.Configuration) CheckpointsCleaner(org.apache.flink.runtime.checkpoint.CheckpointsCleaner) Test(org.junit.Test) CheckpointRecoveryFactory(org.apache.flink.runtime.checkpoint.CheckpointRecoveryFactory) JobID(org.apache.flink.api.common.JobID) HighAvailabilityServicesFactory(org.apache.flink.runtime.highavailability.HighAvailabilityServicesFactory) CheckpointStreamFactory(org.apache.flink.runtime.state.CheckpointStreamFactory) HighAvailabilityOptions(org.apache.flink.configuration.HighAvailabilityOptions) TemporaryFolder(org.junit.rules.TemporaryFolder) Assert.assertEquals(org.junit.Assert.assertEquals) StateBackend(org.apache.flink.runtime.state.StateBackend) OperatorStateBackend(org.apache.flink.runtime.state.OperatorStateBackend) DefaultOperatorStateBackend(org.apache.flink.runtime.state.DefaultOperatorStateBackend) FsStateBackend(org.apache.flink.runtime.state.filesystem.FsStateBackend) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) Tuple2(org.apache.flink.api.java.tuple.Tuple2) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)

Example 15 with ClusterClient

use of org.apache.flink.client.program.ClusterClient in project flink by apache.

the class CliFrontend method createClient.

/**
 * Creates a {@link ClusterClient} object from the given command line options and other parameters.
 *
 * @param options Command line options
 * @param program The program for which to create the client.
 * @return A client to communicate with the retrieved or newly started cluster.
 * @throws Exception if the cluster can neither be retrieved nor started.
 */
protected ClusterClient createClient(CommandLineOptions options, PackagedProgram program) throws Exception {
    // Get the custom command-line (e.g. Standalone/Yarn/Mesos)
    CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(options.getCommandLine());
    ClusterClient client;
    try {
        client = activeCommandLine.retrieveCluster(options.getCommandLine(), config);
        logAndSysout("Cluster configuration: " + client.getClusterIdentifier());
    } catch (UnsupportedOperationException e) {
        try {
            String applicationName = "Flink Application: " + program.getMainClassName();
            client = activeCommandLine.createCluster(applicationName, options.getCommandLine(), config, program.getAllLibraries());
            logAndSysout("Cluster started: " + client.getClusterIdentifier());
        } catch (UnsupportedOperationException e2) {
            throw new IllegalConfigurationException("The JobManager address is neither provided at the command-line, " + "nor configured in flink-conf.yaml.");
        }
    }
    // Avoid resolving the JobManager Gateway here to prevent blocking until we invoke the user's program.
    final InetSocketAddress jobManagerAddress = client.getJobManagerAddress();
    logAndSysout("Using address " + jobManagerAddress.getHostString() + ":" + jobManagerAddress.getPort() + " to connect to JobManager.");
    logAndSysout("JobManager web interface address " + client.getWebInterfaceURL());
    return client;
}
Also used : ClusterClient(org.apache.flink.client.program.ClusterClient) InetSocketAddress(java.net.InetSocketAddress) IllegalConfigurationException(org.apache.flink.configuration.IllegalConfigurationException)
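The createClient method above comes from an older Flink release, where CustomCommandLine still exposed retrieveCluster and createCluster. A hedged sketch of obtaining a client to an already running cluster with RestClusterClient in recent releases; the address, port, and cluster-id string are placeholders:

import org.apache.flink.client.program.rest.RestClusterClient;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;

public class RestClientExample {

    public static void main(String[] args) throws Exception {
        Configuration config = new Configuration();
        config.set(RestOptions.ADDRESS, "localhost"); // JobManager REST address (placeholder)
        config.set(RestOptions.PORT, 8081);           // default REST port
        // The second constructor argument is an arbitrary cluster id used for logging.
        try (RestClusterClient<String> client = new RestClusterClient<>(config, "standalone")) {
            System.out.println("Web interface: " + client.getWebInterfaceURL());
            // List the jobs currently known to the cluster.
            client.listJobs().get().forEach(job ->
                    System.out.println(job.getJobId() + " -> " + job.getJobState()));
        }
    }
}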

Aggregations

ClusterClient (org.apache.flink.client.program.ClusterClient): 22
Configuration (org.apache.flink.configuration.Configuration): 14
Test (org.junit.Test): 14
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 10
IOException (java.io.IOException): 7
JobID (org.apache.flink.api.common.JobID): 7
Arrays (java.util.Arrays): 6
CompletableFuture (java.util.concurrent.CompletableFuture): 6
ValueState (org.apache.flink.api.common.state.ValueState): 6
ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor): 6
StandaloneClusterClient (org.apache.flink.client.program.StandaloneClusterClient): 6
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 6
List (java.util.List): 5
ListState (org.apache.flink.api.common.state.ListState): 5
ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor): 5
DataStream (org.apache.flink.streaming.api.datastream.DataStream): 5
Collector (org.apache.flink.util.Collector): 5
File (java.io.File): 4
FileNotFoundException (java.io.FileNotFoundException): 4
Collection (java.util.Collection): 4