Use of org.apache.flink.core.execution.JobClient in project flink by apache.
The class RestoreUpgradedJobITCase, method runUpgradedJob:
private void runUpgradedJob(String snapshotPath) throws Exception {
    Configuration conf = new Configuration();
    conf.set(SavepointConfigOptions.SAVEPOINT_PATH, snapshotPath);
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
    env.setParallelism(PARALLELISM);
    env.addSource(new StringSource(allDataEmittedLatch))
            .map(new StringMap(MAP_1.id())).uid(MAP_1.name())
            .forward()
            .map(new StringMap(MAP_2.id())).uid(MAP_2.name())
            .slotSharingGroup("anotherSharingGroup")
            .keyBy((key) -> key)
            .map(new StringMap(MAP_3.id())).uid(MAP_3.name())
            .map(new StringMap(-1)).uid("new_chained_map")
            .rebalance()
            .map(new StringMap(-2)).uid("new_map2")
            .map(new StringMap(MAP_4.id())).uid(MAP_4.name())
            .rescale()
            .map(new StringMap(MAP_5.id())).uid(MAP_5.name())
            .broadcast()
            .map(new StringMap(MAP_6.id())).uid(MAP_6.name())
            .addSink(new StringSink(result)).setParallelism(1);
    JobClient jobClient = env.executeAsync("Total sum");
    waitForAllTaskRunning(CLUSTER.getMiniCluster(), jobClient.getJobID(), false);
    allDataEmittedLatch.get().await();
    // Using stopWithSavepoint to be sure that all values reached the sink.
    jobClient.stopWithSavepoint(true, temporaryFolder.getRoot().getAbsolutePath(), SavepointFormatType.CANONICAL).get();
}
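Stripped of the test scaffolding, this restore-then-stop flow comes down to three JobClient touch points: executeAsync(), getJobID(), and stopWithSavepoint(). A minimal sketch, assuming an unbounded inline source; the job name and both paths are illustrative placeholders, and in a real run you would wait for the job to reach RUNNING before stopping it, as the test does:

Configuration conf = new Configuration();
// Restore state from an earlier snapshot (placeholder path).
conf.set(SavepointConfigOptions.SAVEPOINT_PATH, "file:///tmp/snapshots/old");
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
env.addSource(new SourceFunction<Long>() {
    private volatile boolean running = true;
    @Override
    public void run(SourceContext<Long> ctx) throws Exception {
        // Emit values until the job is drained or cancelled.
        for (long i = 0; running; i++) {
            ctx.collect(i);
            Thread.sleep(10);
        }
    }
    @Override
    public void cancel() {
        running = false;
    }
}).uid("numbers").print();
// executeAsync() submits the job and returns a handle immediately.
JobClient client = env.executeAsync("restore-demo");
// Drain the pipeline and write a final canonical savepoint;
// get() blocks until the job has stopped.
String savepoint = client
        .stopWithSavepoint(true, "file:///tmp/snapshots", SavepointFormatType.CANONICAL)
        .get();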
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
The class UnalignedCheckpointFailureHandlingITCase, method testCheckpointSuccessAfterFailure:
@Test
public void testCheckpointSuccessAfterFailure() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    TestCheckpointStorage storage =
            new TestCheckpointStorage(new JobManagerCheckpointStorage(), sharedObjects, temporaryFolder);
    configure(env, storage);
    buildGraph(env);
    JobClient jobClient = env.executeAsync();
    JobID jobID = jobClient.getJobID();
    MiniCluster miniCluster = miniClusterResource.getMiniCluster();
    waitForJobStatus(jobClient, singletonList(RUNNING), fromNow(Duration.ofSeconds(30)));
    waitForAllTaskRunning(miniCluster, jobID, false);
    // Force one checkpoint to fail, then verify that a subsequent one still succeeds.
    triggerFailingCheckpoint(jobID, TestException.class, miniCluster);
    miniCluster.triggerCheckpoint(jobID).get();
}
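The MiniCluster handle used here is test-only; in application code, a comparable out-of-band snapshot can be requested through the JobClient itself. A minimal sketch, assuming `client` is the handle returned by executeAsync():

// Take a savepoint without stopping the job; passing null for the directory
// falls back to the configured state.savepoints.dir.
String path = client.triggerSavepoint(null, SavepointFormatType.CANONICAL).get();
// The job keeps running; its status can be polled asynchronously.
JobStatus status = client.getJobStatus().get();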
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
The class UnalignedCheckpointCompatibilityITCase, method runAndTakeExternalCheckpoint:
private Tuple2<String, Map<String, Object>> runAndTakeExternalCheckpoint() throws Exception {
    JobClient jobClient = submitJobInitially(env(startAligned, Integer.MAX_VALUE));
    waitForAllTaskRunning(
            () -> miniCluster.getMiniCluster().getExecutionGraph(jobClient.getJobID()).get(), false);
    // wait for some backpressure from sink
    Thread.sleep(FIRST_RUN_BACKPRESSURE_MS);
    String checkpointPath =
            miniCluster.getMiniCluster().triggerCheckpoint(jobClient.getJobID()).get();
    cancelJob(jobClient);
    return new Tuple2<>(checkpointPath, emptyMap());
}
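The path returned above points at an externalized checkpoint, and restoring from it uses the same configuration key as a savepoint restore. A minimal sketch of the second half of the round trip; variable names are illustrative:

// Stop the first job; cancel() is asynchronous like everything on JobClient.
jobClient.cancel().get();
// Point a fresh environment at the externalized checkpoint and resubmit.
Configuration conf = new Configuration();
conf.set(SavepointConfigOptions.SAVEPOINT_PATH, checkpointPath);
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);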
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
The class ManualCheckpointITCase, method testTriggeringWhenPeriodicEnabled:
@Test
public void testTriggeringWhenPeriodicEnabled() throws Exception {
    int parallelism = MINI_CLUSTER_RESOURCE.getNumberSlots();
    final int checkpointingInterval = 500;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.enableCheckpointing(checkpointingInterval);
    env.getCheckpointConfig()
            .setCheckpointStorage(storageSupplier.apply(temporaryFolder.newFolder().toURI().toString()));
    env.fromSource(MockSource.continuous(parallelism).build(), WatermarkStrategy.noWatermarks(), "generator")
            .keyBy(key -> key % parallelism)
            .flatMap(new StatefulMapper())
            .addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync();
    final JobID jobID = jobClient.getJobID();
    final MiniCluster miniCluster = MINI_CLUSTER_RESOURCE.getMiniCluster();
    CommonTestUtils.waitForJobStatus(jobClient, Collections.singletonList(JobStatus.RUNNING), Deadline.fromNow(Duration.ofSeconds(30)));
    CommonTestUtils.waitForAllTaskRunning(miniCluster, jobID, false);
    // Let at least one periodic checkpoint complete first.
    CommonTestUtils.waitUntilCondition(
            () -> queryCompletedCheckpoints(miniCluster, jobID) > 0L,
            Deadline.fromNow(Duration.ofSeconds(30)),
            checkpointingInterval / 2);
    final long numberOfPeriodicCheckpoints = queryCompletedCheckpoints(miniCluster, jobID);
    // Wait for the manually triggered checkpoint to be taken.
    miniCluster.triggerCheckpoint(jobID).get();
    miniCluster.cancelJob(jobID).get();
    // The manual checkpoint must show up on top of the periodic ones.
    queryCompletedCheckpointsUntil(miniCluster, jobID, count -> count >= numberOfPeriodicCheckpoints + 1);
}
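Tests like this one cancel the job through the MiniCluster, but a job that terminates on its own reports back through the JobClient. A minimal sketch, assuming a bounded pipeline already built on `env`; the job name is a placeholder:

JobClient client = env.executeAsync("bounded-demo");
// Blocks until the job reaches a terminal state; throws if the job failed.
JobExecutionResult result = client.getJobExecutionResult().get();
// Net runtime (in milliseconds) and user-defined accumulators
// are available on the result.
long runtimeMillis = result.getNetRuntime();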
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
The class ManualCheckpointITCase, method testTriggeringWhenPeriodicDisabled:
@Test
public void testTriggeringWhenPeriodicDisabled() throws Exception {
    int parallelism = MINI_CLUSTER_RESOURCE.getNumberSlots();
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    // Note: enableCheckpointing() is deliberately not called here.
    env.getCheckpointConfig()
            .setCheckpointStorage(storageSupplier.apply(temporaryFolder.newFolder().toURI().toString()));
    env.fromSource(MockSource.continuous(parallelism).build(), WatermarkStrategy.noWatermarks(), "generator")
            .keyBy(key -> key % parallelism)
            .flatMap(new StatefulMapper())
            .addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync();
    final JobID jobID = jobClient.getJobID();
    final MiniCluster miniCluster = MINI_CLUSTER_RESOURCE.getMiniCluster();
    CommonTestUtils.waitForJobStatus(jobClient, Collections.singletonList(JobStatus.RUNNING), Deadline.fromNow(Duration.ofSeconds(30)));
    CommonTestUtils.waitForAllTaskRunning(miniCluster, jobID, false);
    // Wait for the manually triggered checkpoint to be taken.
    miniCluster.triggerCheckpoint(jobID).get();
    miniCluster.cancelJob(jobID).get();
    // With periodic checkpointing disabled, exactly one (the manual) checkpoint exists.
    queryCompletedCheckpointsUntil(miniCluster, jobID, count -> count == 1);
}
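Every snippet above gates on CommonTestUtils.waitForJobStatus before interacting with the job. Outside Flink's test utilities, the same gate can be hand-rolled on the public getJobStatus() call alone. A minimal sketch; the poll interval and exception type are arbitrary choices:

static void waitUntilRunning(JobClient client, Duration timeout) throws Exception {
    long deadlineNanos = System.nanoTime() + timeout.toNanos();
    // Poll the asynchronous status until the job is RUNNING or time runs out.
    while (client.getJobStatus().get() != JobStatus.RUNNING) {
        if (System.nanoTime() > deadlineNanos) {
            throw new TimeoutException("Job did not reach RUNNING within " + timeout);
        }
        Thread.sleep(50);
    }
}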