Use of org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_6 in project flink by apache.
Class RestoreUpgradedJobITCase, method runUpgradedJob:
private void runUpgradedJob(String snapshotPath) throws Exception {
    StreamExecutionEnvironment env;
    Configuration conf = new Configuration();
    conf.set(SavepointConfigOptions.SAVEPOINT_PATH, snapshotPath);
    env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
    env.setParallelism(PARALLELISM);
    env.addSource(new StringSource(allDataEmittedLatch))
            .map(new StringMap(MAP_1.id())).uid(MAP_1.name())
            .forward()
            .map(new StringMap(MAP_2.id())).uid(MAP_2.name())
            .slotSharingGroup("anotherSharingGroup")
            .keyBy((key) -> key)
            .map(new StringMap(MAP_3.id())).uid(MAP_3.name())
            .map(new StringMap(-1)).uid("new_chained_map")
            .rebalance()
            .map(new StringMap(-2)).uid("new_map2")
            .map(new StringMap(MAP_4.id())).uid(MAP_4.name())
            .rescale()
            .map(new StringMap(MAP_5.id())).uid(MAP_5.name())
            .broadcast()
            .map(new StringMap(MAP_6.id())).uid(MAP_6.name())
            .addSink(new StringSink(result)).setParallelism(1);
    JobClient jobClient = env.executeAsync("Total sum");
    waitForAllTaskRunning(CLUSTER.getMiniCluster(), jobClient.getJobID(), false);
    allDataEmittedLatch.get().await();
    // Using stopWithSavepoint to be sure that all values reached the sink.
    jobClient.stopWithSavepoint(
                    true,
                    temporaryFolder.getRoot().getAbsolutePath(),
                    SavepointFormatType.CANONICAL)
            .get();
}
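
Together with runOriginalJob below, this method forms a restore round trip: the original topology runs and is stopped with a snapshot, and the upgraded topology is then started from that snapshot via SavepointConfigOptions.SAVEPOINT_PATH. A minimal driver sketch with a hypothetical test-method name (not taken from the Flink sources):

// Hypothetical driver (assumption, not the actual test method): run the
// original job, capture its snapshot path, then restore the upgraded job.
@Test
public void restoreUpgradedJobFromSnapshot() throws Exception {
    String snapshotPath = runOriginalJob(); // old topology, stopped with a snapshot
    runUpgradedJob(snapshotPath);           // new topology restored from that snapshot
}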
Use of org.apache.flink.test.checkpointing.RestoreUpgradedJobITCase.MapName.MAP_6 in project flink by apache.
Class RestoreUpgradedJobITCase, method runOriginalJob:
@NotNull
private String runOriginalJob() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getCheckpointConfig()
            .setExternalizedCheckpointCleanup(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    env.getCheckpointConfig().enableUnalignedCheckpoints(false);
    env.getCheckpointConfig()
            .setCheckpointStorage("file://" + temporaryFolder.getRoot().getAbsolutePath());
    env.setParallelism(PARALLELISM);
    env.enableCheckpointing(Integer.MAX_VALUE);
    // Different order of maps before and after the savepoint.
    env.addSource(new IntSource(allDataEmittedLatch))
            .map(new IntMap(MAP_5.id())).uid(MAP_5.name())
            .forward()
            .map(new IntMap(MAP_1.id())).uid(MAP_1.name())
            .slotSharingGroup("anotherSharingGroup")
            .keyBy((key) -> key)
            .map(new IntMap(MAP_6.id())).uid(MAP_6.name())
            .rebalance()
            .map(new IntMap(MAP_4.id())).uid(MAP_4.name())
            .broadcast()
            .map(new IntMap(MAP_2.id())).uid(MAP_2.name())
            .rescale()
            .map(new IntMap(MAP_3.id())).uid(MAP_3.name())
            .addSink(new IntSink(result)).setParallelism(1);
    // when: Job is executed.
    JobClient jobClient = env.executeAsync("Total sum");
    waitForAllTaskRunning(CLUSTER.getMiniCluster(), jobClient.getJobID(), false);
    allDataEmittedLatch.get().await();
    allDataEmittedLatch.get().reset();
    return stopWithSnapshot(jobClient);
}
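
The stopWithSnapshot helper this method returns through is not shown on this page. A minimal sketch of what such a helper could look like for the savepoint case, assuming the same temporaryFolder and SavepointFormatType used in runUpgradedJob above:

// Sketch only (assumption, not the actual Flink helper): stop the job with a
// canonical savepoint and return the path of the snapshot that was written.
private String stopWithSnapshot(JobClient jobClient) throws Exception {
    return jobClient
            .stopWithSavepoint(
                    true,
                    temporaryFolder.getRoot().getAbsolutePath(),
                    SavepointFormatType.CANONICAL)
            .get();
}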