Use of org.apache.flink.runtime.state.hashmap.HashMapStateBackend in project flink by apache.
The class StateBackendLoadingTest, method testLoadFileSystemStateBackend.
// ------------------------------------------------------------------------
//  File System State Backend
// ------------------------------------------------------------------------

/**
 * Validates loading a file system state backend with additional parameters from the cluster
 * configuration.
 */
@Test
public void testLoadFileSystemStateBackend() throws Exception {
    final String checkpointDir = new Path(tmp.newFolder().toURI()).toString();
    final String savepointDir = new Path(tmp.newFolder().toURI()).toString();
    final Path expectedCheckpointsPath = new Path(checkpointDir);
    final Path expectedSavepointsPath = new Path(savepointDir);
    final MemorySize threshold = MemorySize.parse("900kb");
    final int minWriteBufferSize = 1024;

    // we configure with the explicit string (rather than
    // AbstractStateBackend#X_STATE_BACKEND_NAME)
    // to guard against config-breaking changes of the name
    final Configuration config1 = new Configuration();
    config1.setString(backendKey, "filesystem");
    config1.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir);
    config1.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir);
    config1.set(CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, threshold);
    config1.setInteger(CheckpointingOptions.FS_WRITE_BUFFER_SIZE, minWriteBufferSize);

    final Configuration config2 = new Configuration();
    config2.setString(backendKey, FsStateBackendFactory.class.getName());
    config2.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir);
    config2.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir);
    config2.set(CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, threshold);
    config2.setInteger(CheckpointingOptions.FS_WRITE_BUFFER_SIZE, minWriteBufferSize);

    StateBackend backend1 = StateBackendLoader.loadStateBackendFromConfig(config1, cl, null);
    StateBackend backend2 = StateBackendLoader.loadStateBackendFromConfig(config2, cl, null);

    // the shortcut name "filesystem" resolves to the new HashMapStateBackend,
    // while the explicit legacy factory class still yields an FsStateBackend
    assertTrue(backend1 instanceof HashMapStateBackend);
    assertTrue(backend2 instanceof FsStateBackend);

    HashMapStateBackend fs1 = (HashMapStateBackend) backend1;
    FsStateBackend fs2 = (FsStateBackend) backend2;

    assertEquals(expectedCheckpointsPath, fs2.getCheckpointPath());
    assertEquals(expectedSavepointsPath, fs2.getSavepointPath());
    assertEquals(threshold.getBytes(), fs2.getMinFileSizeThreshold());
    assertEquals(Math.max(threshold.getBytes(), minWriteBufferSize), fs2.getWriteBufferSize());
}
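For comparison, here is a minimal standalone sketch of the same loader call, written against the Flink 1.13/1.14-era API that the test above uses. The class name and the literal "state.backend" key (what the test's backendKey resolves to) are our own illustration, not part of the test:

import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.state.StateBackendLoader;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;

public class HashMapBackendLoadingSketch {

    public static void main(String[] args) throws Exception {
        final Configuration config = new Configuration();
        // "hashmap" is the canonical shortcut for the heap-based backend;
        // as the test above asserts, the legacy "filesystem" shortcut now
        // resolves to HashMapStateBackend as well.
        config.setString("state.backend", "hashmap");

        final ClassLoader cl = HashMapBackendLoadingSketch.class.getClassLoader();
        final StateBackend backend =
                StateBackendLoader.loadStateBackendFromConfig(config, cl, null);

        System.out.println(backend instanceof HashMapStateBackend); // prints: true
    }
}

Note that FS_SMALL_FILE_THRESHOLD and FS_WRITE_BUFFER_SIZE only apply to the file-system checkpoint path, which is why the test reads them back from the FsStateBackend instance (fs2) rather than from the HashMapStateBackend.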
Use of org.apache.flink.runtime.state.hashmap.HashMapStateBackend in project flink by apache.
The class StatefulJobSnapshotMigrationITCase, method testSavepoint.
@Test
public void testSavepoint() throws Exception {
    final int parallelism = 4;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());

    switch (snapshotSpec.getStateBackendType()) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new EmbeddedRocksDBStateBackend());
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        case StateBackendLoader.HASHMAP_STATE_BACKEND_NAME:
            env.setStateBackend(new HashMapStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setMaxParallelism(parallelism);
    env.enableChangelogStateBackend(false);

    SourceFunction<Tuple2<Long, Long>> nonParallelSource;
    SourceFunction<Tuple2<Long, Long>> parallelSource;
    RichFlatMapFunction<Tuple2<Long, Long>, Tuple2<Long, Long>> flatMap;
    OneInputStreamOperator<Tuple2<Long, Long>, Tuple2<Long, Long>> timelyOperator;

    if (executionMode == ExecutionMode.CREATE_SNAPSHOT) {
        nonParallelSource =
                new MigrationTestUtils.CheckpointingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource =
                new MigrationTestUtils.CheckpointingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckpointingKeyedStateFlatMap();
        timelyOperator = new CheckpointingTimelyStatefulOperator();
    } else if (executionMode == ExecutionMode.VERIFY_SNAPSHOT) {
        nonParallelSource =
                new MigrationTestUtils.CheckingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource =
                new MigrationTestUtils.CheckingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckingKeyedStateFlatMap();
        timelyOperator = new CheckingTimelyStatefulOperator();
    } else {
        throw new IllegalStateException("Unknown ExecutionMode " + executionMode);
    }

    env.addSource(nonParallelSource)
            .uid("CheckpointingSource1")
            .keyBy(0)
            .flatMap(flatMap)
            .startNewChain()
            .uid("CheckpointingKeyedStateFlatMap1")
            .keyBy(0)
            .transform(
                    "timely_stateful_operator",
                    new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
                    timelyOperator)
            .uid("CheckpointingTimelyStatefulOperator1")
            .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    env.addSource(parallelSource)
            .uid("CheckpointingSource2")
            .keyBy(0)
            .flatMap(flatMap)
            .startNewChain()
            .uid("CheckpointingKeyedStateFlatMap2")
            .keyBy(0)
            .transform(
                    "timely_stateful_operator",
                    new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
                    timelyOperator)
            .uid("CheckpointingTimelyStatefulOperator2")
            .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    final String snapshotPath = getSnapshotPath(snapshotSpec);

    if (executionMode == ExecutionMode.CREATE_SNAPSHOT) {
        executeAndSnapshot(
                env,
                "src/test/resources/" + snapshotPath,
                snapshotSpec.getSnapshotType(),
                new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    } else {
        restoreAndExecute(
                env,
                getResourceFilename(snapshotPath),
                new Tuple2<>(MigrationTestUtils.CheckingNonParallelSourceWithListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
                new Tuple2<>(MigrationTestUtils.CheckingParallelSourceWithUnionListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, parallelism),
                new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    }
}
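Outside the migration-test harness, the HASHMAP branch of the switch above reduces to a few lines of job setup. The following self-contained sketch assumes the post-FLIP-142 API (Flink 1.13+); the class name, checkpoint directory, and toy pipeline are illustrative placeholders, not part of the test:

import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class HashMapBackendJobSketch {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment();

        // HashMapStateBackend keeps working state as objects on the JVM heap.
        env.setStateBackend(new HashMapStateBackend());

        // Since FLIP-142, snapshot durability is configured separately from
        // the backend; the directory below is a placeholder.
        env.getCheckpointConfig().setCheckpointStorage("file:///tmp/flink-checkpoints");

        env.enableCheckpointing(500);

        env.fromElements(1L, 2L, 3L, 4L)
                .keyBy(value -> value % 2)
                .reduce(Long::sum)
                .print();

        env.execute("hashmap-backend-sketch");
    }
}

This split between state backend and checkpoint storage is also why the migration test only calls env.setStateBackend(...) and env.enableCheckpointing(...): where the snapshot files end up is determined by the executeAndSnapshot/restoreAndExecute harness via the snapshotPath argument, not by the backend choice.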