Example 6 with EmbeddedRocksDBStateBackend

Use of org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend in project flink by apache.

From class WritableSavepointITCase, method testEmbeddedRocksDBStateBackend:

@Test
public void testEmbeddedRocksDBStateBackend() throws Exception {
    StateBackend backend = new EmbeddedRocksDBStateBackend();
    testStateBootstrapAndModification(backend);
}
Also used : EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend), StateBackend(org.apache.flink.runtime.state.StateBackend), FsStateBackend(org.apache.flink.runtime.state.filesystem.FsStateBackend), RocksDBStateBackend(org.apache.flink.contrib.streaming.state.RocksDBStateBackend), HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend), Test(org.junit.Test)
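
For reference, outside of a test harness the backend is usually set directly on the execution environment. The snippet below is a minimal sketch rather than code from the Flink repository; it assumes Flink 1.14 or later with the flink-statebackend-rocksdb dependency on the classpath, and the checkpoint directory and 10-second checkpoint interval are placeholder values.

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // RocksDB keeps working state on local disk instead of the JVM heap;
    // the constructor flag enables incremental checkpoints (optional choice here).
    env.setStateBackend(new EmbeddedRocksDBStateBackend(true));

    // Checkpoint data still needs a durable location (placeholder path).
    env.enableCheckpointing(10_000);
    env.getCheckpointConfig().setCheckpointStorage("file:///tmp/flink-checkpoints");

    // ... define sources, transformations, and sinks, then call env.execute() ...
}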

Example 7 with EmbeddedRocksDBStateBackend

Use of org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend in project flink by apache.

From class StatefulJobSnapshotMigrationITCase, method testSavepoint:

@Test
public void testSavepoint() throws Exception {
    final int parallelism = 4;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());
    switch(snapshotSpec.getStateBackendType()) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new EmbeddedRocksDBStateBackend());
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        case StateBackendLoader.HASHMAP_STATE_BACKEND_NAME:
            env.setStateBackend(new HashMapStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }
    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setMaxParallelism(parallelism);
    env.enableChangelogStateBackend(false);
    SourceFunction<Tuple2<Long, Long>> nonParallelSource;
    SourceFunction<Tuple2<Long, Long>> parallelSource;
    RichFlatMapFunction<Tuple2<Long, Long>, Tuple2<Long, Long>> flatMap;
    OneInputStreamOperator<Tuple2<Long, Long>, Tuple2<Long, Long>> timelyOperator;
    if (executionMode == ExecutionMode.CREATE_SNAPSHOT) {
        nonParallelSource = new MigrationTestUtils.CheckpointingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource = new MigrationTestUtils.CheckpointingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckpointingKeyedStateFlatMap();
        timelyOperator = new CheckpointingTimelyStatefulOperator();
    } else if (executionMode == ExecutionMode.VERIFY_SNAPSHOT) {
        nonParallelSource = new MigrationTestUtils.CheckingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource = new MigrationTestUtils.CheckingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckingKeyedStateFlatMap();
        timelyOperator = new CheckingTimelyStatefulOperator();
    } else {
        throw new IllegalStateException("Unknown ExecutionMode " + executionMode);
    }
    env.addSource(nonParallelSource).uid("CheckpointingSource1")
            .keyBy(0).flatMap(flatMap).startNewChain().uid("CheckpointingKeyedStateFlatMap1")
            .keyBy(0)
            .transform("timely_stateful_operator",
                    new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(), timelyOperator)
            .uid("CheckpointingTimelyStatefulOperator1")
            .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());
    env.addSource(parallelSource).uid("CheckpointingSource2")
            .keyBy(0).flatMap(flatMap).startNewChain().uid("CheckpointingKeyedStateFlatMap2")
            .keyBy(0)
            .transform("timely_stateful_operator",
                    new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(), timelyOperator)
            .uid("CheckpointingTimelyStatefulOperator2")
            .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());
    final String snapshotPath = getSnapshotPath(snapshotSpec);
    if (executionMode == ExecutionMode.CREATE_SNAPSHOT) {
        executeAndSnapshot(
                env,
                "src/test/resources/" + snapshotPath,
                snapshotSpec.getSnapshotType(),
                new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    } else {
        restoreAndExecute(
                env,
                getResourceFilename(snapshotPath),
                new Tuple2<>(MigrationTestUtils.CheckingNonParallelSourceWithListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
                new Tuple2<>(MigrationTestUtils.CheckingParallelSourceWithUnionListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, parallelism),
                new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
                new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    }
}
Also used : TypeHint(org.apache.flink.api.common.typeinfo.TypeHint), MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend), MigrationTestUtils(org.apache.flink.test.checkpointing.utils.MigrationTestUtils), EmbeddedRocksDBStateBackend(org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend), Tuple2(org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), HashMapStateBackend(org.apache.flink.runtime.state.hashmap.HashMapStateBackend), Test(org.junit.Test)
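
The switch over StateBackendLoader constants in testSavepoint mirrors what Flink does when the backend name comes from configuration instead of code. The sketch below is illustrative, not from the Flink repository; it assumes Flink 1.13 or later, where the "state.backend" option accepts the name "rocksdb", it uses Configuration(org.apache.flink.configuration.Configuration), and the helper method name is made up:

public static StreamExecutionEnvironment createRocksDbEnvironment() {
    Configuration config = new Configuration();
    // "rocksdb" is the same name the test matches via StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME;
    // "hashmap" would select HashMapStateBackend instead.
    config.setString("state.backend", "rocksdb");

    // The environment resolves the configured name to EmbeddedRocksDBStateBackend when the job runs.
    return StreamExecutionEnvironment.getExecutionEnvironment(config);
}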

Aggregations

EmbeddedRocksDBStateBackend (org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend) 7
HashMapStateBackend (org.apache.flink.runtime.state.hashmap.HashMapStateBackend) 6
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 6
Test (org.junit.Test) 5
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 3
MemoryStateBackend (org.apache.flink.runtime.state.memory.MemoryStateBackend) 3
MigrationTestUtils (org.apache.flink.test.checkpointing.utils.MigrationTestUtils) 3
IOException (java.io.IOException) 2
List (java.util.List) 2
RichFlatMapFunction (org.apache.flink.api.common.functions.RichFlatMapFunction) 2
RestartStrategies (org.apache.flink.api.common.restartstrategy.RestartStrategies) 2
CheckpointListener (org.apache.flink.api.common.state.CheckpointListener) 2
ListState (org.apache.flink.api.common.state.ListState) 2
ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor) 2
ValueState (org.apache.flink.api.common.state.ValueState) 2
ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor) 2
KeySelector (org.apache.flink.api.java.functions.KeySelector) 2
ParameterTool (org.apache.flink.api.java.utils.ParameterTool) 2
FunctionInitializationContext (org.apache.flink.runtime.state.FunctionInitializationContext) 2
FunctionSnapshotContext (org.apache.flink.runtime.state.FunctionSnapshotContext) 2