Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project beam by apache.
The class FlinkKeyGroupStateInternalsTest, method getKeyedStateBackend.
private KeyedStateBackend getKeyedStateBackend(int numberOfKeyGroups, KeyGroupRange keyGroupRange) {
  MemoryStateBackend backend = new MemoryStateBackend();
  try {
    // Create a heap-backed keyed state backend, keyed by the encoded Beam key bytes.
    AbstractKeyedStateBackend<ByteBuffer> keyedStateBackend =
        backend.createKeyedStateBackend(
            new DummyEnvironment("test", 1, 0),
            new JobID(),
            "test_op",
            new GenericTypeInfo<>(ByteBuffer.class).createSerializer(new ExecutionConfig()),
            numberOfKeyGroups,
            keyGroupRange,
            new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()));
    // Set an initial current key: the UTF-8 encoding of "1".
    keyedStateBackend.setCurrentKey(
        ByteBuffer.wrap(CoderUtils.encodeToByteArray(StringUtf8Coder.of(), "1")));
    return keyedStateBackend;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
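For context, this helper is how the surrounding test builds the StateInternals under test; a minimal sketch of that wiring (argument order as in the Beam test class, which may differ across Beam versions):

// Two key groups covering range [0, 1]; FlinkKeyGroupStateInternals
// partitions its state by key group so it can be snapshotted per group.
StateInternals<String> underTest =
    new FlinkKeyGroupStateInternals<>(
        StringUtf8Coder.of(), getKeyedStateBackend(2, new KeyGroupRange(0, 1)));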
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project beam by apache.
The class FlinkSplitStateInternalsTest, method initStateInternals.
@Before
public void initStateInternals() {
  MemoryStateBackend backend = new MemoryStateBackend();
  try {
    // Split state is stored as Flink operator (non-keyed) state.
    OperatorStateBackend operatorStateBackend =
        backend.createOperatorStateBackend(new DummyEnvironment("test", 1, 0), "");
    underTest = new FlinkSplitStateInternals<>(operatorStateBackend);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
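A minimal sketch of exercising the initialized underTest through Beam's bag-state API (FlinkSplitStateInternals keeps bag state in Flink operator state; StateTags and StateNamespaces are Beam runner-core helpers whose packages and generics shifted across Beam releases, so treat this as illustrative):

// Add two elements to a bag cell in the global namespace and read them back.
BagState<String> bag =
    underTest.state(StateNamespaces.global(), StateTags.bag("splitBag", StringUtf8Coder.of()));
bag.add("a");
bag.add("b");
// Bag state makes no ordering guarantee.
assertThat(bag.read(), containsInAnyOrder("a", "b"));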
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project beam by apache.
The class FlinkStateInternalsTest, method initStateInternals.
@Before
public void initStateInternals() {
  MemoryStateBackend backend = new MemoryStateBackend();
  try {
    // A single key group (range [0, 0]) is enough for a single-key test.
    AbstractKeyedStateBackend<ByteBuffer> keyedStateBackend =
        backend.createKeyedStateBackend(
            new DummyEnvironment("test", 1, 0),
            new JobID(),
            "test_op",
            new GenericTypeInfo<>(ByteBuffer.class).createSerializer(new ExecutionConfig()),
            1,
            new KeyGroupRange(0, 0),
            new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()));
    underTest = new FlinkStateInternals<>(keyedStateBackend, StringUtf8Coder.of());
    // All state access below is scoped to the key "Hello".
    keyedStateBackend.setCurrentKey(
        ByteBuffer.wrap(CoderUtils.encodeToByteArray(StringUtf8Coder.of(), "Hello")));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
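A minimal sketch of reading and writing value state against the current key set above, again via Beam's StateInternals API (illustrative; exact signatures vary by Beam version):

// The cell is scoped to the current key ("Hello") and the global namespace.
ValueState<String> value =
    underTest.state(StateNamespaces.global(), StateTags.value("stringValue", StringUtf8Coder.of()));
value.write("world");
assertEquals("world", value.read());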
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project flink by apache.
The class CEPOperatorTest, method testKeyedCEPOperatorCheckpointingWithRocksDB.
@Test
public void testKeyedCEPOperatorCheckpointingWithRocksDB() throws Exception {
  String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
  RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
  rocksDBStateBackend.setDbStoragePath(rocksDbPath);
  OneInputStreamOperatorTestHarness<Event, Map<String, Event>> harness = getCepTestHarness(false);
  harness.setStateBackend(rocksDBStateBackend);
  harness.open();
  Event startEvent = new Event(42, "start", 1.0);
  SubEvent middleEvent = new SubEvent(42, "foo", 1.0, 10.0);
  Event endEvent = new Event(42, "end", 1.0);
  harness.processElement(new StreamRecord<>(startEvent, 1L));
  harness.processElement(new StreamRecord<>(new Event(42, "foobar", 1.0), 2L));
  // simulate snapshot/restore with some elements in internal sorting queue
  OperatorStateHandles snapshot = harness.snapshot(0L, 0L);
  harness.close();
  harness = getCepTestHarness(false);
  rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
  rocksDBStateBackend.setDbStoragePath(rocksDbPath);
  harness.setStateBackend(rocksDBStateBackend);
  harness.setup();
  harness.initializeState(snapshot);
  harness.open();
  harness.processWatermark(new Watermark(Long.MIN_VALUE));
  harness.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3L));
  // if element timestamps are not correctly checkpointed/restored this will lead to
  // a pruning time underflow exception in NFA
  harness.processWatermark(new Watermark(2L));
  // simulate snapshot/restore with empty element queue but NFA state
  OperatorStateHandles snapshot2 = harness.snapshot(1L, 1L);
  harness.close();
  harness = getCepTestHarness(false);
  rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
  rocksDBStateBackend.setDbStoragePath(rocksDbPath);
  harness.setStateBackend(rocksDBStateBackend);
  harness.setup();
  harness.initializeState(snapshot2);
  harness.open();
  harness.processElement(new StreamRecord<Event>(middleEvent, 3L));
  harness.processElement(new StreamRecord<>(new Event(42, "start", 1.0), 4L));
  harness.processElement(new StreamRecord<>(endEvent, 5L));
  harness.processWatermark(new Watermark(Long.MAX_VALUE));
  // get and verify the output
  Queue<Object> result = harness.getOutput();
  assertEquals(2, result.size());
  verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
  verifyWatermark(result.poll(), Long.MAX_VALUE);
  harness.close();
}
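The create-backend / setup / initializeState / open sequence is repeated verbatim for each restore; a small helper would keep it in one place. A sketch under the test's own assumptions (getCepTestHarness comes from the surrounding test class; restoreHarness is a hypothetical name):

private OneInputStreamOperatorTestHarness<Event, Map<String, Event>> restoreHarness(
    String rocksDbPath, OperatorStateHandles snapshot) throws Exception {
  // Fresh harness plus a fresh RocksDB backend pointing at the same working directory.
  OneInputStreamOperatorTestHarness<Event, Map<String, Event>> harness = getCepTestHarness(false);
  RocksDBStateBackend backend = new RocksDBStateBackend(new MemoryStateBackend());
  backend.setDbStoragePath(rocksDbPath);
  harness.setStateBackend(backend);
  harness.setup();
  harness.initializeState(snapshot);
  harness.open();
  return harness;
}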
Use of org.apache.flink.runtime.state.memory.MemoryStateBackend in project flink by apache.
The class StatefulUDFSavepointMigrationITCase, method testCreateSavepointOnFlink11WithRocksDB.
/**
* This has to be manually executed to create the savepoint on Flink 1.1.
*/
@Test
@Ignore
public void testCreateSavepointOnFlink11WithRocksDB() throws Exception {
  final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
  RocksDBStateBackend rocksBackend = new RocksDBStateBackend(new MemoryStateBackend());
  // rocksBackend.enableFullyAsyncSnapshots();
  env.setStateBackend(rocksBackend);
  env.enableCheckpointing(500);
  env.setParallelism(4);
  env.setMaxParallelism(4);
  // create source
  env.addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1)
      .uid("LegacyCheckpointedSource")
      .flatMap(new LegacyCheckpointedFlatMap()).startNewChain()
      .uid("LegacyCheckpointedFlatMap")
      .keyBy(0)
      .flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain()
      .uid("LegacyCheckpointedFlatMapWithKeyedState")
      .keyBy(0)
      .flatMap(new KeyedStateSettingFlatMap()).startNewChain()
      .uid("KeyedStateSettingFlatMap")
      .keyBy(0)
      .transform(
          "custom_operator",
          new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
          new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState()))
      .uid("LegacyCheckpointedOperator")
      .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));
  executeAndSavepoint(
      env,
      "src/test/resources/stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb",
      new Tuple2<>(EXPECTED_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
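Wrapping new MemoryStateBackend() makes RocksDB ship its checkpoint streams to the JobManager heap, which is convenient for tests but subject to MemoryStateBackend's small default state-size limit; a job producing real savepoints would normally hand RocksDB a filesystem checkpoint URI instead. A hedged sketch (the path is a placeholder):

// Checkpoint/savepoint data goes to the given URI; RocksDB still keeps
// its working state on local disk.
RocksDBStateBackend fsBacked = new RocksDBStateBackend("file:///tmp/flink-checkpoints");
env.setStateBackend(fsBacked);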