Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
From the class CEPOperatorTest, method testKeyedCEPOperatorCheckpointingWithRocksDB.
@Test
public void testKeyedCEPOperatorCheckpointingWithRocksDB() throws Exception {
    String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
    RocksDBStateBackend rocksDBStateBackend =
            new RocksDBStateBackend(new MemoryStateBackend(), TernaryBoolean.FALSE);
    rocksDBStateBackend.setDbStoragePath(rocksDbPath);
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            getCepTestHarness(false);
    try {
        harness.setStateBackend(rocksDBStateBackend);
        harness.open();
        Event startEvent = new Event(42, "start", 1.0);
        SubEvent middleEvent = new SubEvent(42, "foo", 1.0, 10.0);
        Event endEvent = new Event(42, "end", 1.0);
        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "foobar", 1.0), 2L));
        // simulate snapshot/restore with some elements in internal sorting queue
        OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
        harness.close();
        harness = getCepTestHarness(false);
        rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);
        harness.setStateBackend(rocksDBStateBackend);
        harness.setup();
        harness.initializeState(snapshot);
        harness.open();
        harness.processWatermark(new Watermark(Long.MIN_VALUE));
        harness.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3L));
        // if element timestamps are not correctly checkpointed/restored this will lead to
        // a pruning time underflow exception in NFA
        harness.processWatermark(new Watermark(2L));
        // simulate snapshot/restore with empty element queue but NFA state
        OperatorSubtaskState snapshot2 = harness.snapshot(1L, 1L);
        harness.close();
        harness = getCepTestHarness(false);
        rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);
        harness.setStateBackend(rocksDBStateBackend);
        harness.setup();
        harness.initializeState(snapshot2);
        harness.open();
        harness.processElement(new StreamRecord<Event>(middleEvent, 3L));
        harness.processElement(new StreamRecord<>(new Event(42, "start", 1.0), 4L));
        harness.processElement(new StreamRecord<>(endEvent, 5L));
        harness.processWatermark(new Watermark(Long.MAX_VALUE));
        // get and verify the output
        Queue<Object> result = harness.getOutput();
        assertEquals(2, result.size());
        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
        verifyWatermark(result.poll(), Long.MAX_VALUE);
    } finally {
        harness.close();
    }
}
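The harness-level wiring above mirrors what a real job does: construct the backend, point it at a local working directory, and install it before the operator opens. A minimal sketch of the equivalent job-level setup, using the same legacy RocksDBStateBackend API; the checkpoint URI and storage path below are placeholders, not values from the test:

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RocksDbJobSetup {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint data goes to this URI; 'true' enables incremental checkpoints.
        RocksDBStateBackend backend = new RocksDBStateBackend("file:///tmp/checkpoints", true);
        // Local directory where RocksDB keeps its working files (SST files, logs).
        backend.setDbStoragePath("/tmp/rocksdb-working-dir");
        env.setStateBackend(backend);
        env.enableCheckpointing(60_000L);
        // ... define sources, operators, and sinks, then call env.execute(...)
    }
}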
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
From the class WritableSavepointITCase, method testRocksDBStateBackend.
@Test
public void testRocksDBStateBackend() throws Exception {
    StateBackend backend =
            new RocksDBStateBackend(
                    new FsStateBackend(TEMPORARY_FOLDER.newFolder().toURI(), FILE_STATE_SIZE));
    testStateBootstrapAndModification(backend);
}
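Here the wrapped FsStateBackend determines where checkpoint streams are written, while RocksDB holds the operator's working state. Matching the FsStateBackend(URI, int) constructor, FILE_STATE_SIZE is presumably the file state size threshold: state smaller than this is stored inline in checkpoint metadata rather than in separate files.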
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project beam by apache.
From the class FlinkExecutionEnvironments, method configureStateBackend.
private static void configureStateBackend(FlinkPipelineOptions options, StreamExecutionEnvironment env) {
    final StateBackend stateBackend;
    if (options.getStateBackend() != null) {
        final String storagePath = options.getStateBackendStoragePath();
        Preconditions.checkArgument(
                storagePath != null,
                "State backend was set to '%s' but no storage path was provided.",
                options.getStateBackend());
        if (options.getStateBackend().equalsIgnoreCase("rocksdb")) {
            try {
                stateBackend = new RocksDBStateBackend(storagePath);
            } catch (Exception e) {
                throw new RuntimeException(
                        "Could not create RocksDB state backend. Make sure it is included in the path.", e);
            }
        } else if (options.getStateBackend().equalsIgnoreCase("filesystem")) {
            stateBackend = new FsStateBackend(storagePath);
        } else {
            throw new IllegalArgumentException(
                    String.format(
                            "Unknown state backend '%s'. Use 'rocksdb' or 'filesystem' or configure via Flink config file.",
                            options.getStateBackend()));
        }
    } else if (options.getStateBackendFactory() != null) {
        // Legacy way of setting the state backend
        stateBackend =
                InstanceBuilder.ofType(FlinkStateBackendFactory.class)
                        .fromClass(options.getStateBackendFactory())
                        .build()
                        .createStateBackend(options);
    } else {
        stateBackend = null;
    }
    if (stateBackend != null) {
        env.setStateBackend(stateBackend);
    }
}
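For reference, a hedged sketch of the pipeline options this method consumes. The setter names are inferred from the getters used above (Beam PipelineOptions interfaces pair getters with setters), and the storage path is a placeholder:

import org.apache.beam.runners.flink.FlinkPipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

public class BackendOptionsExample {
    public static FlinkPipelineOptions rocksDbOptions() {
        FlinkPipelineOptions options = PipelineOptionsFactory.as(FlinkPipelineOptions.class);
        // Selects the "rocksdb" branch of configureStateBackend above.
        options.setStateBackend("rocksdb");
        // Must be non-null, or the checkArgument above fails.
        options.setStateBackendStoragePath("hdfs:///flink/checkpoints");
        return options;
    }
}

On the command line the same options would typically be passed as --stateBackend=rocksdb --stateBackendStoragePath=..., following Beam's option naming convention.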
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
From the class RocksDBTtlStateTestBase, method createStateBackend.
StateBackend createStateBackend(TernaryBoolean enableIncrementalCheckpointing) {
    String dbPath;
    String checkpointPath;
    try {
        dbPath = tempFolder.newFolder().getAbsolutePath();
        checkpointPath = tempFolder.newFolder().toURI().toString();
    } catch (IOException e) {
        // pass the cause along instead of swallowing it
        throw new FlinkRuntimeException("Failed to init rocksdb test state backend", e);
    }
    RocksDBStateBackend backend =
            new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
    Configuration config = new Configuration();
    backend = backend.configure(config, Thread.currentThread().getContextClassLoader());
    backend.setDbStoragePath(dbPath);
    return backend;
}
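Note the reassignment on the configure(...) call: RocksDBStateBackend.configure returns a reconfigured copy of the backend rather than mutating the receiver, which is why setDbStoragePath is applied to the returned instance.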
Use of org.apache.flink.contrib.streaming.state.RocksDBStateBackend in project flink by apache.
From the class StreamOperatorSnapshotRestoreTest, method testOperatorStatesSnapshotRestoreInternal.
private void testOperatorStatesSnapshotRestoreInternal(final int mode) throws Exception {
    // -------------------------------------------------------------------------- snapshot
    StateBackend stateBackend;
    FsStateBackend fsStateBackend = createStateBackendInternal();
    switch (stateBackendEnum) {
        case FILE:
            stateBackend = fsStateBackend;
            break;
        case ROCKSDB_FULLY_ASYNC:
            stateBackend = new RocksDBStateBackend(fsStateBackend, TernaryBoolean.FALSE);
            break;
        case ROCKSDB_INCREMENTAL:
            stateBackend = new RocksDBStateBackend(fsStateBackend, TernaryBoolean.TRUE);
            break;
        default:
            throw new IllegalStateException(
                    String.format("Unsupported state backend type %s", stateBackendEnum));
    }
    TestOneInputStreamOperator op = new TestOneInputStreamOperator(false);
    JobID jobID = new JobID();
    JobVertexID jobVertexID = new JobVertexID();
    int subtaskIdx = 0;
    LocalRecoveryDirectoryProvider directoryProvider =
            mode == ONLY_JM_RECOVERY
                    ? null
                    : new LocalRecoveryDirectoryProviderImpl(
                            temporaryFolder.newFolder(), jobID, jobVertexID, subtaskIdx);
    LocalRecoveryConfig localRecoveryConfig = new LocalRecoveryConfig(directoryProvider);
    MockEnvironment mockEnvironment =
            new MockEnvironmentBuilder()
                    .setJobID(jobID)
                    .setJobVertexID(jobVertexID)
                    .setTaskName("test")
                    .setManagedMemorySize(1024L * 1024L)
                    .setInputSplitProvider(new MockInputSplitProvider())
                    .setBufferSize(1024 * 1024)
                    .setTaskStateManager(new TestTaskStateManager(localRecoveryConfig))
                    .setMaxParallelism(MAX_PARALLELISM)
                    .setSubtaskIndex(subtaskIdx)
                    .setUserCodeClassLoader(getClass().getClassLoader())
                    .build();
    KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    op,
                    (KeySelector<Integer, Integer>) value -> value,
                    TypeInformation.of(Integer.class),
                    mockEnvironment);
    testHarness.setStateBackend(stateBackend);
    testHarness.open();
    for (int i = 0; i < 10; ++i) {
        testHarness.processElement(new StreamRecord<>(i));
    }
    OperatorSnapshotFinalizer snapshotWithLocalState = testHarness.snapshotWithLocalState(1L, 1L);
    testHarness.close();
    // -------------------------------------------------------------------------- restore
    op = new TestOneInputStreamOperator(true);
    testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    op,
                    (KeySelector<Integer, Integer>) value -> value,
                    TypeInformation.of(Integer.class),
                    MAX_PARALLELISM,
                    1, // num subtasks
                    0); // subtask index
    testHarness.setTimeServiceManagerProvider(
            new InternalTimeServiceManager.Provider() {
                @Override
                public <K> InternalTimeServiceManager<K> create(
                        CheckpointableKeyedStateBackend<K> keyedStatedBackend,
                        ClassLoader userClassloader,
                        KeyContext keyContext,
                        ProcessingTimeService processingTimeService,
                        Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates)
                        throws IOException {
                    return null;
                }
            });
    testHarness.setStateBackend(stateBackend);
    OperatorSubtaskState jobManagerOwnedState = snapshotWithLocalState.getJobManagerOwnedState();
    OperatorSubtaskState taskLocalState = snapshotWithLocalState.getTaskLocalState();
    // Check that local state was created if and only if local recovery was enabled.
    Assert.assertTrue(
            (mode > ONLY_JM_RECOVERY) == (taskLocalState != null && taskLocalState.hasState()));
    if (mode == TM_REMOVE_JM_RECOVERY) {
        jobManagerOwnedState.getManagedKeyedState().discardState();
    } else if (mode == JM_REMOVE_TM_RECOVERY) {
        taskLocalState.getManagedKeyedState().discardState();
    }
    testHarness.initializeState(jobManagerOwnedState, taskLocalState);
    testHarness.open();
    for (int i = 0; i < 10; ++i) {
        testHarness.processElement(new StreamRecord<>(i));
    }
    testHarness.close();
}
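The TernaryBoolean argument used in both ROCKSDB_* branches selects the snapshot strategy. A minimal sketch of the three possible settings, assuming a placeholder checkpoint URI:

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.util.TernaryBoolean;

public class IncrementalFlagExample {
    public static void main(String[] args) throws Exception {
        FsStateBackend checkpointBackend = new FsStateBackend("file:///tmp/checkpoints");
        // TRUE: checkpoints upload only RocksDB files created since the last checkpoint.
        RocksDBStateBackend incremental = new RocksDBStateBackend(checkpointBackend, TernaryBoolean.TRUE);
        // FALSE: every checkpoint is a full snapshot, as in the ROCKSDB_FULLY_ASYNC branch above.
        RocksDBStateBackend full = new RocksDBStateBackend(checkpointBackend, TernaryBoolean.FALSE);
        // UNDEFINED: defer to the value configured in flink-conf.yaml.
        RocksDBStateBackend deferred = new RocksDBStateBackend(checkpointBackend, TernaryBoolean.UNDEFINED);
    }
}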