Use of org.apache.flink.runtime.state.LocalRecoveryConfig in project flink by apache.
Example from class EmbeddedRocksDBStateBackend, method createKeyedStateBackend:
@Override
public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
        Environment env, JobID jobID, String operatorIdentifier, TypeSerializer<K> keySerializer,
        int numberOfKeyGroups, KeyGroupRange keyGroupRange, TaskKvStateRegistry kvStateRegistry,
        TtlTimeProvider ttlTimeProvider, MetricGroup metricGroup,
        @Nonnull Collection<KeyedStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry,
        double managedMemoryFraction) throws IOException {
    // first, make sure that the RocksDB JNI library is loaded
    // we do this explicitly here to have better error handling
    String tempDir = env.getTaskManagerInfo().getTmpWorkingDirectory().getAbsolutePath();
    ensureRocksDBIsLoaded(tempDir);
    // replace all characters that are not legal for filenames with underscore
    String fileCompatibleIdentifier = operatorIdentifier.replaceAll("[^a-zA-Z0-9\\-]", "_");
    lazyInitializeForJob(env, fileCompatibleIdentifier);
    File instanceBasePath = new File(getNextStoragePath(),
            "job_" + jobId + "_op_" + fileCompatibleIdentifier + "_uuid_" + UUID.randomUUID());
    // the local recovery settings for this subtask are provided by the task's TaskStateManager
    LocalRecoveryConfig localRecoveryConfig = env.getTaskStateManager().createLocalRecoveryConfig();
    final OpaqueMemoryResource<RocksDBSharedResources> sharedResources =
            RocksDBOperationUtils.allocateSharedCachesIfConfigured(
                    memoryConfiguration, env.getMemoryManager(), managedMemoryFraction, LOG);
    if (sharedResources != null) {
        LOG.info("Obtained shared RocksDB cache of size {} bytes", sharedResources.getSize());
    }
    final RocksDBResourceContainer resourceContainer = createOptionsAndResourceContainer(sharedResources);
    ExecutionConfig executionConfig = env.getExecutionConfig();
    StreamCompressionDecorator keyGroupCompressionDecorator = getCompressionDecorator(executionConfig);
    LatencyTrackingStateConfig latencyTrackingStateConfig =
            latencyTrackingConfigBuilder.setMetricGroup(metricGroup).build();
    RocksDBKeyedStateBackendBuilder<K> builder =
            new RocksDBKeyedStateBackendBuilder<>(
                            operatorIdentifier, env.getUserCodeClassLoader().asClassLoader(),
                            instanceBasePath, resourceContainer,
                            stateName -> resourceContainer.getColumnOptions(), kvStateRegistry,
                            keySerializer, numberOfKeyGroups, keyGroupRange, executionConfig,
                            localRecoveryConfig, getPriorityQueueStateType(), ttlTimeProvider,
                            latencyTrackingStateConfig, metricGroup, stateHandles,
                            keyGroupCompressionDecorator, cancelStreamRegistry)
                    .setEnableIncrementalCheckpointing(isIncrementalCheckpointsEnabled())
                    .setNumberOfTransferingThreads(getNumberOfTransferThreads())
                    .setNativeMetricOptions(resourceContainer.getMemoryWatcherOptions(defaultMetricOptions))
                    .setWriteBatchSize(getWriteBatchSize());
    return builder.build();
}
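For context, the LocalRecoveryConfig obtained above only carries a directory provider when task-local recovery is enabled in the cluster configuration. Below is a minimal, illustrative sketch of a user job that turns it on together with the RocksDB backend; the job topology and names are invented for the example, while CheckpointingOptions.LOCAL_RECOVERY (key "state.backend.local-recovery"), EmbeddedRocksDBStateBackend, and the StreamExecutionEnvironment calls are existing Flink API. In a real deployment the option is more commonly set in the Flink configuration file than in code.

import org.apache.flink.configuration.CheckpointingOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class LocalRecoveryJobSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // with local recovery enabled, each checkpoint also keeps a secondary copy on local disk,
        // which TaskStateManager#createLocalRecoveryConfig() exposes to the state backend
        conf.set(CheckpointingOptions.LOCAL_RECOVERY, true);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.enableCheckpointing(10_000L);
        // incremental RocksDB checkpoints; corresponds to isIncrementalCheckpointsEnabled() above
        env.setStateBackend(new EmbeddedRocksDBStateBackend(true));

        env.fromElements(1, 2, 3, 4)
                .keyBy(v -> v % 2)
                .reduce(Integer::sum)
                .print();

        env.execute("local-recovery-sketch");
    }
}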
Use of org.apache.flink.runtime.state.LocalRecoveryConfig in project flink by apache.
Example from class StreamOperatorSnapshotRestoreTest, method testOperatorStatesSnapshotRestoreInternal:
private void testOperatorStatesSnapshotRestoreInternal(final int mode) throws Exception {
    // -------------------------------------------------------------------------- snapshot
    StateBackend stateBackend;
    FsStateBackend fsstateBackend = createStateBackendInternal();
    switch (stateBackendEnum) {
        case FILE:
            stateBackend = fsstateBackend;
            break;
        case ROCKSDB_FULLY_ASYNC:
            stateBackend = new RocksDBStateBackend(fsstateBackend, TernaryBoolean.FALSE);
            break;
        case ROCKSDB_INCREMENTAL:
            stateBackend = new RocksDBStateBackend(fsstateBackend, TernaryBoolean.TRUE);
            break;
        default:
            throw new IllegalStateException(
                    String.format("Do not support statebackend type %s", stateBackendEnum));
    }
    TestOneInputStreamOperator op = new TestOneInputStreamOperator(false);
    JobID jobID = new JobID();
    JobVertexID jobVertexID = new JobVertexID();
    int subtaskIdx = 0;
    // a directory provider is only created for the modes that exercise task-local recovery
    LocalRecoveryDirectoryProvider directoryProvider =
            mode == ONLY_JM_RECOVERY
                    ? null
                    : new LocalRecoveryDirectoryProviderImpl(
                            temporaryFolder.newFolder(), jobID, jobVertexID, subtaskIdx);
    LocalRecoveryConfig localRecoveryConfig = new LocalRecoveryConfig(directoryProvider);
    MockEnvironment mockEnvironment =
            new MockEnvironmentBuilder()
                    .setJobID(jobID)
                    .setJobVertexID(jobVertexID)
                    .setTaskName("test")
                    .setManagedMemorySize(1024L * 1024L)
                    .setInputSplitProvider(new MockInputSplitProvider())
                    .setBufferSize(1024 * 1024)
                    .setTaskStateManager(new TestTaskStateManager(localRecoveryConfig))
                    .setMaxParallelism(MAX_PARALLELISM)
                    .setSubtaskIndex(subtaskIdx)
                    .setUserCodeClassLoader(getClass().getClassLoader())
                    .build();
    KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    op,
                    (KeySelector<Integer, Integer>) value -> value,
                    TypeInformation.of(Integer.class),
                    mockEnvironment);
    testHarness.setStateBackend(stateBackend);
    testHarness.open();
    for (int i = 0; i < 10; ++i) {
        testHarness.processElement(new StreamRecord<>(i));
    }
    OperatorSnapshotFinalizer snapshotWithLocalState = testHarness.snapshotWithLocalState(1L, 1L);
    testHarness.close();
    // -------------------------------------------------------------------------- restore
    op = new TestOneInputStreamOperator(true);
    testHarness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    op,
                    (KeySelector<Integer, Integer>) value -> value,
                    TypeInformation.of(Integer.class),
                    MAX_PARALLELISM,
                    1, /* num subtasks */
                    0);
    testHarness.setTimeServiceManagerProvider(
            new InternalTimeServiceManager.Provider() {
                @Override
                public <K> InternalTimeServiceManager<K> create(
                        CheckpointableKeyedStateBackend<K> keyedStatedBackend,
                        ClassLoader userClassloader,
                        KeyContext keyContext,
                        ProcessingTimeService processingTimeService,
                        Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates)
                        throws IOException {
                    return null;
                }
            });
    testHarness.setStateBackend(stateBackend);
    OperatorSubtaskState jobManagerOwnedState = snapshotWithLocalState.getJobManagerOwnedState();
    OperatorSubtaskState taskLocalState = snapshotWithLocalState.getTaskLocalState();
    // We check if local state was created when we enabled local recovery
    Assert.assertTrue(
            mode > ONLY_JM_RECOVERY == (taskLocalState != null && taskLocalState.hasState()));
    if (mode == TM_REMOVE_JM_RECOVERY) {
        jobManagerOwnedState.getManagedKeyedState().discardState();
    } else if (mode == JM_REMOVE_TM_RECOVERY) {
        taskLocalState.getManagedKeyedState().discardState();
    }
    testHarness.initializeState(jobManagerOwnedState, taskLocalState);
    testHarness.open();
    for (int i = 0; i < 10; ++i) {
        testHarness.processElement(new StreamRecord<>(i));
    }
    testHarness.close();
}
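The test above switches local recovery on and off purely through the presence or absence of a directory provider inside the LocalRecoveryConfig. The following stripped-down sketch isolates that construction; it assumes only the constructors already used in the test, and the wrapper class, temporary directory, and method names are invented for illustration:

import java.io.File;
import java.nio.file.Files;

import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.state.LocalRecoveryConfig;
import org.apache.flink.runtime.state.LocalRecoveryDirectoryProvider;
import org.apache.flink.runtime.state.LocalRecoveryDirectoryProviderImpl;

class LocalRecoveryConfigSketch {

    // provider present -> task-local recovery is active; local snapshots land under localStateRoot
    static LocalRecoveryConfig withLocalRecovery(JobID jobId, JobVertexID vertexId, int subtaskIdx)
            throws Exception {
        File localStateRoot = Files.createTempDirectory("local-state").toFile();
        LocalRecoveryDirectoryProvider provider =
                new LocalRecoveryDirectoryProviderImpl(localStateRoot, jobId, vertexId, subtaskIdx);
        return new LocalRecoveryConfig(provider);
    }

    // provider absent -> the ONLY_JM_RECOVERY case above: only the JobManager-owned snapshot exists
    static LocalRecoveryConfig jobManagerRecoveryOnly() {
        return new LocalRecoveryConfig(null);
    }
}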