Use of org.apache.flink.runtime.state.filesystem.FsStateBackend in project flink by apache.
In the class StateBackendLoadingTest, the method testLoadFileSystemStateBackendMixed:
/**
* Validates that the application-defined file system state backend is enriched with additional
* parameters from the cluster configuration, while application-defined parameters take
* precedence over configuration-defined parameters.
*/
@Test
public void testLoadFileSystemStateBackendMixed() throws Exception {
final String appCheckpointDir = new Path(tmp.newFolder().toURI()).toString();
final String checkpointDir = new Path(tmp.newFolder().toURI()).toString();
final String savepointDir = new Path(tmp.newFolder().toURI()).toString();
final Path expectedCheckpointsPath = new Path(new URI(appCheckpointDir));
final Path expectedSavepointsPath = new Path(savepointDir);
final int threshold = 1000000;
final int writeBufferSize = 4000000;
final FsStateBackend backend = new FsStateBackend(new URI(appCheckpointDir), null, threshold, writeBufferSize, TernaryBoolean.TRUE);
final Configuration config = new Configuration();
// this should not be picked up
config.setString(backendKey, "jobmanager");
// this should not be picked up
config.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir);
config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir);
// this should not be picked up
config.set(CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, MemorySize.parse("20"));
// this should not be picked up
config.setInteger(CheckpointingOptions.FS_WRITE_BUFFER_SIZE, 3000000);
final StateBackend loadedBackend = StateBackendLoader.fromApplicationOrConfigOrDefault(backend, TernaryBoolean.UNDEFINED, config, cl, null);
assertTrue(loadedBackend instanceof FsStateBackend);
final FsStateBackend fs = (FsStateBackend) loadedBackend;
assertEquals(expectedCheckpointsPath, fs.getCheckpointPath());
assertEquals(expectedSavepointsPath, fs.getSavepointPath());
assertEquals(threshold, fs.getMinFileSizeThreshold());
assertEquals(writeBufferSize, fs.getWriteBufferSize());
}
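For orientation, the "application-defined" backend that this test mixes with the cluster configuration corresponds to a job setting the backend explicitly. A minimal sketch of that application side (the checkpoint URI is a placeholder, not taken from the test):
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Explicitly constructed backend: its constructor arguments take precedence over
// the matching flink-conf.yaml entries, while values it leaves unset (for example
// the savepoint directory) are still filled in from the cluster configuration.
env.setStateBackend(new FsStateBackend("file:///tmp/app-checkpoints"));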
Use of org.apache.flink.runtime.state.filesystem.FsStateBackend in project flink by apache.
In the class RocksDBTtlStateTestBase, the method createStateBackend:
StateBackend createStateBackend(TernaryBoolean enableIncrementalCheckpointing) {
String dbPath;
String checkpointPath;
try {
dbPath = tempFolder.newFolder().getAbsolutePath();
checkpointPath = tempFolder.newFolder().toURI().toString();
} catch (IOException e) {
throw new FlinkRuntimeException("Failed to init rocksdb test state backend", e);
}
RocksDBStateBackend backend = new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
Configuration config = new Configuration();
backend = backend.configure(config, Thread.currentThread().getContextClassLoader());
backend.setDbStoragePath(dbPath);
return backend;
}
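The same wiring shows up in application code when RocksDB is the state backend: RocksDB keeps the working state on the TaskManager's local disk, and the wrapped FsStateBackend is used only for checkpoint streams. A minimal sketch of that job-level setup (the checkpoint URI is a placeholder):
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// TernaryBoolean.TRUE enables incremental checkpoints; the FsStateBackend only
// provides the durable storage for checkpoint files.
RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(new FsStateBackend("hdfs:///flink/checkpoints"), TernaryBoolean.TRUE);
env.setStateBackend(rocksDbBackend);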
Use of org.apache.flink.runtime.state.filesystem.FsStateBackend in project flink by apache.
In the class RocksDBStateBackendMigrationTest, the method getStateBackend:
@Override
protected RocksDBStateBackend getStateBackend() throws IOException {
dbPath = tempFolder.newFolder().getAbsolutePath();
String checkpointPath = tempFolder.newFolder().toURI().toString();
RocksDBStateBackend backend = new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
Configuration configuration = new Configuration();
configuration.set(RocksDBOptions.TIMER_SERVICE_FACTORY, EmbeddedRocksDBStateBackend.PriorityQueueStateType.ROCKSDB);
backend = backend.configure(configuration, Thread.currentThread().getContextClassLoader());
backend.setDbStoragePath(dbPath);
return backend;
}
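The configure() call is what applies cluster configuration to a programmatically created backend; it returns a new, configured backend instance rather than mutating the receiver. A minimal sketch of the same pattern outside the test, selecting the heap-based timer store (the checkpoint URI is a placeholder):
RocksDBStateBackend backend = new RocksDBStateBackend(new FsStateBackend("file:///tmp/checkpoints"), TernaryBoolean.FALSE);
Configuration conf = new Configuration();
// Keep timer state on the heap instead of inside RocksDB.
conf.set(RocksDBOptions.TIMER_SERVICE_FACTORY, EmbeddedRocksDBStateBackend.PriorityQueueStateType.HEAP);
// configure() returns a new backend instance with the options applied.
backend = backend.configure(conf, Thread.currentThread().getContextClassLoader());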
Use of org.apache.flink.runtime.state.filesystem.FsStateBackend in project flink by apache.
In the class CheckpointCoordinatorTest, the method testBaseLocationsNotInitialized:
@Test
public void testBaseLocationsNotInitialized() throws Exception {
File checkpointDir = tmpFolder.newFolder();
JobVertexID jobVertexID = new JobVertexID();
ExecutionGraph graph = new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder().addJobVertex(jobVertexID).setTransitToRunning(false).build();
CheckpointCoordinator checkpointCoordinator =
    new CheckpointCoordinatorBuilder()
        .setExecutionGraph(graph)
        .setCheckpointCoordinatorConfiguration(
            CheckpointCoordinatorConfiguration.builder().setCheckpointInterval(Long.MAX_VALUE).build())
        .setCheckpointStorage(new FsStateBackend(checkpointDir.toURI()))
        .build();
Path jobCheckpointPath = new Path(checkpointDir.getAbsolutePath(), graph.getJobID().toString());
FileSystem fs = FileSystem.get(checkpointDir.toURI());
// directory will not be created if checkpointing is disabled
Assert.assertFalse(fs.exists(jobCheckpointPath));
}
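The positive case requires only that checkpointing be enabled: the checkpoint storage then initializes the per-job base directory <checkpoint-dir>/<job-id>/ when the job starts. A minimal job-level sketch (the checkpoint directory is a placeholder):
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setStateBackend(new FsStateBackend("file:///tmp/checkpoints"));
// Enabling checkpointing is what causes the base locations to be created.
env.enableCheckpointing(10_000L);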
Use of org.apache.flink.runtime.state.filesystem.FsStateBackend in project flink by apache.
In the class StreamOperatorSnapshotRestoreTest, the method testOperatorStatesSnapshotRestoreInternal:
private void testOperatorStatesSnapshotRestoreInternal(final int mode) throws Exception {
// -------------------------------------------------------------------------- snapshot
StateBackend stateBackend;
FsStateBackend fsstateBackend = createStateBackendInternal();
switch(stateBackendEnum) {
case FILE:
stateBackend = fsstateBackend;
break;
case ROCKSDB_FULLY_ASYNC:
stateBackend = new RocksDBStateBackend(fsstateBackend, TernaryBoolean.FALSE);
break;
case ROCKSDB_INCREMENTAL:
stateBackend = new RocksDBStateBackend(fsstateBackend, TernaryBoolean.TRUE);
break;
default:
throw new IllegalStateException(String.format("Unsupported state backend type %s", stateBackendEnum));
}
TestOneInputStreamOperator op = new TestOneInputStreamOperator(false);
JobID jobID = new JobID();
JobVertexID jobVertexID = new JobVertexID();
int subtaskIdx = 0;
LocalRecoveryDirectoryProvider directoryProvider = mode == ONLY_JM_RECOVERY ? null : new LocalRecoveryDirectoryProviderImpl(temporaryFolder.newFolder(), jobID, jobVertexID, subtaskIdx);
LocalRecoveryConfig localRecoveryConfig = new LocalRecoveryConfig(directoryProvider);
MockEnvironment mockEnvironment =
    new MockEnvironmentBuilder()
        .setJobID(jobID)
        .setJobVertexID(jobVertexID)
        .setTaskName("test")
        .setManagedMemorySize(1024L * 1024L)
        .setInputSplitProvider(new MockInputSplitProvider())
        .setBufferSize(1024 * 1024)
        .setTaskStateManager(new TestTaskStateManager(localRecoveryConfig))
        .setMaxParallelism(MAX_PARALLELISM)
        .setSubtaskIndex(subtaskIdx)
        .setUserCodeClassLoader(getClass().getClassLoader())
        .build();
KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness = new KeyedOneInputStreamOperatorTestHarness<>(op, (KeySelector<Integer, Integer>) value -> value, TypeInformation.of(Integer.class), mockEnvironment);
testHarness.setStateBackend(stateBackend);
testHarness.open();
for (int i = 0; i < 10; ++i) {
testHarness.processElement(new StreamRecord<>(i));
}
OperatorSnapshotFinalizer snapshotWithLocalState = testHarness.snapshotWithLocalState(1L, 1L);
testHarness.close();
// -------------------------------------------------------------------------- restore
op = new TestOneInputStreamOperator(true);
testHarness = new KeyedOneInputStreamOperatorTestHarness<>(
    op,
    (KeySelector<Integer, Integer>) value -> value,
    TypeInformation.of(Integer.class),
    MAX_PARALLELISM,
    1, /* num subtasks */
    0);
testHarness.setTimeServiceManagerProvider(new InternalTimeServiceManager.Provider() {
@Override
public <K> InternalTimeServiceManager<K> create(CheckpointableKeyedStateBackend<K> keyedStatedBackend, ClassLoader userClassloader, KeyContext keyContext, ProcessingTimeService processingTimeService, Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates) throws IOException {
return null;
}
});
testHarness.setStateBackend(stateBackend);
OperatorSubtaskState jobManagerOwnedState = snapshotWithLocalState.getJobManagerOwnedState();
OperatorSubtaskState taskLocalState = snapshotWithLocalState.getTaskLocalState();
// Check that task-local state was created if and only if local recovery was enabled
Assert.assertTrue(mode > ONLY_JM_RECOVERY == (taskLocalState != null && taskLocalState.hasState()));
if (mode == TM_REMOVE_JM_RECOVERY) {
jobManagerOwnedState.getManagedKeyedState().discardState();
} else if (mode == JM_REMOVE_TM_RECOVERY) {
taskLocalState.getManagedKeyedState().discardState();
}
testHarness.initializeState(jobManagerOwnedState, taskLocalState);
testHarness.open();
for (int i = 0; i < 10; ++i) {
testHarness.processElement(new StreamRecord<>(i));
}
testHarness.close();
}
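Outside this harness, the task-local snapshot copy that the test inspects is produced when task-local recovery is enabled in the cluster configuration. A minimal sketch using the standard CheckpointingOptions entry (nothing here is taken from the test itself):
Configuration conf = new Configuration();
// Keep a secondary, local copy of each snapshot for faster recovery.
conf.setBoolean(CheckpointingOptions.LOCAL_RECOVERY, true);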