Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project,
taken from the class AbstractFileCheckpointStorageAccessTestBase, method testPersistMultipleMetadataOnlyCheckpoints.
// ------------------------------------------------------------------------
// checkpoints
// ------------------------------------------------------------------------
/**
 * Validates that multiple checkpoints from different jobs with the same checkpoint ID do not
 * interfere with each other.
 */
@Test
public void testPersistMultipleMetadataOnlyCheckpoints() throws Exception {
    final FileSystem localFs = FileSystem.getLocalFileSystem();
    final Path baseDirectory = new Path(tmp.newFolder().toURI());
    final long sharedCheckpointId = 177;

    // two independent storage accesses rooted at the same base directory
    final CheckpointStorageAccess firstStorage = createCheckpointStorage(baseDirectory);
    firstStorage.initializeBaseLocationsForCheckpoint();
    final CheckpointStorageAccess secondStorage = createCheckpointStorage(baseDirectory);
    secondStorage.initializeBaseLocationsForCheckpoint();

    final CheckpointStorageLocation firstLocation =
            firstStorage.initializeLocationForCheckpoint(sharedCheckpointId);
    final CheckpointStorageLocation secondLocation =
            secondStorage.initializeLocationForCheckpoint(sharedCheckpointId);

    final byte[] firstPayload = { 77, 66, 55, 99, 88 };
    final byte[] secondPayload = { 1, 3, 2, 5, 4 };

    // write and finalize the first metadata file
    final CompletedCheckpointStorageLocation firstCompleted;
    try (CheckpointMetadataOutputStream stream = firstLocation.createMetadataOutputStream()) {
        stream.write(firstPayload);
        firstCompleted = stream.closeAndFinalizeCheckpoint();
    }
    final String firstPointer = firstCompleted.getExternalPointer();

    // write and finalize the second metadata file
    final CompletedCheckpointStorageLocation secondCompleted;
    try (CheckpointMetadataOutputStream stream = secondLocation.createMetadataOutputStream()) {
        stream.write(secondPayload);
        secondCompleted = stream.closeAndFinalizeCheckpoint();
    }
    final String secondPointer = secondCompleted.getExternalPointer();

    // check that this went to a file, but in a nested directory structure:
    // one directory per storage
    final FileStatus[] storageDirs = localFs.listStatus(baseDirectory);
    assertEquals(2, storageDirs.length);

    // in each per-storage directory, at least one entry for the checkpoint
    final FileStatus[] firstStorageFiles = localFs.listStatus(storageDirs[0].getPath());
    final FileStatus[] secondStorageFiles = localFs.listStatus(storageDirs[1].getPath());
    assertTrue(firstStorageFiles.length >= 1);
    assertTrue(secondStorageFiles.length >= 1);

    assertTrue(localFs.exists(new Path(firstPointer, AbstractFsCheckpointStorageAccess.METADATA_FILE_NAME)));
    assertTrue(localFs.exists(new Path(secondPointer, AbstractFsCheckpointStorageAccess.METADATA_FILE_NAME)));

    // check that both storages can resolve each other's contents
    validateContents(firstStorage.resolveCheckpoint(firstPointer).getMetadataHandle(), firstPayload);
    validateContents(firstStorage.resolveCheckpoint(secondPointer).getMetadataHandle(), secondPayload);
    validateContents(secondStorage.resolveCheckpoint(firstPointer).getMetadataHandle(), firstPayload);
    validateContents(secondStorage.resolveCheckpoint(secondPointer).getMetadataHandle(), secondPayload);
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project,
taken from the class RocksDBStateUploaderTest, method testMultiThreadUploadCorrectly.
/**
 * Verifies that uploading files to the checkpoint file system with multiple threads
 * preserves the content of every uploaded file.
 */
@Test
public void testMultiThreadUploadCorrectly() throws Exception {
    final File privateFolder = temporaryFolder.newFolder("private");
    final org.apache.flink.core.fs.Path privateDir =
            org.apache.flink.core.fs.Path.fromLocalFile(privateFolder);
    final File sharedFolder = temporaryFolder.newFolder("shared");
    final org.apache.flink.core.fs.Path sharedDir =
            org.apache.flink.core.fs.Path.fromLocalFile(sharedFolder);

    final FileSystem fs = privateDir.getFileSystem();
    final int stateSizeThreshold = 1024;
    final int bufferSize = 4096;
    final FsCheckpointStreamFactory streamFactory =
            new FsCheckpointStreamFactory(fs, privateDir, sharedDir, stateSizeThreshold, bufferSize);

    // generate local sst files to be uploaded
    final String localFolderName = "local";
    temporaryFolder.newFolder(localFolderName);
    final int sstCount = 6;
    final Map<StateHandleID, Path> localSstFiles =
            generateRandomSstFiles(localFolderName, sstCount, stateSizeThreshold);

    // upload with 5 threads and verify each uploaded file matches its local source
    try (RocksDBStateUploader uploader = new RocksDBStateUploader(5)) {
        final Map<StateHandleID, StreamStateHandle> uploaded =
                uploader.uploadFilesToCheckpointFs(
                        localSstFiles, streamFactory, CheckpointedStateScope.SHARED, new CloseableRegistry());
        for (Map.Entry<StateHandleID, Path> entry : localSstFiles.entrySet()) {
            assertStateContentEqual(entry.getValue(), uploaded.get(entry.getKey()).openInputStream());
        }
    }
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project,
taken from the class TaskLocalStateStoreImpl, method deleteDirectory.
/**
 * Recursively deletes the given directory if it exists; a missing directory is a no-op.
 *
 * @param directory the directory to remove
 * @throws IOException if the file system cannot be accessed or the deletion fails
 */
private void deleteDirectory(File directory) throws IOException {
    final Path target = new Path(directory.toURI());
    final FileSystem fileSystem = target.getFileSystem();
    if (!fileSystem.exists(target)) {
        return; // nothing to clean up
    }
    fileSystem.delete(target, true);
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project,
taken from the class CheckpointCoordinatorTest, method testBaseLocationsNotInitialized.
@Test
public void testBaseLocationsNotInitialized() throws Exception {
    final File checkpointBase = tmpFolder.newFolder();
    final JobVertexID vertexId = new JobVertexID();
    final ExecutionGraph executionGraph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(vertexId)
                    .setTransitToRunning(false)
                    .build();
    // building the coordinator is the action under test — it must not create the directory
    final CheckpointCoordinator coordinator =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(executionGraph)
                    .setCheckpointCoordinatorConfiguration(
                            CheckpointCoordinatorConfiguration.builder()
                                    .setCheckpointInterval(Long.MAX_VALUE)
                                    .build())
                    .setCheckpointStorage(new FsStateBackend(checkpointBase.toURI()))
                    .build();
    final Path jobCheckpointPath =
            new Path(checkpointBase.getAbsolutePath(), executionGraph.getJobID().toString());
    final FileSystem fs = FileSystem.get(checkpointBase.toURI());
    // directory will not be created if checkpointing is disabled
    Assert.assertFalse(fs.exists(jobCheckpointPath));
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project,
taken from the class FileSystemCommitter, method commitPartitions.
/**
 * For committing job's output after successful batch job completion.
 *
 * <p>Loads every task's temporary output into the final table location (per partition when the
 * table is partitioned) and always removes the temporary task directories afterwards, even when
 * loading fails.
 *
 * @throws Exception if listing, loading, or deleting the temporary paths fails
 */
public void commitPartitions() throws Exception {
    // resolve the file system that hosts the temporary output path
    FileSystem fs = factory.create(tmpPath.toUri());
    // all per-task temporary directories found under tmpPath
    List<Path> taskPaths = listTaskTemporaryPaths(fs, tmpPath);
    // PartitionLoader is closed before the finally-block cleanup runs
    try (PartitionLoader loader = new PartitionLoader(overwrite, fs, metaStoreFactory)) {
        if (partitionColumnSize > 0) {
            // partitioned table: group task outputs by partition spec
            // (keys presumably map partition column name -> value — confirm in collectPartSpecToPaths)
            for (Map.Entry<LinkedHashMap<String, String>, List<Path>> entry : collectPartSpecToPaths(fs, taskPaths, partitionColumnSize).entrySet()) {
                loader.loadPartition(entry.getKey(), entry.getValue());
            }
        } else {
            // non-partitioned table: load all task outputs directly
            loader.loadNonPartition(taskPaths);
        }
    } finally {
        // always clean up the temporary task directories, even on failure
        for (Path taskPath : taskPaths) {
            fs.delete(taskPath, true);
        }
    }
}
Aggregations