use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class AzureFileSystemBehaviorITCase method testDirectoryListing.
@Test
public void testDirectoryListing() throws Exception {
    // 30 secs
    final long deadline = System.nanoTime() + 30_000_000_000L;
    final Path directory = new Path(getBasePath() + "/testdir/");
    final FileSystem fs = directory.getFileSystem();
    // directory must not yet exist
    assertFalse(fs.exists(directory));
    try {
        // create directory
        assertTrue(fs.mkdirs(directory));
        checkPathEventualExistence(fs, directory, true, deadline);
        // directory empty
        assertEquals(0, fs.listStatus(directory).length);
        // create some files
        final int numFiles = 3;
        for (int i = 0; i < numFiles; i++) {
            Path file = new Path(directory, "/file-" + i);
            try (FSDataOutputStream out = fs.create(file, FileSystem.WriteMode.OVERWRITE);
                    OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
                writer.write("hello-" + i + "\n");
            }
            // just in case, wait for the file to exist (should then also be reflected in the
            // directory's file list below)
            checkPathEventualExistence(fs, file, true, deadline);
        }
        FileStatus[] files = fs.listStatus(directory);
        assertNotNull(files);
        assertEquals(3, files.length);
        for (FileStatus status : files) {
            assertFalse(status.isDir());
        }
        // now that there are files, the directory must exist
        assertTrue(fs.exists(directory));
    } finally {
        // clean up
        fs.delete(directory, true);
    }
    // now directory must be gone
    checkPathEventualExistence(fs, directory, false, deadline);
}
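The assertions above lean on a checkPathEventualExistence helper to absorb the eventual consistency of the remote store. A minimal polling sketch of what such a helper could look like, assuming it simply re-checks fs.exists() until the deadline (the actual helper in AzureFileSystemBehaviorITCase may differ):

private static void checkPathEventualExistence(
        FileSystem fs, Path path, boolean expectedExists, long deadline)
        throws IOException, InterruptedException {
    // poll until the file system reports the expected state or the deadline passes
    boolean exists;
    while ((exists = fs.exists(path)) != expectedExists && System.nanoTime() < deadline) {
        Thread.sleep(10L);
    }
    assertEquals(expectedExists, exists);
}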
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class AzureFileSystemBehaviorITCase method testSimpleFileWriteAndRead.
@Test
public void testSimpleFileWriteAndRead() throws Exception {
    // 30 secs
    final long deadline = System.nanoTime() + 30_000_000_000L;
    final String testLine = "Hello Upload!";
    final Path path = new Path(getBasePath() + "/test.txt");
    final FileSystem fs = path.getFileSystem();
    try {
        try (FSDataOutputStream out = fs.create(path, FileSystem.WriteMode.OVERWRITE);
                OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
            writer.write(testLine);
        }
        // just in case, wait for the path to exist
        checkPathEventualExistence(fs, path, true, deadline);
        try (FSDataInputStream in = fs.open(path);
                InputStreamReader ir = new InputStreamReader(in, StandardCharsets.UTF_8);
                BufferedReader reader = new BufferedReader(ir)) {
            String line = reader.readLine();
            assertEquals(testLine, line);
        }
    } finally {
        fs.delete(path, false);
    }
    // now file must be gone
    checkPathEventualExistence(fs, path, false, deadline);
}
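To try the same FSDataOutputStream write / FSDataInputStream read round trip outside the Azure test harness, a self-contained sketch against Flink's local file system could look as follows; the class name and the file:///tmp scratch path are assumptions, not part of the test.

import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

public class LocalWriteReadExample {
    public static void main(String[] args) throws Exception {
        // assumed scratch location on the local file system
        Path path = new Path("file:///tmp/flink-fs-example/test.txt");
        FileSystem fs = path.getFileSystem();
        fs.mkdirs(path.getParent());

        // write a single line through FSDataOutputStream
        try (FSDataOutputStream out = fs.create(path, FileSystem.WriteMode.OVERWRITE);
                OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
            writer.write("Hello Upload!");
        }

        // read it back through FSDataInputStream
        try (FSDataInputStream in = fs.open(path);
                BufferedReader reader =
                        new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            System.out.println(reader.readLine());
        }

        fs.delete(path, false);
    }
}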
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class AbstractHadoopFileSystemITTest method testDirectoryListing.
@Test
public void testDirectoryListing() throws Exception {
    final Path directory = new Path(basePath, "testdir/");
    // directory must not yet exist
    assertFalse(fs.exists(directory));
    try {
        // create directory
        assertTrue(fs.mkdirs(directory));
        checkEmptyDirectory(directory);
        // directory empty
        assertEquals(0, fs.listStatus(directory).length);
        // create some files
        final int numFiles = 3;
        for (int i = 0; i < numFiles; i++) {
            Path file = new Path(directory, "/file-" + i);
            try (FSDataOutputStream out = fs.create(file, FileSystem.WriteMode.OVERWRITE);
                    OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
                writer.write("hello-" + i + "\n");
            }
            // just in case, wait for the file to exist (should then also be reflected in the
            // directory's file list below)
            checkPathExistence(file, true, consistencyToleranceNS);
        }
        FileStatus[] files = fs.listStatus(directory);
        assertNotNull(files);
        assertEquals(3, files.length);
        for (FileStatus status : files) {
            assertFalse(status.isDir());
        }
        // now that there are files, the directory must exist
        assertTrue(fs.exists(directory));
    } finally {
        // clean up
        cleanupDirectoryWithRetry(fs, directory, consistencyToleranceNS);
    }
}
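cleanupDirectoryWithRetry is a helper of the abstract test class; a hedged approximation, assuming it repeatedly deletes and re-checks the path within the consistency tolerance (the actual implementation may differ):

static void cleanupDirectoryWithRetry(FileSystem fs, Path path, long consistencyToleranceNS)
        throws IOException, InterruptedException {
    final long deadline = System.nanoTime() + consistencyToleranceNS;
    fs.delete(path, true);
    // re-delete until the (possibly eventually consistent) store no longer reports the path
    while (fs.exists(path) && System.nanoTime() < deadline) {
        fs.delete(path, true);
        Thread.sleep(50L);
    }
    assertFalse(fs.exists(path));
}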
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class TestManagedSinkCommitter method commitAdd.
private void commitAdd(
        Map<CatalogPartitionSpec, List<RowData>> toAdd,
        Map<CatalogPartitionSpec, List<Path>> managedTableFileEntries)
        throws IOException {
    Map<CatalogPartitionSpec, String> processedPartitions = new HashMap<>();
    for (Map.Entry<CatalogPartitionSpec, List<RowData>> entry : toAdd.entrySet()) {
        CatalogPartitionSpec partitionSpec = entry.getKey();
        String partition =
                processedPartitions.computeIfAbsent(
                        partitionSpec,
                        (spec) -> PartitionPathUtils.generatePartitionPath(
                                new LinkedHashMap<>(spec.getPartitionSpec())));
        List<RowData> elements = entry.getValue();
        Path compactFilePath =
                new Path(
                        basePath,
                        new Path(String.format("%scompact-%s-file-0", partition, UUID.randomUUID())));
        FSDataOutputStream outputStream =
                compactFilePath.getFileSystem().create(compactFilePath, FileSystem.WriteMode.NO_OVERWRITE);
        for (RowData element : elements) {
            encoder.encode(element, outputStream);
        }
        outputStream.flush();
        outputStream.close();
        List<Path> fileEntries = managedTableFileEntries.get(partitionSpec);
        fileEntries.add(compactFilePath);
        managedTableFileEntries.put(partitionSpec, fileEntries);
    }
}
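The stream above is closed manually, so an exception from encode() would leave it open. A hedged, self-contained variation (class and method names assumed, not the project's code) that writes one compact file with try-with-resources:

import org.apache.flink.api.common.serialization.Encoder;
import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.table.data.RowData;

import java.io.IOException;
import java.util.List;

final class CompactFileWriter {

    /** Writes all elements to {@code targetFile}, failing if the file already exists. */
    static void writeAll(Path targetFile, List<RowData> elements, Encoder<RowData> encoder)
            throws IOException {
        FileSystem fs = targetFile.getFileSystem();
        try (FSDataOutputStream out = fs.create(targetFile, FileSystem.WriteMode.NO_OVERWRITE)) {
            for (RowData element : elements) {
                encoder.encode(element, out);
            }
            out.flush();
        }
    }
}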
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class CheckpointStateOutputStreamTest method testCleanupWhenFailingCloseAndGetHandle.
/**
 * Tests that the underlying stream file is deleted if the closeAndGetHandle method fails.
 */
@Test
public void testCleanupWhenFailingCloseAndGetHandle() throws IOException {
    final Path folder = new Path(tmp.newFolder().toURI());
    final String fileName = "test_name";
    final Path filePath = new Path(folder, fileName);
    final FileSystem fs =
            spy(new FsWithoutRecoverableWriter(
                    (path) -> new FailingCloseStream(new File(path.getPath()))));
    FSDataOutputStream stream = createTestStream(fs, folder, fileName);
    stream.write(new byte[] {1, 2, 3, 4, 5});
    try {
        closeAndGetResult(stream);
        fail("Expected IOException");
    } catch (IOException ignored) {
        // expected exception
    }
    verify(fs).delete(filePath, false);
}
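The injected FailingCloseStream stands in for a stream whose close() always fails, which is what forces the cleanup path being verified. A hedged sketch of such a stream (assumed implementation; the test's actual class may be built differently):

class FailingCloseStream extends FSDataOutputStream {

    private final java.io.FileOutputStream out;

    FailingCloseStream(java.io.File file) throws java.io.FileNotFoundException {
        this.out = new java.io.FileOutputStream(file);
    }

    @Override
    public long getPos() throws IOException {
        return out.getChannel().position();
    }

    @Override
    public void write(int b) throws IOException {
        out.write(b);
    }

    @Override
    public void flush() throws IOException {
        out.flush();
    }

    @Override
    public void sync() throws IOException {
        out.getFD().sync();
    }

    @Override
    public void close() throws IOException {
        // simulate a broken stream: the caller must still delete the partially written file
        throw new IOException("simulated failure on close");
    }
}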