Use of org.apache.flink.core.fs.FSDataOutputStream in the Apache Flink project.
From the class SequenceFileWriterFactory, method create.
@Override
public SequenceFileWriter<K, V> create(FSDataOutputStream out) throws IOException {
    // Bridge Flink's output stream into Hadoop's stream type so the
    // SequenceFile API can write to it (null: no FileSystem.Statistics tracking).
    final org.apache.hadoop.fs.FSDataOutputStream hadoopStream =
            new org.apache.hadoop.fs.FSDataOutputStream(out, null);
    final CompressionCodec codec =
            getCompressionCodec(serializableHadoopConfig.get(), compressionCodecName);
    // Assemble the Hadoop writer from the configured key/value classes and compression.
    final SequenceFile.Writer sequenceFileWriter =
            SequenceFile.createWriter(
                    serializableHadoopConfig.get(),
                    SequenceFile.Writer.stream(hadoopStream),
                    SequenceFile.Writer.keyClass(keyClass),
                    SequenceFile.Writer.valueClass(valueClass),
                    SequenceFile.Writer.compression(compressionType, codec));
    return new SequenceFileWriter<>(sequenceFileWriter);
}
Use of org.apache.flink.core.fs.FSDataOutputStream in the Apache Flink project.
From the class CompactOperatorTest, method create.
// Builds a one-input test harness around a CompactOperator<Byte>.
// The operator is wired with: a file-system supplier backed by the test folder,
// a bulk reader over the byte test format, and a writer factory that writes to a
// hidden "."-prefixed temp file and atomically renames it onto the final path on commit.
private OneInputStreamOperatorTestHarness<CoordinatorOutput, PartitionCommitInfo> create(int parallelism, int subtaskIndex) throws Exception {
CompactOperator<Byte> operator = new CompactOperator<>(() -> folder.getFileSystem(), CompactBulkReader.factory(TestByteFormat.bulkFormat()), context -> {
Path path = context.getPath();
// Stage output in a dot-prefixed sibling so partial writes are never visible
// under the final name.
Path tempPath = new Path(path.getParent(), "." + path.getName());
FSDataOutputStream out = context.getFileSystem().create(tempPath, FileSystem.WriteMode.OVERWRITE);
return new CompactWriter<Byte>() {
@Override
public void write(Byte record) throws IOException {
// Each record is a single byte; write it straight to the temp file.
out.write(record);
}
@Override
public void commit() throws IOException {
// Close before rename so all buffered bytes are flushed to the temp file.
out.close();
context.getFileSystem().rename(tempPath, path);
}
};
});
// maxParallelism == parallelism here; subtaskIndex selects which subtask this
// harness instance represents.
return new OneInputStreamOperatorTestHarness<>(operator, parallelism, parallelism, subtaskIndex);
}
Use of org.apache.flink.core.fs.FSDataOutputStream in the Apache Flink project.
From the class FileUtils, method internalCopyFile.
// Copies a single file from sFS to tFS, refusing to overwrite an existing target,
// then mirrors the requested executable bit onto the copy.
private static void internalCopyFile(Path sourcePath, Path targetPath, boolean executable, FileSystem sFS, FileSystem tFS) throws IOException {
    try (FSDataOutputStream out = tFS.create(targetPath, FileSystem.WriteMode.NO_OVERWRITE);
            FSDataInputStream in = sFS.open(sourcePath)) {
        IOUtils.copyBytes(in, out);
        // Best effort: only meaningful when the target is on the local file system.
        // noinspection ResultOfMethodCallIgnored
        new File(targetPath.toString()).setExecutable(executable);
    }
}
Use of org.apache.flink.core.fs.FSDataOutputStream in the Apache Flink project.
From the class FileUtils, method expandDirectory.
/**
 * Expands the given zip archive into {@code targetDirectory} and returns the path of the
 * expanded top-level directory (taken from the archive's first entry).
 *
 * <p>Rejects entries whose names contain {@code ".."} path segments, so a crafted archive
 * cannot write outside of {@code targetDirectory} ("Zip Slip" traversal).
 *
 * @param file path of the zip archive to expand
 * @param targetDirectory directory to expand the archive into
 * @return path of the expanded directory inside {@code targetDirectory}
 * @throws IOException if reading/writing fails, the archive is empty, or an entry name
 *     attempts path traversal
 */
public static Path expandDirectory(Path file, Path targetDirectory) throws IOException {
    FileSystem sourceFs = file.getFileSystem();
    FileSystem targetFs = targetDirectory.getFileSystem();
    Path rootDir = null;
    try (ZipInputStream zis = new ZipInputStream(sourceFs.open(file))) {
        ZipEntry entry;
        while ((entry = zis.getNextEntry()) != null) {
            // Zip-slip guard: a ".." segment could resolve outside targetDirectory.
            for (String segment : entry.getName().split("/")) {
                if ("..".equals(segment)) {
                    throw new IOException(
                            "Zip entry '"
                                    + entry.getName()
                                    + "' contains a '..' path segment and would escape the target directory "
                                    + targetDirectory);
                }
            }
            Path relativePath = new Path(entry.getName());
            if (rootDir == null) {
                // the first entry contains the name of the original directory that was zipped
                rootDir = relativePath;
            }
            Path newFile = new Path(targetDirectory, relativePath);
            if (entry.isDirectory()) {
                targetFs.mkdirs(newFile);
            } else {
                try (FSDataOutputStream fileStream = targetFs.create(newFile, FileSystem.WriteMode.NO_OVERWRITE)) {
                    // copyBytes must not close 'zis' here, as it would prevent access to
                    // further zip entries
                    IOUtils.copyBytes(zis, fileStream, false);
                }
            }
            zis.closeEntry();
        }
    }
    if (rootDir == null) {
        // An empty archive has no first entry to derive the root directory from;
        // previously this fell through to an uninformative NullPointerException.
        throw new IOException("Zip archive '" + file + "' is empty.");
    }
    return new Path(targetDirectory, rootDir);
}
Use of org.apache.flink.core.fs.FSDataOutputStream in the Apache Flink project.
From the class LocalFileSystemTest, method testCreatingFileInCurrentDirectoryWithRelativePath.
/**
 * This test verifies the issue https://issues.apache.org/jira/browse/FLINK-18612.
 */
@Test
public void testCreatingFileInCurrentDirectoryWithRelativePath() throws IOException {
    final FileSystem localFs = FileSystem.getLocalFileSystem();
    final Path relativeFile = new Path("local_fs_test_" + RandomStringUtils.randomAlphanumeric(16));
    try (FSDataOutputStream ignored = localFs.create(relativeFile, WriteMode.OVERWRITE)) {
        // Successfully creating the stream is the whole test; nothing needs to be written.
    } finally {
        // Best-effort cleanup of the file created in the working directory; retry a
        // bounded number of times in case deletion does not take effect immediately.
        for (int attempt = 0; attempt < 10 && localFs.exists(relativeFile); ++attempt) {
            localFs.delete(relativeFile, true);
        }
    }
}
Aggregations