Example usage of org.apache.flink.core.fs.FSDataInputStream in the Apache Flink project.
From the class FsJobArchivist, method getArchivedJsons.
/**
 * Reads the given archive file and returns a {@link Collection} of contained {@link
 * ArchivedJson}.
 *
 * @param file archive to extract
 * @return collection of archived jsons
 * @throws IOException if the file can't be opened, read or doesn't contain valid json
 */
public static Collection<ArchivedJson> getArchivedJsons(Path file) throws IOException {
    try (FSDataInputStream input = file.getFileSystem().open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream()) {
        // Buffer the whole archive in memory before parsing it as JSON.
        IOUtils.copyBytes(input, output);
        try {
            JsonNode archive = mapper.readTree(output.toByteArray());
            Collection<ArchivedJson> archives = new ArrayList<>();
            for (JsonNode archivePart : archive.get(ARCHIVE)) {
                String path = archivePart.get(PATH).asText();
                String json = archivePart.get(JSON).asText();
                archives.add(new ArchivedJson(path, json));
            }
            return archives;
        } catch (NullPointerException npe) {
            // Occurs if the archive is empty or any of the expected fields are not present.
            // Preserve the NPE as the cause so the original failure point is not lost.
            throw new IOException(
                    "Job archive (" + file.getPath() + ") did not conform to expected format.",
                    npe);
        }
    }
}
Example usage of org.apache.flink.core.fs.FSDataInputStream in the Apache Flink project.
From the class ChannelStateChunkReaderTest, method testBufferRecycledOnSuccess.
@Test
public void testBufferRecycledOnSuccess() throws IOException, InterruptedException {
    // A successful read must hand out at least one buffer and recycle all of them.
    ChannelStateSerializer channelStateSerializer = new ChannelStateSerializerImpl();
    TestRecoveredChannelStateHandler stateHandler = new TestRecoveredChannelStateHandler();
    try (FSDataInputStream source = getStream(channelStateSerializer, 10)) {
        ChannelStateChunkReader chunkReader = new ChannelStateChunkReader(channelStateSerializer);
        chunkReader.readChunk(source, channelStateSerializer.getHeaderLength(), stateHandler, "channelInfo", 0);
    } finally {
        checkState(!stateHandler.requestedBuffers.isEmpty());
        assertTrue(stateHandler.requestedBuffers.stream().allMatch(TestChannelStateByteBuffer::isRecycled));
    }
}
Example usage of org.apache.flink.core.fs.FSDataInputStream in the Apache Flink project.
From the class ChannelStateChunkReaderTest, method testBuffersNotRequestedForEmptyStream.
@Test
public void testBuffersNotRequestedForEmptyStream() throws IOException, InterruptedException {
    // Reading a zero-length stream must not request any buffers from the handler.
    ChannelStateSerializer channelStateSerializer = new ChannelStateSerializerImpl();
    TestRecoveredChannelStateHandler stateHandler = new TestRecoveredChannelStateHandler();
    try (FSDataInputStream source = getStream(channelStateSerializer, 0)) {
        ChannelStateChunkReader chunkReader = new ChannelStateChunkReader(channelStateSerializer);
        chunkReader.readChunk(source, channelStateSerializer.getHeaderLength(), stateHandler, "channelInfo", 0);
    } finally {
        assertTrue(stateHandler.requestedBuffers.isEmpty());
    }
}
Example usage of org.apache.flink.core.fs.FSDataInputStream in the Apache Flink project.
From the class ChannelStateChunkReaderTest, method testBufferRecycledOnFailure.
@Test(expected = TestException.class)
public void testBufferRecycledOnFailure() throws IOException, InterruptedException {
    // Even when the serializer fails mid-read, every requested buffer must be recycled.
    FailingChannelStateSerializer failingSerializer = new FailingChannelStateSerializer();
    TestRecoveredChannelStateHandler stateHandler = new TestRecoveredChannelStateHandler();
    try (FSDataInputStream source = getStream(failingSerializer, 10)) {
        ChannelStateChunkReader chunkReader = new ChannelStateChunkReader(failingSerializer);
        chunkReader.readChunk(source, failingSerializer.getHeaderLength(), stateHandler, "channelInfo", 0);
    } finally {
        checkState(failingSerializer.failed);
        checkState(!stateHandler.requestedBuffers.isEmpty());
        assertTrue(stateHandler.requestedBuffers.stream().allMatch(TestChannelStateByteBuffer::isRecycled));
    }
}
Example usage of org.apache.flink.core.fs.FSDataInputStream in the Apache Flink project.
From the class FileUtils, method addToZip.
/**
 * Recursively adds the given file or directory to the zip stream, using paths
 * relative to {@code rootDir} as entry names.
 *
 * @param fileOrDirectory file or directory to add
 * @param fs file system the path lives on
 * @param rootDir root directory the entry names are made relative to
 * @param out target zip stream
 * @throws IOException if listing, opening or copying a file fails
 */
private static void addToZip(Path fileOrDirectory, FileSystem fs, Path rootDir, ZipOutputStream out) throws IOException {
    // Strip only the leading rootDir prefix. The previous String.replace(...) removed
    // EVERY occurrence of the root path substring, corrupting entry names whenever the
    // root path reappeared deeper in the file path (e.g. root "/a", file "/a/sub/a/x").
    String rootPrefix = rootDir.getPath() + '/';
    String fullPath = fileOrDirectory.getPath();
    String relativePath = fullPath.startsWith(rootPrefix)
            ? fullPath.substring(rootPrefix.length())
            : fullPath;
    if (fs.getFileStatus(fileOrDirectory).isDir()) {
        // Directories are represented by entries with a trailing slash.
        out.putNextEntry(new ZipEntry(relativePath + '/'));
        for (FileStatus containedFile : fs.listStatus(fileOrDirectory)) {
            addToZip(containedFile.getPath(), fs, rootDir, out);
        }
    } else {
        ZipEntry entry = new ZipEntry(relativePath);
        out.putNextEntry(entry);
        try (FSDataInputStream in = fs.open(fileOrDirectory)) {
            IOUtils.copyBytes(in, out, false);
        }
        out.closeEntry();
    }
}
Aggregations