Use of org.apache.flink.core.fs.RecoverableWriter in project flink by apache.
The class GSFileSystemScenarioTest, method compoundWriteTest.
/* Test writing multiple arrays of bytes to a stream. */
@Test
public void compoundWriteTest() throws IOException {
    // only run the test for valid chunk sizes
    assumeTrue(writeChunkSizeIsValid);

    // create the options and writer
    GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    RecoverableWriter writer = new GSRecoverableWriter(storage, options);

    // create a stream
    RecoverableFsDataOutputStream stream = writer.open(path);

    // write 10 arrays of bytes
    final int writeCount = 10;

    // write multiple arrays of bytes to it
    try (ByteArrayOutputStream expectedData = new ByteArrayOutputStream()) {
        for (int i = 0; i < writeCount; i++) {
            byte[] data = new byte[128];
            random.nextBytes(data);
            stream.write(data);
            expectedData.write(data);
        }

        // close for commit and commit
        RecoverableFsDataOutputStream.Committer committer = stream.closeForCommit();
        committer.commit();

        // there should be exactly one blob after commit, with the expected contents.
        // all temporary blobs should be removed.
        assertEquals(1, storage.blobs.size());
        MockBlobStorage.BlobValue blobValue = storage.blobs.get(blobIdentifier);
        assertArrayEquals(expectedData.toByteArray(), blobValue.content);
    }
}
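The test above drives GSRecoverableWriter directly against a mock blob store. Outside the tests, the same open/write/closeForCommit/commit cycle is usually reached through Flink's FileSystem abstraction rather than by constructing a writer by hand. Below is a minimal sketch of that path, assuming a filesystem plugin with recoverable-write support is on the classpath; the bucket path, payload, and class name are placeholders, not part of the Flink sources.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.fs.RecoverableFsDataOutputStream;
import org.apache.flink.core.fs.RecoverableWriter;

public class RecoverableWriteSketch {

    public static void writeAndCommit() throws IOException {
        // hypothetical target path; any scheme whose FileSystem implements
        // createRecoverableWriter() will do (e.g. the gs:// or s3:// plugins)
        Path path = new Path("gs://example-bucket/output/part-0");

        // resolve the filesystem for the path and ask it for a recoverable writer;
        // filesystems without recoverable-write support reject this call
        FileSystem fileSystem = FileSystem.get(path.toUri());
        RecoverableWriter writer = fileSystem.createRecoverableWriter();

        // data is written to a temporary location first ...
        RecoverableFsDataOutputStream stream = writer.open(path);
        stream.write("example payload".getBytes(StandardCharsets.UTF_8));

        // ... and only becomes visible under the target path on commit
        stream.closeForCommit().commit();
    }
}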
Use of org.apache.flink.core.fs.RecoverableWriter in project flink by apache.
The class GSFileSystemScenarioTest, method compoundWriteTestWithRestore.
/* Test writing multiple arrays of bytes to a stream, restoring to an intermediate
   persist point before committing. */
@Test
public void compoundWriteTestWithRestore() throws IOException {
    // only run the test for valid chunk sizes
    assumeTrue(writeChunkSizeIsValid);

    // create the options and writer
    GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    RecoverableWriter writer = new GSRecoverableWriter(storage, options);

    // create a stream
    RecoverableFsDataOutputStream stream = writer.open(path);

    // write 10 arrays of bytes, but create a restore point after 5 and
    // confirm that we have the proper data after restoring
    final int writeCount = 10;
    final int commitCount = 5;

    RecoverableWriter.ResumeRecoverable resumeRecoverable = null;
    try (ByteArrayOutputStream expectedData = new ByteArrayOutputStream()) {
        for (int i = 0; i < writeCount; i++) {
            byte[] data = new byte[128];
            random.nextBytes(data);
            stream.write(data);

            // only add the array to the expected data up to the commit point
            if (i < commitCount) {
                expectedData.write(data);
            }

            // capture a resume recoverable at the proper point
            if (i == (commitCount - 1)) {
                resumeRecoverable = stream.persist();
            }
        }

        // recover to the commit point, discarding everything written after it
        stream = writer.recover(resumeRecoverable);

        // close for commit and commit
        RecoverableFsDataOutputStream.Committer committer = stream.closeForCommit();
        committer.commit();

        // there should be exactly one blob after commit, with the expected contents.
        // all temporary blobs should be removed.
        assertEquals(1, storage.blobs.size());
        MockBlobStorage.BlobValue blobValue = storage.blobs.get(blobIdentifier);
        assertArrayEquals(expectedData.toByteArray(), blobValue.content);
    }
}
Use of org.apache.flink.core.fs.RecoverableWriter in project flink by apache.
The class HadoopS3RecoverableWriterExceptionITCase, method testResumeWithWrongOffset.
@Test(expected = IOException.class)
public void testResumeWithWrongOffset() throws Exception {
    // this is a rather unrealistic scenario, but it is to trigger
    // truncation of the file and try to resume with missing data.
    final RecoverableWriter writer = getFileSystem().createRecoverableWriter();
    final Path path = new Path(basePathForTest, "part-0");

    final RecoverableFsDataOutputStream stream = writer.open(path);
    stream.write(testData1.getBytes(StandardCharsets.UTF_8));

    final RecoverableWriter.ResumeRecoverable recoverable1 = stream.persist();
    stream.write(testData2.getBytes(StandardCharsets.UTF_8));

    final RecoverableWriter.ResumeRecoverable recoverable2 = stream.persist();
    stream.write(testData3.getBytes(StandardCharsets.UTF_8));

    final RecoverableFsDataOutputStream recoveredStream = writer.recover(recoverable1);
    recoveredStream.closeForCommit().commit();

    // this should throw an exception
    final RecoverableFsDataOutputStream newRecoveredStream = writer.recover(recoverable2);
    newRecoveredStream.closeForCommit().commit();
}
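The second recover(...) fails because committing from recoverable1 truncates the object back to the first persist point, so the offset recorded in recoverable2 no longer exists in the file. When a persist point is abandoned like this, some writers also hold backing state for it (for example uploaded S3 parts) that should be released explicitly. The sketch below shows that cleanup call, assuming a ResumeRecoverable that will never be resumed; the class and method names are placeholders.

import java.io.IOException;

import org.apache.flink.core.fs.RecoverableWriter;
import org.apache.flink.core.fs.RecoverableWriter.ResumeRecoverable;

public class ResumableCleanupSketch {

    /** Releases any state held for a ResumeRecoverable that will never be resumed. */
    public static void discardResumable(RecoverableWriter writer, ResumeRecoverable stale)
            throws IOException {
        // only some writers keep per-persist-point state alive and report that
        // cleanup is required; for the others this is a no-op
        if (writer.requiresCleanupOfRecoverableState()) {
            // returns false if there was nothing left to delete
            writer.cleanupRecoverableState(stale);
        }
    }
}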
Use of org.apache.flink.core.fs.RecoverableWriter in project flink by apache.
The class HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest, method testWriteAndCommitWorks.
@Test
public void testWriteAndCommitWorks() throws IOException {
    final Path testPath = new Path(basePath, "test-0");
    final String expectedContent = "test_line";

    final RecoverableWriter writerUnderTest = fileSystem.createRecoverableWriter();
    final RecoverableFsDataOutputStream streamUnderTest =
            getOpenStreamToFileWithContent(writerUnderTest, testPath, expectedContent);
    streamUnderTest.closeForCommit().commit();

    verifyFileContent(testPath, expectedContent);
}
Use of org.apache.flink.core.fs.RecoverableWriter in project flink by apache.
The class HadoopViewFileSystemTruncateTest, method testViewFileSystemRecoverWorks.
@Test
public void testViewFileSystemRecoverWorks() throws IOException {
    final org.apache.flink.core.fs.Path testPath =
            new org.apache.flink.core.fs.Path(fSystem.getUri() + "mountOnNn1/test-1");
    final String expectedContent = "test_line";

    final RecoverableWriter writer = fSystem.createRecoverableWriter();
    final RecoverableFsDataOutputStream streamUnderTest =
            getOpenStreamToFileWithContent(writer, testPath, expectedContent);

    final ResumeRecoverable resumeRecover = streamUnderTest.persist();
    final RecoverableFsDataOutputStream recover = writer.recover(resumeRecover);
    final RecoverableWriter.CommitRecoverable committable =
            recover.closeForCommit().getRecoverable();

    final RecoverableWriter recoveredWriter = fSystem.createRecoverableWriter();
    recoveredWriter.recoverForCommit(committable).commitAfterRecovery();

    verifyFileContent(testPath, expectedContent);
}