Search in sources :

Example 16 with RecoverableWriter

Use of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.

From the class GSFileSystemScenarioTest, method compoundWriteTest.

/* Verifies that several consecutive writes to one stream are committed as a single blob. */
@Test
public void compoundWriteTest() throws IOException {
    // skip configurations with an invalid chunk size
    assumeTrue(writeChunkSizeIsValid);

    // build the writer under test and open a stream to the target path
    final GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    final RecoverableWriter writer = new GSRecoverableWriter(storage, options);
    final RecoverableFsDataOutputStream stream = writer.open(path);

    // number of 128-byte chunks written to the stream
    final int chunkCount = 10;
    try (ByteArrayOutputStream expected = new ByteArrayOutputStream()) {
        for (int chunkIndex = 0; chunkIndex < chunkCount; chunkIndex++) {
            // fresh random chunk each iteration; mirror it into the expected buffer
            byte[] chunk = new byte[128];
            random.nextBytes(chunk);
            stream.write(chunk);
            expected.write(chunk);
        }

        // finalize the stream and commit the pending blob
        stream.closeForCommit().commit();

        // after commit, exactly one blob must remain (all temporaries removed)
        // and it must hold all written bytes in order
        assertEquals(1, storage.blobs.size());
        MockBlobStorage.BlobValue blobValue = storage.blobs.get(blobIdentifier);
        assertArrayEquals(expected.toByteArray(), blobValue.content);
    }
}
Also used : RecoverableWriter(org.apache.flink.core.fs.RecoverableWriter) GSRecoverableWriter(org.apache.flink.fs.gs.writer.GSRecoverableWriter) RecoverableFsDataOutputStream(org.apache.flink.core.fs.RecoverableFsDataOutputStream) GSRecoverableWriter(org.apache.flink.fs.gs.writer.GSRecoverableWriter) MockBlobStorage(org.apache.flink.fs.gs.storage.MockBlobStorage) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Test(org.junit.Test)

Example 17 with RecoverableWriter

Use of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.

From the class GSFileSystemScenarioTest, method compoundWriteTestWithRestore.

/* Verifies that restoring from a mid-stream persist point discards data written after it. */
@Test
public void compoundWriteTestWithRestore() throws IOException {
    // skip configurations with an invalid chunk size
    assumeTrue(writeChunkSizeIsValid);

    // build the writer under test and open a stream to the target path
    final GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    final RecoverableWriter writer = new GSRecoverableWriter(storage, options);
    RecoverableFsDataOutputStream stream = writer.open(path);

    // write ten chunks but persist the stream state after the first five;
    // after restoring, only those first five chunks should remain
    final int totalChunks = 10;
    final int persistedChunks = 5;
    RecoverableWriter.ResumeRecoverable restorePoint = null;
    try (ByteArrayOutputStream expected = new ByteArrayOutputStream()) {
        for (int chunkIndex = 0; chunkIndex < totalChunks; chunkIndex++) {
            byte[] chunk = new byte[128];
            random.nextBytes(chunk);
            stream.write(chunk);
            // only chunks written up to the persist point should survive the restore
            if (chunkIndex < persistedChunks) {
                expected.write(chunk);
            }
            // snapshot the stream state immediately after the fifth chunk
            if (chunkIndex == persistedChunks - 1) {
                restorePoint = stream.persist();
            }
        }

        // roll the stream back to the persisted state, then finalize and commit
        stream = writer.recover(restorePoint);
        stream.closeForCommit().commit();

        // after commit, exactly one blob must remain (all temporaries removed)
        // and it must hold only the bytes written before the persist point
        assertEquals(1, storage.blobs.size());
        MockBlobStorage.BlobValue blobValue = storage.blobs.get(blobIdentifier);
        assertArrayEquals(expected.toByteArray(), blobValue.content);
    }
}
Also used : RecoverableWriter(org.apache.flink.core.fs.RecoverableWriter) GSRecoverableWriter(org.apache.flink.fs.gs.writer.GSRecoverableWriter) RecoverableFsDataOutputStream(org.apache.flink.core.fs.RecoverableFsDataOutputStream) GSRecoverableWriter(org.apache.flink.fs.gs.writer.GSRecoverableWriter) MockBlobStorage(org.apache.flink.fs.gs.storage.MockBlobStorage) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Test(org.junit.Test)

Example 18 with RecoverableWriter

Use of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.

From the class HadoopS3RecoverableWriterExceptionITCase, method testResumeWithWrongOffset.

/*
 * Verifies that resuming from a recoverable whose offset lies beyond the
 * file's current length fails with an IOException.
 *
 * The method writes three pieces of data, taking a persist snapshot after the
 * first and after the second. Committing a stream recovered from the FIRST
 * snapshot truncates the file to testData1's length; the SECOND snapshot then
 * points past the end of the file, so recovering and committing from it must
 * throw. NOTE(review): this relies on the recover-then-commit sequence
 * truncating the underlying object — the statement order below is load-bearing.
 */
@Test(expected = IOException.class)
public void testResumeWithWrongOffset() throws Exception {
    // this is a rather unrealistic scenario, but it is to trigger
    // truncation of the file and try to resume with missing data.
    final RecoverableWriter writer = getFileSystem().createRecoverableWriter();
    final Path path = new Path(basePathForTest, "part-0");
    final RecoverableFsDataOutputStream stream = writer.open(path);
    stream.write(testData1.getBytes(StandardCharsets.UTF_8));
    // snapshot #1: taken after only testData1 is written
    final RecoverableWriter.ResumeRecoverable recoverable1 = stream.persist();
    stream.write(testData2.getBytes(StandardCharsets.UTF_8));
    // snapshot #2: offset covers testData1 + testData2
    final RecoverableWriter.ResumeRecoverable recoverable2 = stream.persist();
    stream.write(testData3.getBytes(StandardCharsets.UTF_8));
    // recovering from snapshot #1 and committing truncates the file,
    // invalidating snapshot #2's offset
    final RecoverableFsDataOutputStream recoveredStream = writer.recover(recoverable1);
    recoveredStream.closeForCommit().commit();
    // this should throw an exception
    final RecoverableFsDataOutputStream newRecoveredStream = writer.recover(recoverable2);
    newRecoveredStream.closeForCommit().commit();
}
Also used : Path(org.apache.flink.core.fs.Path) RecoverableWriter(org.apache.flink.core.fs.RecoverableWriter) RecoverableFsDataOutputStream(org.apache.flink.core.fs.RecoverableFsDataOutputStream) Test(org.junit.Test)

Example 19 with RecoverableWriter

Use of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.

From the class HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest, method testWriteAndCommitWorks.

/* A plain write-then-commit round trip must make the file content visible. */
@Test
public void testWriteAndCommitWorks() throws IOException {
    final String content = "test_line";
    final Path targetPath = new Path(basePath, "test-0");

    // open a recoverable stream already holding the content, then commit it
    final RecoverableWriter writer = fileSystem.createRecoverableWriter();
    final RecoverableFsDataOutputStream stream =
            getOpenStreamToFileWithContent(writer, targetPath, content);
    stream.closeForCommit().commit();

    verifyFileContent(targetPath, content);
}
Also used : Path(org.apache.flink.core.fs.Path) RecoverableWriter(org.apache.flink.core.fs.RecoverableWriter) RecoverableFsDataOutputStream(org.apache.flink.core.fs.RecoverableFsDataOutputStream) Test(org.junit.Test)

Example 20 with RecoverableWriter

Use of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.

From the class HadoopViewFileSystemTruncateTest, method testViewFileSystemRecoverWorks.

/* Verifies persist/recover/commit across a ViewFileSystem mount, finishing with a new writer. */
@Test
public void testViewFileSystemRecoverWorks() throws IOException {
    final org.apache.flink.core.fs.Path testPath =
            new org.apache.flink.core.fs.Path(fSystem.getUri() + "mountOnNn1/test-1");
    final String content = "test_line";

    // write the content and take a persist snapshot of the open stream
    final RecoverableWriter initialWriter = fSystem.createRecoverableWriter();
    final RecoverableFsDataOutputStream stream =
            getOpenStreamToFileWithContent(initialWriter, testPath, content);
    final ResumeRecoverable persisted = stream.persist();

    // resume from the snapshot and capture the commit recoverable on close
    final RecoverableFsDataOutputStream resumed = initialWriter.recover(persisted);
    final RecoverableWriter.CommitRecoverable committable =
            resumed.closeForCommit().getRecoverable();

    // a brand-new writer must be able to finish the commit after recovery
    final RecoverableWriter recoveredWriter = fSystem.createRecoverableWriter();
    recoveredWriter.recoverForCommit(committable).commitAfterRecovery();

    verifyFileContent(testPath, content);
}
Also used : Path(org.apache.hadoop.fs.Path) RecoverableWriter(org.apache.flink.core.fs.RecoverableWriter) RecoverableFsDataOutputStream(org.apache.flink.core.fs.RecoverableFsDataOutputStream) ResumeRecoverable(org.apache.flink.core.fs.RecoverableWriter.ResumeRecoverable) Test(org.junit.Test)

Aggregations

RecoverableWriter (org.apache.flink.core.fs.RecoverableWriter)24 Test (org.junit.Test)22 RecoverableFsDataOutputStream (org.apache.flink.core.fs.RecoverableFsDataOutputStream)21 Path (org.apache.flink.core.fs.Path)16 Ignore (org.junit.Ignore)4 ResumeRecoverable (org.apache.flink.core.fs.RecoverableWriter.ResumeRecoverable)3 MockBlobStorage (org.apache.flink.fs.gs.storage.MockBlobStorage)3 GSRecoverableWriter (org.apache.flink.fs.gs.writer.GSRecoverableWriter)3 OutputStreamBasedInProgressFileRecoverable (org.apache.flink.streaming.api.functions.sink.filesystem.OutputStreamBasedPartFileWriter.OutputStreamBasedInProgressFileRecoverable)3 OutputStreamBasedPendingFileRecoverable (org.apache.flink.streaming.api.functions.sink.filesystem.OutputStreamBasedPartFileWriter.OutputStreamBasedPendingFileRecoverable)3 ByteArrayOutputStream (java.io.ByteArrayOutputStream)2 CommitRecoverable (org.apache.flink.core.fs.RecoverableWriter.CommitRecoverable)2 S3Recoverable (org.apache.flink.fs.s3.common.writer.S3Recoverable)2 OutputStreamBasedInProgressFileRecoverableSerializer (org.apache.flink.streaming.api.functions.sink.filesystem.OutputStreamBasedPartFileWriter.OutputStreamBasedInProgressFileRecoverableSerializer)2 OutputStreamBasedPendingFileRecoverableSerializer (org.apache.flink.streaming.api.functions.sink.filesystem.OutputStreamBasedPartFileWriter.OutputStreamBasedPendingFileRecoverableSerializer)2 IOException (java.io.IOException)1 HashMap (java.util.HashMap)1 FSDataInputStream (org.apache.flink.core.fs.FSDataInputStream)1 GSBlobIdentifier (org.apache.flink.fs.gs.storage.GSBlobIdentifier)1 InProgressFileRecoverable (org.apache.flink.streaming.api.functions.sink.filesystem.InProgressFileWriter.InProgressFileRecoverable)1