Usage example of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.
From the class HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest, method testExceptionThrownWhenRecoveringWithInProgressFile.
@Test
public void testExceptionThrownWhenRecoveringWithInProgressFile() throws IOException {
    // On Hadoop < 2.7 (no truncate support) recovering an in-progress file is
    // unsupported, so recover() must fail with an IOException whose root cause
    // signals the version limitation.
    final RecoverableWriter writerUnderTest = fileSystem.createRecoverableWriter();
    final RecoverableFsDataOutputStream stream = writerUnderTest.open(new Path(basePath, "test-2"));
    final RecoverableWriter.ResumeRecoverable recoverable = stream.persist();
    assertNotNull(recoverable);
    try {
        writerUnderTest.recover(recoverable);
        // BUG FIX: without this fail() the test passed silently when recover()
        // did NOT throw, defeating the purpose of the test.
        fail("Expected an IOException because recovery is unsupported on Hadoop < 2.7.");
    } catch (IOException e) {
        // Expected path: also verify the root cause marks the hadoop < 2.7 case.
        assertTrue(e.getCause() instanceof IllegalStateException);
    }
}
Usage example of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.
From the class HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest, method testRecoverableWriterWithViewfsScheme.
@Test
public void testRecoverableWriterWithViewfsScheme() {
    // Constructing a HadoopRecoverableWriter over a "viewfs"-scheme file system
    // must not throw UnsupportedOperationException.
    final org.apache.hadoop.fs.FileSystem mockViewfs = Mockito.mock(org.apache.hadoop.fs.FileSystem.class);
    when(mockViewfs.getScheme()).thenReturn("viewfs");
    // BUG FIX: the writer was previously an unused local with no assertion; make
    // the success criterion explicit instead of relying only on "did not throw".
    final RecoverableWriter recoverableWriter = new HadoopRecoverableWriter(mockViewfs);
    assertNotNull(recoverableWriter);
}
Usage example of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.
From the class HadoopS3RecoverableWriterExceptionITCase, method testResumeAfterCommit.
// IMPORTANT FOR THE FOLLOWING TWO TESTS:
// These tests illustrate a difference in the user-perceived behavior of the different writers.
// In HDFS this will fail when trying to recover the stream, while here it will fail at
// "commit", i.e. when we try to "publish" the multipart upload (MPU) and we realize
// that the MPU is no longer active.
@Test(expected = IOException.class)
public void testResumeAfterCommit() throws Exception {
    // Write a first chunk, take a resume point, write a second chunk, then commit.
    final RecoverableWriter writer = getFileSystem().createRecoverableWriter();
    final Path path = new Path(basePathForTest, "part-0");
    final RecoverableFsDataOutputStream stream = writer.open(path);
    stream.write(testData1.getBytes(StandardCharsets.UTF_8));
    final RecoverableWriter.ResumeRecoverable recoverable = stream.persist();
    stream.write(testData2.getBytes(StandardCharsets.UTF_8));
    stream.closeForCommit().commit();
    // Resuming from a pre-commit recoverable after the file was already committed is
    // invalid; committing the recovered stream is expected to throw the IOException
    // declared in @Test(expected = ...).
    final RecoverableFsDataOutputStream recoveredStream = writer.recover(recoverable);
    recoveredStream.closeForCommit().commit();
}
Usage example of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.
From the class HadoopS3RecoverableWriterITCase, method testCommitAfterPersist.
@Test
public void testCommitAfterPersist() throws Exception {
    // Taking a persist() checkpoint mid-stream must not affect a later commit:
    // the committed file has to contain data written both before and after persist().
    final RecoverableWriter recoverableWriter = getRecoverableWriter();
    final Path targetPath = new Path(basePathForTest, "part-0");

    final RecoverableFsDataOutputStream out = recoverableWriter.open(targetPath);
    out.write(bytesOf(testData1));
    out.persist();
    out.write(bytesOf(testData2));
    out.closeForCommit().commit();

    // Both chunks must be present, in order.
    Assert.assertEquals(testData1 + testData2, getContentsOfFile(targetPath));
}
Usage example of org.apache.flink.core.fs.RecoverableWriter in the Apache Flink project.
From the class HadoopS3RecoverableWriterITCase, method testCloseWithNoData.
// ----------------------- Test Normal Execution -----------------------
@Test
public void testCloseWithNoData() throws Exception {
    // Opening a stream and committing immediately, without writing any bytes,
    // must complete without error.
    final RecoverableWriter recoverableWriter = getRecoverableWriter();
    final Path emptyFilePath = new Path(basePathForTest, "part-0");
    recoverableWriter.open(emptyFilePath).closeForCommit().commit();
}
Aggregations