Use of org.apache.flink.core.fs.RecoverableFsDataOutputStream in project flink by apache: class FileSinkCommittableSerializerMigrationTest, method prepareDeserializationPending.
@Test
@Ignore
public void prepareDeserializationPending() throws IOException {
    String scenario = "pending";
    java.nio.file.Path path = resolveVersionPath(CURRENT_VERSION, scenario);
    BucketWriter<String, String> bucketWriter = createBucketWriter();
    RecoverableWriter writer = FileSystem.getLocalFileSystem().createRecoverableWriter();
    FileSinkCommittableSerializer serializer =
            new FileSinkCommittableSerializer(
                    bucketWriter.getProperties().getPendingFileRecoverableSerializer(),
                    bucketWriter.getProperties().getInProgressFileRecoverableSerializer());
    RecoverableFsDataOutputStream outputStream =
            writer.open(new Path(path.resolve("content").toString()));
    outputStream.write(PENDING_CONTENT.getBytes(StandardCharsets.UTF_8));
    CommitRecoverable commitRecoverable = outputStream.closeForCommit().getRecoverable();
    OutputStreamBasedPendingFileRecoverable recoverable =
            new OutputStreamBasedPendingFileRecoverable(commitRecoverable);
    FileSinkCommittable committable = new FileSinkCommittable("0", recoverable);
    byte[] bytes = serializer.serialize(committable);
    Files.write(path.resolve("committable"), bytes);
}
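The counterpart of this preparation step is reading the written snapshot back and deserializing it with the same serializer. The following is a minimal sketch of that round trip, not part of the Flink sources; it assumes the same test-class context and imports as above, and a hypothetical previousVersion constant naming the on-disk format being read back.

@Test
public void deserializePendingSketch() throws IOException {
    // Hypothetical: previousVersion identifies the serializer version the file was written with.
    java.nio.file.Path path = resolveVersionPath(previousVersion, "pending");
    BucketWriter<String, String> bucketWriter = createBucketWriter();
    FileSinkCommittableSerializer serializer =
            new FileSinkCommittableSerializer(
                    bucketWriter.getProperties().getPendingFileRecoverableSerializer(),
                    bucketWriter.getProperties().getInProgressFileRecoverableSerializer());
    byte[] bytes = Files.readAllBytes(path.resolve("committable"));
    // deserialize(version, bytes) comes from the SimpleVersionedSerializer contract.
    FileSinkCommittable restored = serializer.deserialize(previousVersion, bytes);
    assertTrue(restored.hasPendingFile());
}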
Use of org.apache.flink.core.fs.RecoverableFsDataOutputStream in project flink by apache: class CompactOperator, method doSingleFileMove.
private boolean doSingleFileMove(Path src, Path dst) throws IOException {
    // We cannot rename, because we need to keep the original file for failover.
    RecoverableWriter writer;
    try {
        writer = fileSystem.createRecoverableWriter();
    } catch (UnsupportedOperationException ignore) {
        // This file system does not support recoverable writing; let the caller fall back.
        return false;
    }
    RecoverableFsDataOutputStream out = writer.open(dst);
    try (FSDataInputStream in = fileSystem.open(src)) {
        IOUtils.copyBytes(in, out, false);
    } catch (Throwable t) {
        out.close();
        throw t;
    }
    out.closeForCommit().commit();
    return true;
}
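The false return value tells the caller that no recoverable writer is available, so some other copy strategy has to be used. The following sketch of such a caller is illustrative only, not the actual CompactOperator logic; the moveFile name and the plain-copy fallback are assumptions.

// Illustrative caller: prefer the recoverable copy-and-commit path, fall back to a
// plain stream copy when the file system cannot create a RecoverableWriter.
private void moveFile(Path src, Path dst) throws IOException {
    if (doSingleFileMove(src, dst)) {
        return;
    }
    // Fallback (hypothetical strategy): copy without commit semantics.
    try (FSDataInputStream in = fileSystem.open(src);
            FSDataOutputStream out = fileSystem.create(dst, FileSystem.WriteMode.OVERWRITE)) {
        IOUtils.copyBytes(in, out, false);
    }
}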
Use of org.apache.flink.core.fs.RecoverableFsDataOutputStream in project flink by apache: class OutputStreamBasedPartFileRecoverableMigrationTest, method prepareDeserializationInProgress.
@Test
@Ignore
public void prepareDeserializationInProgress() throws IOException {
    String scenario = "in-progress";
    java.nio.file.Path path = resolveVersionPath(CURRENT_VERSION, scenario);
    RecoverableWriter writer = FileSystem.getLocalFileSystem().createRecoverableWriter();
    OutputStreamBasedInProgressFileRecoverableSerializer serializer =
            new OutputStreamBasedInProgressFileRecoverableSerializer(
                    writer.getResumeRecoverableSerializer());
    RecoverableFsDataOutputStream outputStream =
            writer.open(new Path(path.resolve("content").toString()));
    outputStream.write(IN_PROGRESS_CONTENT.getBytes(StandardCharsets.UTF_8));
    ResumeRecoverable resumeRecoverable = outputStream.persist();
    OutputStreamBasedInProgressFileRecoverable recoverable =
            new OutputStreamBasedInProgressFileRecoverable(resumeRecoverable);
    byte[] bytes = serializer.serialize(recoverable);
    Files.write(path.resolve("recoverable"), bytes);
}
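A persisted ResumeRecoverable is what lets a writer continue an in-progress file after a failure. A minimal sketch of that resume path, assuming the same local-file-system writer and a resumeRecoverable obtained as above:

// Minimal sketch: resume writing an in-progress file from a persisted ResumeRecoverable.
// recover() reopens the stream positioned after the last persisted data.
RecoverableWriter writer = FileSystem.getLocalFileSystem().createRecoverableWriter();
RecoverableFsDataOutputStream resumed = writer.recover(resumeRecoverable);
resumed.write("more data".getBytes(StandardCharsets.UTF_8));
// Either call persist() again for another durable point, or close the file for good:
resumed.closeForCommit().commit();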
Use of org.apache.flink.core.fs.RecoverableFsDataOutputStream in project flink by apache: class HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest, method testRecoveryAfterClosingForCommitWorks.
@Test
public void testRecoveryAfterClosingForCommitWorks() throws IOException {
    final Path testPath = new Path(basePath, "test-1");
    final String expectedContent = "test_line";
    final RecoverableWriter writerUnderTest = fileSystem.createRecoverableWriter();
    final RecoverableFsDataOutputStream streamUnderTest =
            getOpenStreamToFileWithContent(writerUnderTest, testPath, expectedContent);
    final RecoverableWriter.CommitRecoverable committable =
            streamUnderTest.closeForCommit().getRecoverable();
    writerUnderTest.recoverForCommit(committable).commitAfterRecovery();
    verifyFileContent(testPath, expectedContent);
}
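In a real job the CommitRecoverable is not held in memory across the failure; it is serialized into checkpoint state and deserialized again before recoverForCommit is called. A hedged sketch of that round trip using the writer's own serializer (variable names are illustrative, and in practice the restore side would use a freshly created writer):

// Illustrative sketch: store the CommitRecoverable the way a sink would keep it in
// checkpoint state, then restore it and commit after recovery.
SimpleVersionedSerializer<RecoverableWriter.CommitRecoverable> committableSerializer =
        writerUnderTest.getCommitRecoverableSerializer();
byte[] state = committableSerializer.serialize(committable);

// ... after a restart ...
RecoverableWriter.CommitRecoverable restored =
        committableSerializer.deserialize(committableSerializer.getVersion(), state);
writerUnderTest.recoverForCommit(restored).commitAfterRecovery();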
Use of org.apache.flink.core.fs.RecoverableFsDataOutputStream in project flink by apache: class HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest, method testExceptionThrownWhenRecoveringWithInProgressFile.
@Test
public void testExceptionThrownWhenRecoveringWithInProgressFile() throws IOException {
    final RecoverableWriter writerUnderTest = fileSystem.createRecoverableWriter();
    final RecoverableFsDataOutputStream stream = writerUnderTest.open(new Path(basePath, "test-2"));
    final RecoverableWriter.ResumeRecoverable recoverable = stream.persist();
    assertNotNull(recoverable);
    try {
        writerUnderTest.recover(recoverable);
    } catch (IOException e) {
        // This is the expected exception; also check that the root cause points at the
        // Hadoop < 2.7 version, which lacks truncate support.
        assertTrue(e.getCause() instanceof IllegalStateException);
    }
}
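Code that has to run on file systems without truncate support can guard the resume attempt instead of catching the failure. The sketch below uses RecoverableWriter.supportsResume() as that guard; whether the old-Hadoop writer in this test actually reports false there is an assumption, the test above only shows the failure mode when resume is attempted anyway.

// Sketch of a defensive check before attempting to resume an in-progress file.
// Writers that cannot resume are expected to report false from supportsResume().
if (writerUnderTest.supportsResume()) {
    RecoverableFsDataOutputStream resumed = writerUnderTest.recover(recoverable);
    resumed.write("appended".getBytes(StandardCharsets.UTF_8));
    resumed.closeForCommit().commit();
} else {
    // Fall back to starting a fresh part file instead of resuming the old one.
}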