Use of org.apache.flink.fs.s3.common.writer.S3Recoverable in project flink by apache.
The class HadoopS3RecoverableWriterITCase, method testCallingDeleteObjectTwiceDoesNotThroughException.
@Test
public void testCallingDeleteObjectTwiceDoesNotThroughException() throws Exception {
    final RecoverableWriter writer = getRecoverableWriter();
    final Path path = new Path(basePathForTest, "part-0");
    final RecoverableFsDataOutputStream stream = writer.open(path);
    stream.write(bytesOf(testData1));
    S3Recoverable recoverable = (S3Recoverable) stream.persist();
    stream.closeForCommit().commit();
    // the data is still there, as we have not yet deleted it from the tmp object
    final String content = getContentsOfFile(new Path('/' + recoverable.incompleteObjectName()));
    Assert.assertEquals(testData1, content);
    // the first cleanup deletes the staged object and reports success
    boolean successfullyDeletedState = writer.cleanupRecoverableState(recoverable);
    Assert.assertTrue(successfullyDeletedState);
    // the second cleanup finds nothing left to delete and returns false instead of throwing
    boolean unsuccessfulDeletion = writer.cleanupRecoverableState(recoverable);
    Assert.assertFalse(unsuccessfulDeletion);
}
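The test relies on cleanupRecoverableState signalling an already-deleted staging object through its boolean return value rather than an exception. A minimal sketch of a caller leaning on that contract, assuming a writer and a previously persisted recoverable are in scope (the LOG field is a hypothetical SLF4J logger, not part of the snippet above):

// Sketch only: `writer` and `recoverable` come from a test like the one above; LOG is assumed.
if (!writer.cleanupRecoverableState(recoverable)) {
    // false means the staged object was already gone; callers can treat this as a no-op
    LOG.debug("State for {} was already cleaned up.", recoverable.incompleteObjectName());
}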
Use of org.apache.flink.fs.s3.common.writer.S3Recoverable in project flink by apache.
The class RecoverableMultiPartUploadImplTest, method uploadObject.
private S3Recoverable uploadObject(byte[] content) throws IOException {
    final RefCountedBufferingFileStream incompletePartFile = writeContent(content);
    // flush so the buffered bytes are on disk before the snapshot below reads them
    incompletePartFile.flush();
    // snapshot the multipart upload and get a recoverable pointing at the staged data
    return multiPartUploadUnderTest.snapshotAndGetRecoverable(incompletePartFile);
}
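A hedged usage sketch for this helper: the call below and its assertions are assumptions about the test fixture (in particular, that content shorter than a part boundary is tracked in full as the incomplete part), not lines from RecoverableMultiPartUploadImplTest itself.

// assumes: import java.nio.charset.StandardCharsets;
final byte[] content = "hello-s3".getBytes(StandardCharsets.UTF_8);
final S3Recoverable recoverable = uploadObject(content);
// the flushed-but-uncommitted bytes should be staged under their own object key
Assert.assertNotNull(recoverable.incompleteObjectName());
Assert.assertEquals(content.length, recoverable.incompleteObjectLength());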
Use of org.apache.flink.fs.s3.common.writer.S3Recoverable in project flink by apache.
The class S3RecoverableMultipartUploadFactory, method recoverInProgressPart.
private Optional<File> recoverInProgressPart(S3Recoverable recoverable) throws IOException {
    final String objectKey = recoverable.incompleteObjectName();
    if (objectKey == null) {
        // no in-progress part was staged, so there is nothing to download
        return Optional.empty();
    }
    // download the file (simple way)
    final RefCountedFileWithStream refCountedFile = tmpFileSupplier.apply(null);
    final File file = refCountedFile.getFile();
    final long numBytes = s3AccessHelper.getObject(objectKey, file);
    if (numBytes != recoverable.incompleteObjectLength()) {
        throw new IOException(
                String.format(
                        "Error recovering writer: Downloading the last data chunk file gives incorrect length. "
                                + "File length is %d bytes, RecoveryData indicates %d bytes",
                        numBytes, recoverable.incompleteObjectLength()));
    }
    return Optional.of(file);
}
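For orientation, a sketch of how a caller on the recovery path might consume the returned Optional; apart from recoverInProgressPart itself, the names below are assumptions, and the real factory wires the file into a resumed multipart upload rather than printing:

// Sketch: `recoverable` is assumed to have been deserialized from checkpoint state.
final Optional<File> inProgressPart = recoverInProgressPart(recoverable);
if (inProgressPart.isPresent()) {
    final File file = inProgressPart.get();
    // these bytes were staged to S3 but never registered as a completed
    // multipart piece; the resumed writer re-stages them before appending
    System.out.println("Recovered " + file.length() + " in-progress bytes");
}
// an empty Optional means the stream was persisted exactly on a part boundary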
Use of org.apache.flink.fs.s3.common.writer.S3Recoverable in project flink by apache.
The class HadoopS3RecoverableWriterITCase, method testCleanupRecoverableState.
@Test(expected = FileNotFoundException.class)
public void testCleanupRecoverableState() throws Exception {
    final RecoverableWriter writer = getRecoverableWriter();
    final Path path = new Path(basePathForTest, "part-0");
    final RecoverableFsDataOutputStream stream = writer.open(path);
    stream.write(bytesOf(testData1));
    S3Recoverable recoverable = (S3Recoverable) stream.persist();
    stream.closeForCommit().commit();
    // the data is still there, as we have not yet deleted it from the tmp object
    final String content = getContentsOfFile(new Path('/' + recoverable.incompleteObjectName()));
    Assert.assertEquals(testData1, content);
    boolean successfullyDeletedState = writer.cleanupRecoverableState(recoverable);
    Assert.assertTrue(successfullyDeletedState);
    int retryTimes = 10;
    final long delayMs = 1000;
    // deletes may not be visible immediately, so probe repeatedly to verify
    // that the file is eventually gone
    while (retryTimes > 0) {
        // this should throw FileNotFoundException once the deletion takes effect
        getContentsOfFile(new Path('/' + recoverable.incompleteObjectName()));
        retryTimes--;
        Thread.sleep(delayMs);
    }
}
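The fixed retry loop above is a recurring pattern when asserting on eventually visible S3 state, and it can be factored out. The Probe interface and probeUntilThrows helper below are hypothetical, not part of Flink:

// Hypothetical helper: probe until the expected exception propagates or attempts run out.
@FunctionalInterface
interface Probe {
    void run() throws Exception;
}

static void probeUntilThrows(int attempts, long delayMs, Probe probe) throws Exception {
    for (int i = 0; i < attempts; i++) {
        probe.run();           // expected to throw (e.g. FileNotFoundException) once the delete is visible
        Thread.sleep(delayMs); // back off before the next attempt
    }
}

// usage mirroring the test:
// probeUntilThrows(10, 1000L, () -> getContentsOfFile(somePath));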