use of org.apache.flink.fs.gs.writer.GSRecoverableWriter in project flink by apache.
the class GSFileSystemScenarioTest method simpleWriteTest.
/* Test writing a single array of bytes to a stream. */
@Test
public void simpleWriteTest() throws IOException {
    // only run the test for valid chunk sizes
    assumeTrue(writeChunkSizeIsValid);

    // create the options and writer
    GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    RecoverableWriter writer = new GSRecoverableWriter(storage, options);

    // create a stream and write some random bytes to it
    RecoverableFsDataOutputStream stream = writer.open(path);
    byte[] data = new byte[128];
    random.nextBytes(data);
    stream.write(data);

    // close for commit
    RecoverableFsDataOutputStream.Committer committer = stream.closeForCommit();

    // there should be a single blob now, in the specified temporary bucket or,
    // if no temporary bucket is specified, in the final bucket
    assertEquals(1, storage.blobs.size());
    GSBlobIdentifier temporaryBlobIdentifier =
            (GSBlobIdentifier) storage.blobs.keySet().toArray()[0];
    String expectedTemporaryBucket =
            StringUtils.isNullOrWhitespaceOnly(temporaryBucketName)
                    ? blobIdentifier.bucketName
                    : temporaryBucketName;
    assertEquals(expectedTemporaryBucket, temporaryBlobIdentifier.bucketName);

    // commit
    committer.commit();

    // there should be exactly one blob after commit, with the expected contents;
    // all temporary blobs should be removed
    assertEquals(1, storage.blobs.size());
    MockBlobStorage.BlobValue blobValue = storage.blobs.get(blobIdentifier);
    assertArrayEquals(data, blobValue.content);
}
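Outside the mock-storage test harness, the same open/write/closeForCommit/commit lifecycle is driven through Flink's generic RecoverableWriter interface, with GSFileSystem returning a GSRecoverableWriter behind it (see createRecoverableWriter below). The following is a minimal sketch of that caller-side flow, assuming the flink-gs-fs-hadoop plugin is installed so the gs:// scheme resolves; the path, payload, and class name are placeholders, not part of the Flink sources.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.fs.RecoverableFsDataOutputStream;
import org.apache.flink.core.fs.RecoverableWriter;

public class RecoverableWriteSketch {

    public static void main(String[] args) throws IOException {
        // placeholder target; requires the gs:// file system plugin to be available
        Path path = new Path("gs://example-bucket/output/part-0");

        // the gs:// file system hands back a GSRecoverableWriter under the hood
        FileSystem fileSystem = path.getFileSystem();
        RecoverableWriter writer = fileSystem.createRecoverableWriter();

        // write into a temporary blob; nothing is visible at the final location yet
        RecoverableFsDataOutputStream stream = writer.open(path);
        stream.write("hello, gcs".getBytes(StandardCharsets.UTF_8));

        // closeForCommit() seals the data; commit() publishes the final blob
        RecoverableFsDataOutputStream.Committer committer = stream.closeForCommit();
        committer.commit();
    }
}

Until commit() runs, the data lives only in a temporary blob (in the temporary bucket when one is configured), which is exactly what simpleWriteTest asserts above.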
use of org.apache.flink.fs.gs.writer.GSRecoverableWriter in project flink by apache.
the class GSFileSystemScenarioTest method compoundWriteTest.
/* Test writing multiple arrays of bytes to a stream. */
@Test
public void compoundWriteTest() throws IOException {
    // only run the test for valid chunk sizes
    assumeTrue(writeChunkSizeIsValid);

    // create the options and writer
    GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    RecoverableWriter writer = new GSRecoverableWriter(storage, options);

    // create a stream
    RecoverableFsDataOutputStream stream = writer.open(path);

    // write 10 arrays of bytes
    final int writeCount = 10;

    // write multiple arrays of bytes to it
    try (ByteArrayOutputStream expectedData = new ByteArrayOutputStream()) {
        for (int i = 0; i < writeCount; i++) {
            byte[] data = new byte[128];
            random.nextBytes(data);
            stream.write(data);
            expectedData.write(data);
        }

        // close for commit and commit
        RecoverableFsDataOutputStream.Committer committer = stream.closeForCommit();
        committer.commit();

        // there should be exactly one blob after commit, with the expected contents;
        // all temporary blobs should be removed
        assertEquals(1, storage.blobs.size());
        MockBlobStorage.BlobValue blobValue = storage.blobs.get(blobIdentifier);
        assertArrayEquals(expectedData.toByteArray(), blobValue.content);
    }
}
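The flinkConfig handed to GSFileSystemOptions in these tests is a plain Flink Configuration, and the parameters the tests vary (temporaryBucketName, writeChunkSizeIsValid) correspond to writer options carried on it. Below is a minimal sketch of setting those options; the option keys gs.writer.temporary.bucket.name and gs.writer.chunk.size are assumptions recalled from the flink-gs-fs-hadoop documentation and should be verified there, and the bucket name and chunk size are placeholder values (GCS resumable uploads require chunk sizes in multiples of 256 KB, which is presumably what writeChunkSizeIsValid guards).

import org.apache.flink.configuration.Configuration;
import org.apache.flink.fs.gs.GSFileSystemOptions;

public class GSWriterOptionsSketch {

    public static void main(String[] args) {
        Configuration flinkConfig = new Configuration();

        // assumed option keys; check the flink-gs-fs-hadoop docs for the exact names
        flinkConfig.setString("gs.writer.temporary.bucket.name", "my-temp-bucket"); // temporary bucket for in-progress blobs
        flinkConfig.setString("gs.writer.chunk.size", "512kb"); // must be a multiple of 256 KB

        GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
        System.out.println(options);
    }
}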
use of org.apache.flink.fs.gs.writer.GSRecoverableWriter in project flink by apache.
the class GSFileSystemScenarioTest method compoundWriteTestWithRestore.
/* Test writing multiple arrays of bytes to a stream, restoring from a mid-stream persist point. */
@Test
public void compoundWriteTestWithRestore() throws IOException {
    // only run the test for valid chunk sizes
    assumeTrue(writeChunkSizeIsValid);

    // create the options and writer
    GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    RecoverableWriter writer = new GSRecoverableWriter(storage, options);

    // create a stream
    RecoverableFsDataOutputStream stream = writer.open(path);

    // write 10 arrays of bytes, but create a restore point after 5 and
    // confirm that we have the proper data after restoring
    final int writeCount = 10;
    final int commitCount = 5;

    // write multiple arrays of bytes to it
    RecoverableWriter.ResumeRecoverable resumeRecoverable = null;
    try (ByteArrayOutputStream expectedData = new ByteArrayOutputStream()) {
        for (int i = 0; i < writeCount; i++) {
            byte[] data = new byte[128];
            random.nextBytes(data);
            stream.write(data);

            // only add the array to the expected data up to the restore point
            if (i < commitCount) {
                expectedData.write(data);
            }

            // capture a resume recoverable at the proper point
            if (i == (commitCount - 1)) {
                resumeRecoverable = stream.persist();
            }
        }

        // recover to the persisted restore point, discarding the writes that followed it
        stream = writer.recover(resumeRecoverable);

        // close for commit and commit
        RecoverableFsDataOutputStream.Committer committer = stream.closeForCommit();
        committer.commit();

        // there should be exactly one blob after commit, with the expected contents;
        // all temporary blobs should be removed
        assertEquals(1, storage.blobs.size());
        MockBlobStorage.BlobValue blobValue = storage.blobs.get(blobIdentifier);
        assertArrayEquals(expectedData.toByteArray(), blobValue.content);
    }
}
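In a production exactly-once sink, persist() and recover() would not happen inside one method: the ResumeRecoverable is serialized into checkpoint state and used to resume the stream after a failure. The sketch below illustrates that split using the generic RecoverableWriter API and Flink's SimpleVersionedSerialization helper; the gs:// path, payload bytes, and class name are placeholders.

import java.io.IOException;

import org.apache.flink.core.fs.Path;
import org.apache.flink.core.fs.RecoverableFsDataOutputStream;
import org.apache.flink.core.fs.RecoverableWriter;
import org.apache.flink.core.io.SimpleVersionedSerialization;

public class PersistAndRecoverSketch {

    public static void main(String[] args) throws IOException {
        Path path = new Path("gs://example-bucket/output/part-0"); // placeholder path
        RecoverableWriter writer = path.getFileSystem().createRecoverableWriter();

        // write some data and persist a resume point, e.g. at a checkpoint
        RecoverableFsDataOutputStream stream = writer.open(path);
        stream.write(new byte[] {1, 2, 3});
        RecoverableWriter.ResumeRecoverable resumable = stream.persist();

        // the recoverable is what a sink would store in checkpoint state
        byte[] state =
                SimpleVersionedSerialization.writeVersionAndSerialize(
                        writer.getResumeRecoverableSerializer(), resumable);

        // ... process fails and restarts; deserialize the recoverable and resume ...
        RecoverableWriter restoredWriter = path.getFileSystem().createRecoverableWriter();
        RecoverableWriter.ResumeRecoverable restored =
                SimpleVersionedSerialization.readVersionAndDeSerialize(
                        restoredWriter.getResumeRecoverableSerializer(), state);

        RecoverableFsDataOutputStream resumedStream = restoredWriter.recover(restored);
        resumedStream.write(new byte[] {4, 5, 6});
        resumedStream.closeForCommit().commit();
    }
}

As in compoundWriteTestWithRestore, anything written after the persisted point but before recovery is discarded when the stream is recovered.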
use of org.apache.flink.fs.gs.writer.GSRecoverableWriter in project flink by apache.
the class GSFileSystem method createRecoverableWriter.
@Override
public RecoverableWriter createRecoverableWriter() {
    LOGGER.info("Creating GSRecoverableWriter with file-system options {}", fileSystemOptions);

    // create the GS blob storage wrapper
    GSBlobStorageImpl blobStorage = new GSBlobStorageImpl(storage);

    // construct the recoverable writer with the blob storage wrapper and the options
    return new GSRecoverableWriter(blobStorage, fileSystemOptions);
}
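The writer created here also serves the other half of the recoverable-write contract: finishing a commit after a failure that struck between closeForCommit() and commit(). A minimal sketch of that commit-after-recovery path, expressed against the generic RecoverableWriter API (the gs:// path, payload, and class name are placeholders):

import java.io.IOException;

import org.apache.flink.core.fs.Path;
import org.apache.flink.core.fs.RecoverableFsDataOutputStream;
import org.apache.flink.core.fs.RecoverableWriter;
import org.apache.flink.core.io.SimpleVersionedSerialization;

public class CommitAfterRecoverySketch {

    public static void main(String[] args) throws IOException {
        Path path = new Path("gs://example-bucket/output/part-1"); // placeholder path
        RecoverableWriter writer = path.getFileSystem().createRecoverableWriter();

        // write and seal the data, but suppose the process dies before commit()
        RecoverableFsDataOutputStream stream = writer.open(path);
        stream.write(new byte[] {7, 8, 9});
        RecoverableFsDataOutputStream.Committer committer = stream.closeForCommit();

        // a sink would persist this commit recoverable in checkpoint state
        byte[] state =
                SimpleVersionedSerialization.writeVersionAndSerialize(
                        writer.getCommitRecoverableSerializer(), committer.getRecoverable());

        // ... after restart: recover the committer and finish the publish ...
        RecoverableWriter recoveredWriter = path.getFileSystem().createRecoverableWriter();
        RecoverableWriter.CommitRecoverable commitRecoverable =
                SimpleVersionedSerialization.readVersionAndDeSerialize(
                        recoveredWriter.getCommitRecoverableSerializer(), state);
        recoveredWriter.recoverForCommit(commitRecoverable).commitAfterRecovery();
    }
}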