use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class CheckpointStateOutputStreamTest method testWriteAndRead.
/**
 * Simple write and read test.
 */
@Test
public void testWriteAndRead() throws Exception {
    final FileSystem fs = FileSystem.getLocalFileSystem();
    final Path folder = baseFolder();
    final String fileName = "fooBarName";

    final Random rnd = new Random();
    final byte[] data = new byte[1694523];

    // write the data (mixed single byte writes and array writes)
    final FileStateHandle handle;
    try (FSDataOutputStream stream = createTestStream(fs, folder, fileName)) {
        for (int i = 0; i < data.length; ) {
            if (rnd.nextBoolean()) {
                stream.write(data[i++]);
            } else {
                int len = rnd.nextInt(Math.min(data.length - i, 32));
                stream.write(data, i, len);
                i += len;
            }
        }
        handle = closeAndGetResult(stream);
    }

    // (1) stream from handle must hold the contents
    try (FSDataInputStream in = handle.openInputStream()) {
        byte[] buffer = new byte[data.length];
        readFully(in, buffer);
        assertArrayEquals(data, buffer);
    }

    // (2) the pointer must point to a file with that contents
    try (FSDataInputStream in = fs.open(handle.getFilePath())) {
        byte[] buffer = new byte[data.length];
        readFully(in, buffer);
        assertArrayEquals(data, buffer);
    }
}
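The test above relies on helpers from its own test class (createTestStream, closeAndGetResult, readFully). For reference, a minimal readFully helper could look like the sketch below; the name matches the call site, but the body is an assumption, not the original test code.

// Hypothetical helper: read exactly buffer.length bytes, failing on a premature end of stream.
private static void readFully(InputStream in, byte[] buffer) throws IOException {
    int pos = 0;
    while (pos < buffer.length) {
        int read = in.read(buffer, pos, buffer.length - pos);
        if (read == -1) {
            throw new EOFException("stream ended after " + pos + " of " + buffer.length + " bytes");
        }
        pos += read;
    }
}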
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class CheckpointStateOutputStreamTest method testCloseDoesNotLock.
/**
 * This test validates that a close operation can happen even while a 'closeAndGetHandle()' call
 * is in progress.
 *
 * <p>That behavior is essential for fast cancellation (concurrent cleanup).
 */
@Test
public void testCloseDoesNotLock() throws Exception {
    final Path folder = new Path(tmp.newFolder().toURI());
    final String fileName = "this-is-ignored-anyways.file";

    final FileSystem fileSystem = spy(new FsWithoutRecoverableWriter((path) -> new BlockerStream()));
    final FSDataOutputStream checkpointStream = createTestStream(fileSystem, folder, fileName);

    final OneShotLatch sync = new OneShotLatch();

    final CheckedThread thread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            sync.trigger();
            // that call should now block, because it accesses the position
            closeAndGetResult(checkpointStream);
        }
    };
    thread.start();

    sync.await();
    checkpointStream.close();

    // whether closeAndGetResult() succeeds or fails is not important for this test;
    // what matters is that the thread does not freeze / lock up
    try {
        thread.sync();
    } catch (IOException ignored) {
    }
}
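FsWithoutRecoverableWriter and BlockerStream are test-local classes defined elsewhere in the test file. The essential idea of BlockerStream is an FSDataOutputStream whose operations block until close() is called; a rough sketch of such a stream is shown below (the details are assumptions, not the actual Flink test code).

// Sketch of a blocking stream: every operation waits until close() has been called,
// which lets the test check that a concurrent close() unblocks closeAndGetResult().
private static class BlockerStream extends FSDataOutputStream {

    private final CountDownLatch closed = new CountDownLatch(1);

    @Override
    public long getPos() throws IOException {
        block();
        return 0L;
    }

    @Override
    public void write(int b) throws IOException {
        block();
    }

    @Override
    public void flush() throws IOException {
        block();
    }

    @Override
    public void sync() throws IOException {
        block();
    }

    @Override
    public void close() {
        closed.countDown();
    }

    // waits for close() and then fails the blocked operation with an IOException
    private void block() throws IOException {
        try {
            closed.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        throw new IOException("stream closed while operation was in progress");
    }
}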
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class FsCheckpointStateOutputStreamTest method testCleanupWhenFailingCloseAndGetHandle.
/**
 * Tests that the underlying stream file is deleted if the closeAndGetHandle method fails.
 */
@Test
public void testCleanupWhenFailingCloseAndGetHandle() throws IOException {
    final FileSystem fs = mock(FileSystem.class);
    final FSDataOutputStream outputStream = mock(FSDataOutputStream.class);

    final ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);

    when(fs.create(pathCaptor.capture(), any(FileSystem.WriteMode.class))).thenReturn(outputStream);
    doThrow(new IOException("Test IOException.")).when(outputStream).close();

    CheckpointStateOutputStream stream =
            new FsCheckpointStreamFactory.FsCheckpointStateOutputStream(
                    Path.fromLocalFile(tempDir.newFolder()), fs, 4, 0, relativePaths);

    // this should create the underlying file stream
    stream.write(new byte[] { 1, 2, 3, 4, 5 });

    verify(fs).create(any(Path.class), any(FileSystem.WriteMode.class));

    try {
        stream.closeAndGetHandle();
        fail("Expected IOException");
    } catch (IOException ioE) {
        // expected exception
    }

    verify(fs).delete(eq(pathCaptor.getValue()), anyBoolean());
}
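The contract verified here, removing the partially written file when closeAndGetHandle() fails, follows roughly the pattern sketched below. This is a simplified illustration, not the actual FsCheckpointStateOutputStream implementation; the method name and parameters are assumptions.

// Simplified sketch of the cleanup contract: if handing the file over fails,
// delete it so no orphaned checkpoint data is left behind, then rethrow.
static StreamStateHandle closeAndGetHandleSketch(FileSystem fs, FSDataOutputStream out, Path statePath) throws IOException {
    try {
        long size = out.getPos();
        out.close();
        return new FileStateHandle(statePath, size);
    } catch (IOException e) {
        try {
            fs.delete(statePath, false);
        } catch (IOException cleanupFailure) {
            // best effort only; the original failure is the one to report
        }
        throw e;
    }
}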
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class AbstractFileCheckpointStorageAccessTestBase method testPointerPathResolution.
// ------------------------------------------------------------------------
//  pointers
// ------------------------------------------------------------------------

@Test
public void testPointerPathResolution() throws Exception {
    final FileSystem fs = FileSystem.getLocalFileSystem();
    final Path metadataFile = new Path(
            Path.fromLocalFile(tmp.newFolder()), AbstractFsCheckpointStorageAccess.METADATA_FILE_NAME);

    final String basePointer = metadataFile.getParent().toString();

    final String pointer1 = metadataFile.toString();
    final String pointer2 = metadataFile.getParent().toString();
    final String pointer3 = metadataFile.getParent().toString() + '/';

    // create the storage for some random checkpoint directory
    final CheckpointStorageAccess storage = createCheckpointStorage(randomTempPath());

    final byte[] data = new byte[23686];
    new Random().nextBytes(data);

    try (FSDataOutputStream out = fs.create(metadataFile, WriteMode.NO_OVERWRITE)) {
        out.write(data);
    }

    CompletedCheckpointStorageLocation completed1 = storage.resolveCheckpoint(pointer1);
    CompletedCheckpointStorageLocation completed2 = storage.resolveCheckpoint(pointer2);
    CompletedCheckpointStorageLocation completed3 = storage.resolveCheckpoint(pointer3);

    assertEquals(basePointer, completed1.getExternalPointer());
    assertEquals(basePointer, completed2.getExternalPointer());
    assertEquals(basePointer, completed3.getExternalPointer());

    StreamStateHandle handle1 = completed1.getMetadataHandle();
    StreamStateHandle handle2 = completed2.getMetadataHandle();
    StreamStateHandle handle3 = completed3.getMetadataHandle();

    assertNotNull(handle1);
    assertNotNull(handle2);
    assertNotNull(handle3);

    validateContents(handle1, data);
    validateContents(handle2, data);
    validateContents(handle3, data);
}
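validateContents is another helper of the test base class; a plausible version that checks a StreamStateHandle against the expected bytes is sketched below (an assumption for illustration, not the original helper).

// Hypothetical helper: open the state handle and assert that it contains exactly the expected bytes.
private static void validateContents(StreamStateHandle handle, byte[] expected) throws IOException {
    try (FSDataInputStream in = handle.openInputStream()) {
        byte[] buffer = new byte[expected.length];
        int pos = 0;
        while (pos < buffer.length) {
            int read = in.read(buffer, pos, buffer.length - pos);
            assertTrue("stream ended prematurely", read != -1);
            pos += read;
        }
        assertArrayEquals(expected, buffer);
        assertEquals("unexpected trailing data", -1, in.read());
    }
}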
use of org.apache.flink.core.fs.FSDataOutputStream in project flink by apache.
the class SavepointStore method storeSavepointToHandle.
/**
 * Stores the savepoint metadata file to a state handle.
 *
 * @param directory Target directory to store the savepoint in
 * @param filename Name of the metadata file within the target directory
 * @param savepoint Savepoint to be stored
 *
 * @return State handle to the savepoint metadata
 * @throws IOException Failures during the store are forwarded
 */
static <T extends Savepoint> FileStateHandle storeSavepointToHandle(String directory, String filename, T savepoint) throws IOException {
    checkNotNull(directory, "Target directory");
    checkNotNull(savepoint, "Savepoint");

    final Path basePath = new Path(directory);
    final Path metadataFilePath = new Path(basePath, filename);

    final FileSystem fs = FileSystem.get(basePath.toUri());

    boolean success = false;
    try (FSDataOutputStream fdos = fs.create(metadataFilePath, WriteMode.NO_OVERWRITE);
            DataOutputStream dos = new DataOutputStream(fdos)) {

        // Write header
        dos.writeInt(MAGIC_NUMBER);
        dos.writeInt(savepoint.getVersion());

        // Write savepoint
        SavepointSerializer<T> serializer = SavepointSerializers.getSerializer(savepoint);
        serializer.serialize(savepoint, dos);

        // construct result handle
        FileStateHandle handle = new FileStateHandle(metadataFilePath, dos.size());

        // all good!
        success = true;
        return handle;
    } finally {
        if (!success && fs.exists(metadataFilePath)) {
            if (!fs.delete(metadataFilePath, true)) {
                LOG.warn("Failed to delete file {} after failed metadata write.", metadataFilePath);
            }
        }
    }
}
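Reading the metadata back from the returned handle mirrors this write path: first the magic number, then the version that selects the serializer. A small sketch of that read side, assuming the same MAGIC_NUMBER constant, is shown below (an illustration, not the SavepointStore load code).

// Sketch: read back the header written by storeSavepointToHandle and return the format version.
static int readSavepointVersion(FileStateHandle handle) throws IOException {
    try (DataInputStream dis = new DataInputStream(handle.openInputStream())) {
        int magic = dis.readInt();
        if (magic != MAGIC_NUMBER) {
            throw new IOException("Unexpected magic number: " + magic);
        }
        // the version determines which SavepointSerializer can read the remaining payload
        return dis.readInt();
    }
}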