Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class HeapRestoreOperation, the method restore:
@Override
public Void restore() throws Exception {
    registeredKVStates.clear();
    registeredPQStates.clear();
    boolean keySerializerRestored = false;
    for (KeyedStateHandle keyedStateHandle : restoreStateHandles) {
        if (keyedStateHandle == null) {
            continue;
        }
        if (!(keyedStateHandle instanceof KeyGroupsStateHandle)) {
            throw unexpectedStateHandleException(
                    KeyGroupsStateHandle.class, keyedStateHandle.getClass());
        }
        LOG.info("Starting to restore from state handle: {}.", keyedStateHandle);
        KeyGroupsStateHandle keyGroupsStateHandle = (KeyGroupsStateHandle) keyedStateHandle;
        FSDataInputStream fsDataInputStream = keyGroupsStateHandle.openInputStream();
        cancelStreamRegistry.registerCloseable(fsDataInputStream);
        try {
            DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(fsDataInputStream);
            KeyedBackendSerializationProxy<K> serializationProxy =
                    new KeyedBackendSerializationProxy<>(userCodeClassLoader);
            serializationProxy.read(inView);
            // the key serializer only needs to be checked once, against the first handle
            if (!keySerializerRestored) {
                // fetch the current serializer now because if it is incompatible,
                // we can't access it anymore to improve the error message
                TypeSerializer<K> currentSerializer =
                        keySerializerProvider.currentSchemaSerializer();
                // check for key serializer compatibility; this also reconfigures the
                // key serializer to be compatible, if it is required and possible
                TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat =
                        keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(
                                serializationProxy.getKeySerializerSnapshot());
                if (keySerializerSchemaCompat.isCompatibleAfterMigration()
                        || keySerializerSchemaCompat.isIncompatible()) {
                    throw new StateMigrationException(
                            "The new key serializer ("
                                    + currentSerializer
                                    + ") must be compatible with the previous key serializer ("
                                    + keySerializerProvider.previousSchemaSerializer()
                                    + ").");
                }
                keySerializerRestored = true;
            }
            List<StateMetaInfoSnapshot> restoredMetaInfos =
                    serializationProxy.getStateMetaInfoSnapshots();
            final Map<Integer, StateMetaInfoSnapshot> kvStatesById =
                    this.heapMetaInfoRestoreOperation.createOrCheckStateForMetaInfo(
                            restoredMetaInfos, registeredKVStates, registeredPQStates);
            readStateHandleStateData(
                    fsDataInputStream,
                    inView,
                    keyGroupsStateHandle.getGroupRangeOffsets(),
                    kvStatesById,
                    restoredMetaInfos.size(),
                    serializationProxy.getReadVersion(),
                    serializationProxy.isUsingKeyGroupCompression());
            LOG.info("Finished restoring from state handle: {}.", keyedStateHandle);
        } finally {
            // close the stream only if it is still registered, i.e. it was not
            // already closed by a concurrent cancellation
            if (cancelStreamRegistry.unregisterCloseable(fsDataInputStream)) {
                IOUtils.closeQuietly(fsDataInputStream);
            }
        }
    }
    return null;
}
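The register/unregister pair around cancelStreamRegistry is what makes this restore cancellable while it blocks on I/O: cancellation closes every registered stream, which unblocks pending reads. Below is a minimal sketch of that pattern in isolation; SimpleCloseableRegistry is a hypothetical stand-in for Flink's real CloseableRegistry, not its actual implementation.

import java.io.Closeable;
import java.io.IOException;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for Flink's CloseableRegistry: on cancel, every
// registered stream is closed, which unblocks threads stuck in read().
class SimpleCloseableRegistry {
    private final Set<Closeable> closeables = ConcurrentHashMap.newKeySet();

    void registerCloseable(Closeable c) {
        closeables.add(c);
    }

    // Returns true only if the closeable was still registered, i.e. the
    // caller is now responsible for closing it (mirrors Flink's contract).
    boolean unregisterCloseable(Closeable c) {
        return closeables.remove(c);
    }

    void cancel() throws IOException {
        for (Closeable c : closeables) {
            c.close();
        }
        closeables.clear();
    }
}

The boolean returned by unregisterCloseable resolves the race between normal completion and cancellation: whichever side removes the stream from the registry first owns the close() call, so the stream is closed exactly once.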
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class MetadataV3SerializerTest, the method testSerializeKeyGroupsStateHandle:
@Test
public void testSerializeKeyGroupsStateHandle() throws IOException {
    KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(0, 123);
    byte[] data = {1, 2, 3, 4};
    try (ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos()) {
        MetadataV2V3SerializerBase.serializeStreamStateHandle(
                new KeyGroupsStateHandle(offsets, new ByteStreamStateHandle("test", data)),
                new DataOutputStream(out));
        try (ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray())) {
            StreamStateHandle handle =
                    MetadataV2V3SerializerBase.deserializeStreamStateHandle(
                            new DataInputStream(in), null);
            assertTrue(handle instanceof KeyGroupsStateHandle);
            assertEquals(offsets, ((KeyGroupsStateHandle) handle).getGroupRangeOffsets());
            byte[] deserialized = new byte[data.length];
            try (FSDataInputStream dataStream = handle.openInputStream()) {
                dataStream.read(deserialized);
                assertArrayEquals(data, deserialized);
            }
        }
    }
}
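One caveat worth noting: InputStream.read(byte[]) is only guaranteed to return at least one byte, not to fill the buffer. That is safe here because the handle is backed by an in-memory byte array, but reading from a real filesystem stream would call for a loop. A small sketch of such a defensive helper (java.io.DataInputStream#readFully provides the same behavior out of the box):

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

// Reads exactly buffer.length bytes or throws; a defensive alternative to a
// bare read(buffer), which may legally return fewer bytes than requested.
static void readFully(InputStream in, byte[] buffer) throws IOException {
    int pos = 0;
    while (pos < buffer.length) {
        int n = in.read(buffer, pos, buffer.length - pos);
        if (n < 0) {
            throw new EOFException("Stream ended after " + pos + " bytes");
        }
        pos += n;
    }
}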
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class GenericWriteAheadSink, the method notifyCheckpointComplete:
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
    super.notifyCheckpointComplete(checkpointId);
    synchronized (pendingCheckpoints) {
        Iterator<PendingCheckpoint> pendingCheckpointIt = pendingCheckpoints.iterator();
        while (pendingCheckpointIt.hasNext()) {
            PendingCheckpoint pendingCheckpoint = pendingCheckpointIt.next();
            long pastCheckpointId = pendingCheckpoint.checkpointId;
            int subtaskId = pendingCheckpoint.subtaskId;
            long timestamp = pendingCheckpoint.timestamp;
            StreamStateHandle streamHandle = pendingCheckpoint.stateHandle;
            if (pastCheckpointId <= checkpointId) {
                try {
                    if (!committer.isCheckpointCommitted(subtaskId, pastCheckpointId)) {
                        try (FSDataInputStream in = streamHandle.openInputStream()) {
                            boolean success =
                                    sendValues(
                                            new ReusingMutableToRegularIteratorWrapper<>(
                                                    new InputViewIterator<>(
                                                            new DataInputViewStreamWrapper(in),
                                                            serializer),
                                                    serializer),
                                            pastCheckpointId,
                                            timestamp);
                            if (success) {
                                // in case the checkpoint was successfully committed,
                                // discard its state from the backend and mark it for removal;
                                // in case it failed, we retry on the next checkpoint
                                committer.commitCheckpoint(subtaskId, pastCheckpointId);
                                streamHandle.discardState();
                                pendingCheckpointIt.remove();
                            }
                        }
                    } else {
                        streamHandle.discardState();
                        pendingCheckpointIt.remove();
                    }
                } catch (Exception e) {
                    // we have to break here to prevent a new (later) checkpoint
                    // from being committed before this one
                    LOG.error("Could not commit checkpoint.", e);
                    break;
                }
            }
        }
    }
}
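The break in the catch block is the essential piece: pending checkpoints must be committed strictly in order, so a failure on one checkpoint has to stall all later ones rather than skip ahead. The sketch below isolates that invariant with hypothetical names (OrderedCommitPass and Commit are illustrations, not Flink API):

import java.util.Iterator;
import java.util.List;

// Hypothetical illustration of the ordering invariant above: items are
// committed strictly in list order, and the first failure ends the pass so a
// later item can never be committed before an earlier one.
final class OrderedCommitPass {

    interface Commit<T> {
        // Returns true if the item was durably committed.
        boolean tryCommit(T item) throws Exception;
    }

    static <T> void commitInOrder(List<T> pending, Commit<T> commit) {
        Iterator<T> it = pending.iterator();
        while (it.hasNext()) {
            T item = it.next();
            try {
                if (!commit.tryCommit(item)) {
                    break; // not committed yet; retry on the next pass
                }
                it.remove(); // committed and safe to forget
            } catch (Exception e) {
                break; // keep later items queued behind the failed one
            }
        }
    }
}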
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class ChannelStateChunkReaderTest, the method testNoSeekUnnecessarily:
@Test
public void testNoSeekUnnecessarily() throws IOException, InterruptedException {
    final int offset = 123;
    // the stream already reports the requested offset, so any seek() is a bug
    final FSDataInputStream stream =
            new FSDataInputStream() {
                @Override
                public long getPos() {
                    return offset;
                }

                @Override
                public void seek(long ignored) {
                    fail();
                }

                @Override
                public int read() {
                    return 0;
                }
            };
    new ChannelStateChunkReader(new ChannelStateSerializerImpl())
            .readChunk(stream, offset, new TestRecoveredChannelStateHandler(), "channelInfo", 0);
}
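The test pins down a small optimization: readChunk should only seek when the stream is not already at the requested offset, since the stubbed seek() fails the test unconditionally. The guard it verifies presumably looks like this hypothetical helper (not the actual ChannelStateChunkReader code):

import java.io.IOException;
import org.apache.flink.core.fs.FSDataInputStream;

// Seek only when the stream is not already at the requested offset; seeks
// can be expensive on remote filesystems, so redundant ones are avoided.
static void seekIfNeeded(FSDataInputStream stream, long offset) throws IOException {
    if (stream.getPos() != offset) {
        stream.seek(offset);
    }
}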
Use of org.apache.flink.core.fs.FSDataInputStream in project flink by apache.
In the class CheckpointStreamWithResultProviderTest, the method testCloseAndFinalizeCheckpointStreamResultPrimaryAndSecondary:
@Test
public void testCloseAndFinalizeCheckpointStreamResultPrimaryAndSecondary() throws Exception {
    CheckpointStreamFactory primaryFactory = createCheckpointStreamFactory();
    LocalRecoveryDirectoryProvider directoryProvider = createLocalRecoveryDirectoryProvider();
    CheckpointStreamWithResultProvider resultProvider =
            CheckpointStreamWithResultProvider.createDuplicatingStream(
                    42L, CheckpointedStateScope.EXCLUSIVE, primaryFactory, directoryProvider);
    SnapshotResult<StreamStateHandle> result = writeCheckpointTestData(resultProvider);
    Assert.assertNotNull(result.getJobManagerOwnedSnapshot());
    Assert.assertNotNull(result.getTaskLocalSnapshot());
    try (FSDataInputStream inputStream = result.getJobManagerOwnedSnapshot().openInputStream()) {
        Assert.assertEquals(0x42, inputStream.read());
        Assert.assertEquals(-1, inputStream.read());
    }
    try (FSDataInputStream inputStream = result.getTaskLocalSnapshot().openInputStream()) {
        Assert.assertEquals(0x42, inputStream.read());
        Assert.assertEquals(-1, inputStream.read());
    }
}
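Since the duplicating stream writes the same bytes to the job-manager-owned and task-local snapshots, the two assertion blocks are intentionally identical. A hypothetical helper could generalize them to arbitrary content by comparing the two streams byte for byte:

import java.io.IOException;
import java.io.InputStream;

// Hypothetical helper generalizing the duplicated assertions above: verifies
// that two independently opened streams yield byte-for-byte identical content
// and reach end-of-stream together.
static boolean streamsEqual(InputStream a, InputStream b) throws IOException {
    int x;
    int y;
    do {
        x = a.read();
        y = b.read();
        if (x != y) {
            return false;
        }
    } while (x != -1);
    return true;
}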