Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class DefaultOperatorStateBackend, method restore:
@Override
public void restore(Collection<OperatorStateHandle> restoreSnapshots) throws Exception {
    if (null == restoreSnapshots) {
        return;
    }
    for (OperatorStateHandle stateHandle : restoreSnapshots) {
        if (stateHandle == null) {
            continue;
        }
        FSDataInputStream in = stateHandle.openInputStream();
        closeStreamOnCancelRegistry.registerClosable(in);
        ClassLoader restoreClassLoader = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader(userClassloader);
            OperatorBackendSerializationProxy backendSerializationProxy =
                new OperatorBackendSerializationProxy(userClassloader);
            backendSerializationProxy.read(new DataInputViewStreamWrapper(in));
            List<OperatorBackendSerializationProxy.StateMetaInfo<?>> metaInfoList =
                backendSerializationProxy.getNamedStateSerializationProxies();
            // Recreate all PartitionableListStates from the meta info
            for (OperatorBackendSerializationProxy.StateMetaInfo<?> stateMetaInfo : metaInfoList) {
                PartitionableListState<?> listState = registeredStates.get(stateMetaInfo.getName());
                if (null == listState) {
                    listState = new PartitionableListState<>(
                        stateMetaInfo.getName(),
                        stateMetaInfo.getStateSerializer(),
                        stateMetaInfo.getMode());
                    registeredStates.put(listState.getName(), listState);
                } else {
                    Preconditions.checkState(
                        listState.getPartitionStateSerializer().isCompatibleWith(stateMetaInfo.getStateSerializer()),
                        "Incompatible state serializers found: " + listState.getPartitionStateSerializer() +
                            " is not compatible with " + stateMetaInfo.getStateSerializer());
                }
            }
            // Restore all the state in PartitionableListStates
            for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> nameToOffsets :
                    stateHandle.getStateNameToPartitionOffsets().entrySet()) {
                PartitionableListState<?> stateListForName = registeredStates.get(nameToOffsets.getKey());
                Preconditions.checkState(
                    null != stateListForName,
                    "Found state without corresponding meta info: " + nameToOffsets.getKey());
                deserializeStateValues(stateListForName, in, nameToOffsets.getValue());
            }
        } finally {
            Thread.currentThread().setContextClassLoader(restoreClassLoader);
            closeStreamOnCancelRegistry.unregisterClosable(in);
            IOUtils.closeQuietly(in);
        }
    }
}
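
What DataInputViewStreamWrapper contributes here is adapting a plain java.io.InputStream to Flink's DataInputView interface, which is what TypeSerializer and the serialization proxies read from. A minimal round trip showing that adaptation (the class name and the choice of StringSerializer are illustrative only, not taken from the snippet above):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

public class WrapperRoundTrip {

    public static void main(String[] args) throws IOException {
        // Write through the DataOutputView adapter ...
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        StringSerializer.INSTANCE.serialize("hello", new DataOutputViewStreamWrapper(out));

        // ... and read back through the DataInputView adapter, as restore() does above.
        ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
        String restored = StringSerializer.INSTANCE.deserialize(new DataInputViewStreamWrapper(in));
        System.out.println(restored); // prints "hello"
    }
}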
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class NFA, method readComputationState:
@SuppressWarnings("unchecked")
private ComputationState<T> readComputationState(ObjectInputStream ois) throws IOException, ClassNotFoundException {
    final State<T> state = (State<T>) ois.readObject();
    final long timestamp = ois.readLong();
    final DeweyNumber version = (DeweyNumber) ois.readObject();
    final long startTimestamp = ois.readLong();
    final boolean hasEvent = ois.readBoolean();
    final T event;
    if (hasEvent) {
        // The event was written with a Flink TypeSerializer, so wrap the object
        // stream in a DataInputView to read it back.
        DataInputViewStreamWrapper input = new DataInputViewStreamWrapper(ois);
        event = nonDuplicatingTypeSerializer.deserialize(input);
    } else {
        event = null;
    }
    return new ComputationState<>(state, event, timestamp, version, startTimestamp);
}
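
The matching write side does not appear in this listing; the following is a sketch of what it presumably looks like, mirroring the field order read above. The helper class and method names are hypothetical, and Object stands in for the CEP State and DeweyNumber types:

import java.io.IOException;
import java.io.ObjectOutputStream;

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

// Hypothetical helper mirroring readComputationState's field order.
final class ComputationStateWriter<T> {

    private final TypeSerializer<T> serializer;

    ComputationStateWriter(TypeSerializer<T> serializer) {
        this.serializer = serializer;
    }

    void write(ObjectOutputStream oos, Object state, long timestamp,
               Object version, long startTimestamp, T event) throws IOException {
        oos.writeObject(state);          // read back as State<T>
        oos.writeLong(timestamp);
        oos.writeObject(version);        // read back as DeweyNumber
        oos.writeLong(startTimestamp);
        oos.writeBoolean(event != null); // the hasEvent flag checked by the reader
        if (event != null) {
            // Flink serializers write to a DataOutputView; the wrapper adapts the object stream.
            serializer.serialize(event, new DataOutputViewStreamWrapper(oos));
        }
    }
}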
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class SharedBuffer, method readObject:
private void readObject(ObjectInputStream ois) throws IOException, ClassNotFoundException {
    DataInputViewStreamWrapper source = new DataInputViewStreamWrapper(ois);
    ArrayList<SharedBufferEntry<K, V>> entryList = new ArrayList<>();
    ois.defaultReadObject();
    this.pages = new HashMap<>();
    int numberPages = ois.readInt();
    for (int i = 0; i < numberPages; i++) {
        // key of the page
        @SuppressWarnings("unchecked")
        K key = (K) ois.readObject();
        SharedBufferPage<K, V> page = new SharedBufferPage<>(key);
        pages.put(key, page);
        int numberEntries = ois.readInt();
        for (int j = 0; j < numberEntries; j++) {
            // restore the SharedBufferEntries for the given page
            V value = valueSerializer.deserialize(source);
            long timestamp = ois.readLong();
            ValueTimeWrapper<V> valueTimeWrapper = new ValueTimeWrapper<>(value, timestamp);
            SharedBufferEntry<K, V> sharedBufferEntry = new SharedBufferEntry<K, V>(valueTimeWrapper, page);
            sharedBufferEntry.referenceCounter = ois.readInt();
            page.entries.put(valueTimeWrapper, sharedBufferEntry);
            entryList.add(sharedBufferEntry);
        }
    }
    // read the edges of the shared buffer entries
    int numberEdges = ois.readInt();
    for (int j = 0; j < numberEdges; j++) {
        int sourceIndex = ois.readInt();
        int targetIndex = ois.readInt();
        if (sourceIndex >= entryList.size() || sourceIndex < 0) {
            throw new RuntimeException("Could not find source entry with index " + sourceIndex +
                ". This indicates a corrupted state.");
        } else {
            // The entries were already deserialized above; the indices simply
            // identify the source and target entries in that list.
            SharedBufferEntry<K, V> sourceEntry = entryList.get(sourceIndex);
            final DeweyNumber version = (DeweyNumber) ois.readObject();
            final SharedBufferEntry<K, V> target;
            if (targetIndex >= 0) {
                if (targetIndex >= entryList.size()) {
                    throw new RuntimeException("Could not find target entry with index " + targetIndex +
                        ". This indicates a corrupted state.");
                }
                target = entryList.get(targetIndex);
            } else {
                target = null;
            }
            sourceEntry.edges.add(new SharedBufferEdge<K, V>(target, version));
        }
    }
}
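
Note how readObject interleaves reads from the ObjectInputStream itself (counts, timestamps, keys) with reads through the DataInputViewStreamWrapper (values deserialized by valueSerializer), all over the same underlying stream. A minimal self-contained sketch of that mixed pattern, with StringSerializer standing in for the value serializer:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

public class MixedStreams {

    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bytes)) {
            oos.writeInt(42); // structural data via Java serialization
            // value data via a Flink serializer, sharing the same stream
            StringSerializer.INSTANCE.serialize("value", new DataOutputViewStreamWrapper(oos));
        }
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            int count = ois.readInt();
            String value = StringSerializer.INSTANCE.deserialize(new DataInputViewStreamWrapper(ois));
            System.out.println(count + " -> " + value); // prints "42 -> value"
        }
    }
}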
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class SavepointStore, method loadSavepointWithHandle:
/**
 * Loads the savepoint at the specified path. This method returns the savepoint, as well as the
 * handle to the metadata.
 *
 * @param savepointFileOrDirectory Path to the parent savepoint directory or the meta data file.
 * @param classLoader The class loader used to resolve serialized classes from legacy savepoint formats.
 * @return The loaded savepoint
 *
 * @throws IOException Failures during load are forwarded
 */
public static Tuple2<Savepoint, StreamStateHandle> loadSavepointWithHandle(String savepointFileOrDirectory, ClassLoader classLoader) throws IOException {
    checkNotNull(savepointFileOrDirectory, "savepointFileOrDirectory");
    checkNotNull(classLoader, "classLoader");
    Path path = new Path(savepointFileOrDirectory);
    LOG.info("Loading savepoint from {}", path);
    FileSystem fs = FileSystem.get(path.toUri());
    FileStatus status = fs.getFileStatus(path);
    // If this is a directory, we need to find the meta data file
    if (status.isDir()) {
        Path candidatePath = new Path(path, SAVEPOINT_METADATA_FILE);
        if (fs.exists(candidatePath)) {
            path = candidatePath;
            LOG.info("Using savepoint file in {}", path);
        } else {
            throw new IOException("Cannot find meta data file in directory " + path +
                ". Please try to load the savepoint directly from the meta data file instead of the directory.");
        }
    }
    // load the savepoint
    final Savepoint savepoint;
    try (DataInputStream dis = new DataInputViewStreamWrapper(fs.open(path))) {
        int magicNumber = dis.readInt();
        if (magicNumber == MAGIC_NUMBER) {
            int version = dis.readInt();
            SavepointSerializer<?> serializer = SavepointSerializers.getSerializer(version);
            savepoint = serializer.deserialize(dis, classLoader);
        } else {
            throw new RuntimeException("Unexpected magic number. This can have multiple reasons: " +
                "(1) You are trying to load a Flink 1.0 savepoint, which is not supported by this " +
                "version of Flink. (2) The file you were pointing to is not a savepoint at all. " +
                "(3) The savepoint file has been corrupted.");
        }
    }
    // construct the stream handle to the metadata file;
    // we determine the size best-effort
    long size = 0;
    try {
        size = fs.getFileStatus(path).getLen();
    } catch (Exception ignored) {
        // we don't know the size, but we don't want to fail the savepoint loading for that
    }
    StreamStateHandle metadataHandle = new FileStateHandle(path, size);
    return new Tuple2<>(savepoint, metadataHandle);
}
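
As the branch on magicNumber shows, a savepoint metadata file begins with an int magic number followed by an int format version, ahead of the version-specific payload. A minimal sketch of such a header check, using only the JDK; the MAGIC_NUMBER value below is a placeholder, not Flink's actual constant:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class SavepointHeaderCheck {

    private static final int MAGIC_NUMBER = 0xCAFEF00D; // placeholder, not Flink's real value

    /** Returns the format version if the file carries the expected magic number. */
    public static int readVersion(String metadataFile) throws IOException {
        try (DataInputStream dis = new DataInputStream(new FileInputStream(metadataFile))) {
            if (dis.readInt() != MAGIC_NUMBER) {
                throw new IOException("Not a savepoint metadata file: " + metadataFile);
            }
            return dis.readInt(); // selects the SavepointSerializer on the real load path
        }
    }
}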
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class SerializationProxiesTest, method testKeyedStateMetaInfoSerialization:
@Test
public void testKeyedStateMetaInfoSerialization() throws Exception {
    String name = "test";
    TypeSerializer<?> namespaceSerializer = LongSerializer.INSTANCE;
    TypeSerializer<?> stateSerializer = DoubleSerializer.INSTANCE;
    KeyedBackendSerializationProxy.StateMetaInfo<?, ?> metaInfo =
        new KeyedBackendSerializationProxy.StateMetaInfo<>(StateDescriptor.Type.VALUE, name, namespaceSerializer, stateSerializer);
    byte[] serialized;
    try (ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos()) {
        metaInfo.write(new DataOutputViewStreamWrapper(out));
        serialized = out.toByteArray();
    }
    metaInfo = new KeyedBackendSerializationProxy.StateMetaInfo<>(Thread.currentThread().getContextClassLoader());
    try (ByteArrayInputStreamWithPos in = new ByteArrayInputStreamWithPos(serialized)) {
        metaInfo.read(new DataInputViewStreamWrapper(in));
    }
    Assert.assertEquals(name, metaInfo.getStateName());
}
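
Both directions of this round trip go through the stream wrappers: DataOutputViewStreamWrapper adapts the output stream for write(), and DataInputViewStreamWrapper adapts the input for read(). The fresh proxy on the read side is constructed with a class loader because the serializers embedded in the stream must be resolved by class name on restore; presumably this is also why restore() in DefaultOperatorStateBackend above installs the user class loader before reading.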