Use of org.apache.flink.core.memory.ByteArrayInputStreamWithPos in project flink by apache.
From the class SerializationProxiesTest, method testKeyedStateMetaInfoSerialization.
@Test
public void testKeyedStateMetaInfoSerialization() throws Exception {
    String name = "test";
    TypeSerializer<?> namespaceSerializer = LongSerializer.INSTANCE;
    TypeSerializer<?> stateSerializer = DoubleSerializer.INSTANCE;

    KeyedBackendSerializationProxy.StateMetaInfo<?, ?> metaInfo =
        new KeyedBackendSerializationProxy.StateMetaInfo<>(
            StateDescriptor.Type.VALUE, name, namespaceSerializer, stateSerializer);

    byte[] serialized;
    try (ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos()) {
        metaInfo.write(new DataOutputViewStreamWrapper(out));
        serialized = out.toByteArray();
    }

    metaInfo = new KeyedBackendSerializationProxy.StateMetaInfo<>(Thread.currentThread().getContextClassLoader());
    try (ByteArrayInputStreamWithPos in = new ByteArrayInputStreamWithPos(serialized)) {
        metaInfo.read(new DataInputViewStreamWrapper(in));
    }

    Assert.assertEquals(name, metaInfo.getStateName());
}
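The test above follows the standard round-trip pattern: write through a DataOutputViewStreamWrapper over a ByteArrayOutputStreamWithPos, then read back through a DataInputViewStreamWrapper over a ByteArrayInputStreamWithPos. A minimal, self-contained sketch of that pattern using LongSerializer (illustrative only, not part of the test above):

byte[] bytes;
try (ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos()) {
    // write a single long through the view wrapper
    LongSerializer.INSTANCE.serialize(42L, new DataOutputViewStreamWrapper(out));
    bytes = out.toByteArray();
}
try (ByteArrayInputStreamWithPos in = new ByteArrayInputStreamWithPos(bytes)) {
    // read it back; the stream tracks its read position internally
    long restored = LongSerializer.INSTANCE.deserialize(new DataInputViewStreamWrapper(in));
    Assert.assertEquals(42L, restored);
}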
Use of org.apache.flink.core.memory.ByteArrayInputStreamWithPos in project flink by apache.
From the class RocksDBAggregatingState, method mergeNamespaces.
@Override
public void mergeNamespaces(N target, Collection<N> sources) throws Exception {
    if (sources == null || sources.isEmpty()) {
        return;
    }

    // cache key and namespace
    final K key = backend.getCurrentKey();
    final int keyGroup = backend.getCurrentKeyGroupIndex();

    try {
        ACC current = null;

        // merge the sources to the target
        for (N source : sources) {
            if (source != null) {
                writeKeyWithGroupAndNamespace(keyGroup, key, source, keySerializationStream, keySerializationDataOutputView);
                final byte[] sourceKey = keySerializationStream.toByteArray();
                final byte[] valueBytes = backend.db.get(columnFamily, sourceKey);

                if (valueBytes != null) {
                    ACC value = valueSerializer.deserialize(
                        new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));

                    if (current != null) {
                        current = aggFunction.merge(current, value);
                    } else {
                        current = value;
                    }
                }
            }
        }

        // if something came out of merging the sources, merge it or write it to the target
        if (current != null) {
            // create the target full-binary-key
            writeKeyWithGroupAndNamespace(keyGroup, key, target, keySerializationStream, keySerializationDataOutputView);
            final byte[] targetKey = keySerializationStream.toByteArray();
            final byte[] targetValueBytes = backend.db.get(columnFamily, targetKey);

            if (targetValueBytes != null) {
                // target also had a value, merge
                ACC value = valueSerializer.deserialize(
                    new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(targetValueBytes)));
                current = aggFunction.merge(current, value);
            }

            // serialize the resulting value
            keySerializationStream.reset();
            valueSerializer.serialize(current, keySerializationDataOutputView);

            // write the resulting value
            backend.db.put(columnFamily, writeOptions, targetKey, keySerializationStream.toByteArray());
        }
    } catch (Exception e) {
        throw new Exception("Error while merging state in RocksDB", e);
    }
}
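The loop body above repeats one deserialize-then-merge step per source namespace. That step can be factored into a small stand-alone helper; the following is a hedged sketch (hypothetical, not part of RocksDBAggregatingState) assuming Flink's TypeSerializer and java.util.function.BiFunction as the merge function:

// Hypothetical helper mirroring the loop body above: deserialize a stored
// value and fold it into the running accumulator.
static <ACC> ACC mergeSerializedValue(
        ACC current,
        byte[] valueBytes,
        TypeSerializer<ACC> serializer,
        BiFunction<ACC, ACC, ACC> merge) throws IOException {

    if (valueBytes == null) {
        // nothing stored under this key, keep the accumulator unchanged
        return current;
    }
    ACC value = serializer.deserialize(
        new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
    return current == null ? value : merge.apply(current, value);
}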
Use of org.apache.flink.core.memory.ByteArrayInputStreamWithPos in project flink by apache.
From the class RocksDBKeyedStateBackend, method restoreOldSavepointKeyedState.
/**
 * For backwards compatibility, remove again later!
 */
@Deprecated
private void restoreOldSavepointKeyedState(Collection<KeyGroupsStateHandle> restoreState) throws Exception {
    if (restoreState.isEmpty()) {
        return;
    }

    Preconditions.checkState(1 == restoreState.size(), "Only one element expected here.");

    HashMap<String, RocksDBStateBackend.FinalFullyAsyncSnapshot> namedStates;
    try (FSDataInputStream inputStream = restoreState.iterator().next().openInputStream()) {
        namedStates = InstantiationUtil.deserializeObject(inputStream, userCodeClassLoader);
    }

    Preconditions.checkState(1 == namedStates.size(), "Only one element expected here.");
    DataInputView inputView = namedStates.values().iterator().next().stateHandle.getState(userCodeClassLoader);

    // clear k/v state information before filling it
    kvStateInformation.clear();

    // first get the column family mapping
    int numColumns = inputView.readInt();
    Map<Byte, StateDescriptor<?, ?>> columnFamilyMapping = new HashMap<>(numColumns);
    for (int i = 0; i < numColumns; i++) {
        byte mappingByte = inputView.readByte();

        ObjectInputStream ooIn = new InstantiationUtil.ClassLoaderObjectInputStream(
            new DataInputViewStream(inputView), userCodeClassLoader);
        StateDescriptor stateDescriptor = (StateDescriptor) ooIn.readObject();
        columnFamilyMapping.put(mappingByte, stateDescriptor);

        // this will fill in the k/v state information
        getColumnFamily(stateDescriptor, MigrationNamespaceSerializerProxy.INSTANCE);
    }

    // try and read until EOF
    try {
        // the EOFException will get us out of this...
        while (true) {
            byte mappingByte = inputView.readByte();
            ColumnFamilyHandle handle = getColumnFamily(columnFamilyMapping.get(mappingByte), MigrationNamespaceSerializerProxy.INSTANCE);
            byte[] keyAndNamespace = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);

            ByteArrayInputStreamWithPos bis = new ByteArrayInputStreamWithPos(keyAndNamespace);
            K reconstructedKey = keySerializer.deserialize(new DataInputViewStreamWrapper(bis));
            int len = bis.getPosition();

            int keyGroup = (byte) KeyGroupRangeAssignment.assignToKeyGroup(reconstructedKey, numberOfKeyGroups);

            if (keyGroupPrefixBytes == 1) {
                // copy and override one byte (42) between key and namespace
                System.arraycopy(keyAndNamespace, 0, keyAndNamespace, 1, len);
                keyAndNamespace[0] = (byte) keyGroup;
            } else {
                byte[] largerKey = new byte[1 + keyAndNamespace.length];

                // write key-group
                largerKey[0] = (byte) ((keyGroup >> 8) & 0xFF);
                largerKey[1] = (byte) (keyGroup & 0xFF);

                // write key
                System.arraycopy(keyAndNamespace, 0, largerKey, 2, len);

                // skip one byte (42), write namespace
                System.arraycopy(keyAndNamespace, 1 + len, largerKey, 2 + len, keyAndNamespace.length - len - 1);
                keyAndNamespace = largerKey;
            }

            byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            db.put(handle, keyAndNamespace, value);
        }
    } catch (EOFException e) {
        // expected
    }
}
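The else-branch above rebuilds the RocksDB key with a 2-byte, big-endian key-group prefix in front of the serialized key and namespace. A stand-alone sketch of just that prefixing step (hypothetical helper, not part of the backend):

// Illustrative only: prepend a 2-byte big-endian key-group prefix to an
// already-serialized key-and-namespace byte array.
static byte[] prefixWithKeyGroup(int keyGroup, byte[] serializedKeyAndNamespace) {
    byte[] result = new byte[2 + serializedKeyAndNamespace.length];
    result[0] = (byte) ((keyGroup >> 8) & 0xFF); // high byte of the key group
    result[1] = (byte) (keyGroup & 0xFF);        // low byte of the key group
    System.arraycopy(serializedKeyAndNamespace, 0, result, 2, serializedKeyAndNamespace.length);
    return result;
}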
Use of org.apache.flink.core.memory.ByteArrayInputStreamWithPos in project flink by apache.
From the class RocksDBMapState, method deserializeUserValue.
private UV deserializeUserValue(byte[] rawValueBytes) throws IOException {
    ByteArrayInputStreamWithPos bais = new ByteArrayInputStreamWithPos(rawValueBytes);
    DataInputViewStreamWrapper in = new DataInputViewStreamWrapper(bais);

    // a leading boolean flag marks whether the stored value is null
    boolean isNull = in.readBoolean();
    return isNull ? null : userValueSerializer.deserialize(in);
}
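Since deserializeUserValue reads a null flag before the value, the write side must emit the same layout. A hedged sketch of the matching counterpart (hypothetical method in the same class, assuming the same null-flag convention; not copied from the Flink sources):

// Hypothetical counterpart to deserializeUserValue: write the null flag,
// then the value, mirroring the read order above.
private byte[] serializeUserValue(UV userValue) throws IOException {
    try (ByteArrayOutputStreamWithPos baos = new ByteArrayOutputStreamWithPos()) {
        DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(baos);
        out.writeBoolean(userValue == null);
        if (userValue != null) {
            userValueSerializer.serialize(userValue, out);
        }
        return baos.toByteArray();
    }
}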
Use of org.apache.flink.core.memory.ByteArrayInputStreamWithPos in project flink by apache.
From the class TypeSerializerSerializationProxyTest, method testStateSerializerSerializationProxyClassNotFound.
@Test
public void testStateSerializerSerializationProxyClassNotFound() throws Exception {
    TypeSerializer<?> serializer = IntSerializer.INSTANCE;
    TypeSerializerSerializationProxy<?> proxy = new TypeSerializerSerializationProxy<>(serializer);

    byte[] serialized;
    try (ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos()) {
        proxy.write(new DataOutputViewStreamWrapper(out));
        serialized = out.toByteArray();
    }

    proxy = new TypeSerializerSerializationProxy<>(new URLClassLoader(new URL[0], null));
    try (ByteArrayInputStreamWithPos in = new ByteArrayInputStreamWithPos(serialized)) {
        proxy.read(new DataInputViewStreamWrapper(in));
        fail("ClassNotFoundException expected, leading to IOException");
    } catch (IOException expected) {
        // expected: the serializer class cannot be resolved with an empty classloader
    }

    proxy = new TypeSerializerSerializationProxy<>(new URLClassLoader(new URL[0], null), true);
    try (ByteArrayInputStreamWithPos in = new ByteArrayInputStreamWithPos(serialized)) {
        proxy.read(new DataInputViewStreamWrapper(in));
    }

    Assert.assertTrue(proxy.getTypeSerializer() instanceof TypeSerializerSerializationProxy.ClassNotFoundDummyTypeSerializer);
    Assert.assertArrayEquals(
        InstantiationUtil.serializeObject(serializer),
        ((TypeSerializerSerializationProxy.ClassNotFoundDummyTypeSerializer<?>) proxy.getTypeSerializer()).getActualBytes());
}
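For contrast, reading the same bytes with a classloader that can resolve IntSerializer should restore the original serializer instead of the dummy. A hedged sketch of that happy path (hypothetical continuation of the test above, assuming the proxy round-trips the serializer when the class is resolvable):

// Sketch: with the context classloader, the same bytes deserialize back
// into the original serializer rather than the dummy placeholder.
proxy = new TypeSerializerSerializationProxy<>(Thread.currentThread().getContextClassLoader());
try (ByteArrayInputStreamWithPos in = new ByteArrayInputStreamWithPos(serialized)) {
    proxy.read(new DataInputViewStreamWrapper(in));
}
Assert.assertTrue(proxy.getTypeSerializer() instanceof IntSerializer);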