Use of org.apache.flink.core.memory.DataInputView in project flink by apache.
The class RocksDBKeyedStateBackend, method restoreOldSavepointKeyedState.
/**
* For backwards compatibility, remove again later!
*/
@Deprecated
private void restoreOldSavepointKeyedState(Collection<KeyGroupsStateHandle> restoreState) throws Exception {
    if (restoreState.isEmpty()) {
        return;
    }
    Preconditions.checkState(1 == restoreState.size(), "Only one element expected here.");
    HashMap<String, RocksDBStateBackend.FinalFullyAsyncSnapshot> namedStates;
    try (FSDataInputStream inputStream = restoreState.iterator().next().openInputStream()) {
        namedStates = InstantiationUtil.deserializeObject(inputStream, userCodeClassLoader);
    }
    Preconditions.checkState(1 == namedStates.size(), "Only one element expected here.");
    DataInputView inputView = namedStates.values().iterator().next().stateHandle.getState(userCodeClassLoader);

    // clear k/v state information before filling it
    kvStateInformation.clear();

    // first get the column family mapping
    int numColumns = inputView.readInt();
    Map<Byte, StateDescriptor<?, ?>> columnFamilyMapping = new HashMap<>(numColumns);
    for (int i = 0; i < numColumns; i++) {
        byte mappingByte = inputView.readByte();
        ObjectInputStream ooIn =
            new InstantiationUtil.ClassLoaderObjectInputStream(
                new DataInputViewStream(inputView), userCodeClassLoader);
        StateDescriptor stateDescriptor = (StateDescriptor) ooIn.readObject();
        columnFamilyMapping.put(mappingByte, stateDescriptor);

        // this will fill in the k/v state information
        getColumnFamily(stateDescriptor, MigrationNamespaceSerializerProxy.INSTANCE);
    }

    // try and read until EOF
    try {
        // the EOFException will get us out of this...
        while (true) {
            byte mappingByte = inputView.readByte();
            ColumnFamilyHandle handle =
                getColumnFamily(columnFamilyMapping.get(mappingByte), MigrationNamespaceSerializerProxy.INSTANCE);
            byte[] keyAndNamespace = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            ByteArrayInputStreamWithPos bis = new ByteArrayInputStreamWithPos(keyAndNamespace);
            K reconstructedKey = keySerializer.deserialize(new DataInputViewStreamWrapper(bis));
            int len = bis.getPosition();
            int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(reconstructedKey, numberOfKeyGroups);
            if (keyGroupPrefixBytes == 1) {
                // copy and override one byte (42) between key and namespace
                System.arraycopy(keyAndNamespace, 0, keyAndNamespace, 1, len);
                keyAndNamespace[0] = (byte) keyGroup;
            } else {
                byte[] largerKey = new byte[1 + keyAndNamespace.length];
                // write the two key-group prefix bytes
                largerKey[0] = (byte) ((keyGroup >> 8) & 0xFF);
                largerKey[1] = (byte) (keyGroup & 0xFF);
                // write the key
                System.arraycopy(keyAndNamespace, 0, largerKey, 2, len);
                // skip one byte (42), write the namespace
                System.arraycopy(keyAndNamespace, 1 + len, largerKey, 2 + len, keyAndNamespace.length - len - 1);
                keyAndNamespace = largerKey;
            }
            byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            db.put(handle, keyAndNamespace, value);
        }
    } catch (EOFException e) {
        // expected: the legacy format carries no entry count, so EOF marks the end
    }
}
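The arraycopy juggling above rewrites each legacy entry from the layout "key | 42 | namespace" into "key-group prefix | key | namespace". The same transformation, pulled out into a self-contained sketch (the class and method names here are illustrative, not part of Flink):

    import java.util.Arrays;

    public class KeyGroupPrefixExample {

        // Rewrites "key | 42 | namespace" into "key-group prefix | key | namespace".
        static byte[] prefixWithKeyGroup(byte[] keyAndNamespace, int keyLen, int keyGroup, int prefixBytes) {
            byte[] result = new byte[prefixBytes + keyAndNamespace.length - 1];
            if (prefixBytes == 1) {
                result[0] = (byte) keyGroup;
            } else {
                // big-endian two-byte prefix, matching the largerKey branch above
                result[0] = (byte) ((keyGroup >> 8) & 0xFF);
                result[1] = (byte) (keyGroup & 0xFF);
            }
            // copy the key, skip the single 42 separator byte, then copy the namespace
            System.arraycopy(keyAndNamespace, 0, result, prefixBytes, keyLen);
            System.arraycopy(keyAndNamespace, keyLen + 1, result, prefixBytes + keyLen,
                keyAndNamespace.length - keyLen - 1);
            return result;
        }

        public static void main(String[] args) {
            byte[] legacy = { 1, 2, 3, 42, 9, 9 }; // 3-byte key, separator, 2-byte namespace
            System.out.println(Arrays.toString(prefixWithKeyGroup(legacy, 3, 300, 2)));
            // prints [1, 44, 1, 2, 3, 9, 9]: prefix 0x012C (= 300), then key, then namespace
        }
    }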
Use of org.apache.flink.core.memory.DataInputView in project flink by apache.
The class AbstractMigrationRestoreStrategy, method deserialize.
@Override
public StateTable<K, N, S> deserialize(String stateName, HeapKeyedStateBackend<K> stateBackend) throws IOException {
    Preconditions.checkNotNull(stateName, "State name is null. Cannot deserialize snapshot.");
    Preconditions.checkNotNull(stateBackend, "State backend is null. Cannot deserialize snapshot.");
    final KeyGroupRange keyGroupRange = stateBackend.getKeyGroupRange();
    Preconditions.checkState(1 == keyGroupRange.getNumberOfKeyGroups(),
        "Unexpected number of key-groups for restoring from Flink 1.1");

    // map the legacy VoidSerializer namespace onto the new VoidNamespace type
    TypeSerializer<N> patchedNamespaceSerializer = this.namespaceSerializer;
    if (patchedNamespaceSerializer instanceof VoidSerializer) {
        patchedNamespaceSerializer = (TypeSerializer<N>) VoidNamespaceSerializer.INSTANCE;
    }

    RegisteredBackendStateMetaInfo<N, S> registeredBackendStateMetaInfo =
        new RegisteredBackendStateMetaInfo<>(
            StateDescriptor.Type.UNKNOWN,
            stateName,
            patchedNamespaceSerializer,
            stateSerializer);

    final StateTable<K, N, S> stateTable = stateBackend.newStateTable(registeredBackendStateMetaInfo);
    final DataInputView inView = openDataInputView();
    final int keyGroup = keyGroupRange.getStartKeyGroup();

    // layout: numNamespaces, then per namespace: the namespace, numKV, and that many key/value pairs
    final int numNamespaces = inView.readInt();
    for (int i = 0; i < numNamespaces; i++) {
        N namespace = namespaceSerializer.deserialize(inView);
        if (null == namespace) {
            namespace = (N) VoidNamespace.INSTANCE;
        }
        final int numKV = inView.readInt();
        for (int j = 0; j < numKV; j++) {
            K key = keySerializer.deserialize(inView);
            S value = stateSerializer.deserialize(inView);
            stateTable.put(key, keyGroup, namespace, value);
        }
    }
    return stateTable;
}
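The snapshot layout consumed by this method is simply an int count of namespaces, then for each namespace the serialized namespace itself, an int count of key/value pairs, and the pairs. A minimal sketch that writes and re-reads that layout with Flink's in-memory views (the concrete serializers and values are made up for illustration):

    import org.apache.flink.api.common.typeutils.base.IntSerializer;
    import org.apache.flink.api.common.typeutils.base.StringSerializer;
    import org.apache.flink.core.memory.DataInputDeserializer;
    import org.apache.flink.core.memory.DataOutputSerializer;

    public class MigrationLayoutExample {

        public static void main(String[] args) throws Exception {
            DataOutputSerializer out = new DataOutputSerializer(64);
            out.writeInt(1);                                // one namespace
            StringSerializer.INSTANCE.serialize("ns", out); // the namespace itself
            out.writeInt(2);                                // two key/value pairs
            IntSerializer.INSTANCE.serialize(1, out);
            StringSerializer.INSTANCE.serialize("one", out);
            IntSerializer.INSTANCE.serialize(2, out);
            StringSerializer.INSTANCE.serialize("two", out);

            // mirror the nested read loops in deserialize()
            DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
            int numNamespaces = in.readInt();
            for (int i = 0; i < numNamespaces; i++) {
                String namespace = StringSerializer.INSTANCE.deserialize(in);
                int numKV = in.readInt();
                for (int j = 0; j < numKV; j++) {
                    System.out.println(namespace + ": " + IntSerializer.INSTANCE.deserialize(in)
                        + " -> " + StringSerializer.INSTANCE.deserialize(in));
                }
            }
        }
    }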
Use of org.apache.flink.core.memory.DataInputView in project flink by apache.
The class AvroSerializerSnapshotTest, method restorePastSnapshots.
@Test
public void restorePastSnapshots() throws IOException {
    for (int pastVersion : PAST_VERSIONS) {
        AvroSerializer<GenericRecord> currentSerializer =
            new AvroSerializer<>(GenericRecord.class, Address.getClassSchema());
        DataInputView in =
            new DataInputDeserializer(Files.readAllBytes(getSerializerSnapshotFilePath(pastVersion)));
        TypeSerializerSnapshot<GenericRecord> restored =
            TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
                in, AvroSerializer.class.getClassLoader(), null);
        assertThat(restored.resolveSchemaCompatibility(currentSerializer), isCompatibleAsIs());
    }
}
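The snapshot files read above are produced by the write-side counterpart. A sketch of the same round trip done purely in memory; note this assumes a Flink version where writeSerializerSnapshot, like the three-argument readSerializerSnapshot used in the test, still takes the serializer as an extra argument:

    AvroSerializer<GenericRecord> serializer =
        new AvroSerializer<>(GenericRecord.class, Address.getClassSchema());

    // write the snapshot of the current serializer into an in-memory view
    DataOutputSerializer out = new DataOutputSerializer(4096);
    TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
        out, serializer.snapshotConfiguration(), serializer);

    // read it back and check it is compatible as-is with the serializer that wrote it
    DataInputView in = new DataInputDeserializer(out.getCopyOfBuffer());
    TypeSerializerSnapshot<GenericRecord> restored =
        TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
            in, AvroSerializer.class.getClassLoader(), null);
    assertThat(restored.resolveSchemaCompatibility(serializer), isCompatibleAsIs());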
Use of org.apache.flink.core.memory.DataInputView in project flink by apache.
The class TypeSerializerUpgradeTestBase, method upgradedSerializerIsValidAfterMigration.
@Test
public void upgradedSerializerIsValidAfterMigration() throws Exception {
    try (ThreadContextClassLoader ignored =
            new ThreadContextClassLoader(testSpecification.verifier.verifierClassloader)) {
        TypeSerializerSnapshot<UpgradedElementT> restoredSerializerSnapshot = snapshotUnderTest();
        TypeSerializer<UpgradedElementT> upgradedSerializer =
            testSpecification.verifier.createUpgradedSerializer();
        TypeSerializerSchemaCompatibility<UpgradedElementT> upgradeCompatibility =
            restoredSerializerSnapshot.resolveSchemaCompatibility(upgradedSerializer);
        assumeThat(
            "This test only applies for test specifications that verify an upgraded serializer that requires migration to be compatible.",
            upgradeCompatibility,
            TypeSerializerMatchers.isCompatibleAfterMigration());

        // migrate the previous data schema, ...
        TypeSerializer<UpgradedElementT> restoreSerializer = restoredSerializerSnapshot.restoreSerializer();
        DataInputView migratedData =
            readAndThenWriteData(dataUnderTest(), restoreSerializer, upgradedSerializer,
                testSpecification.verifier.testDataMatcher());

        // ... and then assert that the upgraded serializer is valid with the migrated data
        assertSerializerIsValid(upgradedSerializer, migratedData, testSpecification.verifier.testDataMatcher());
    }
}
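readAndThenWriteData is a helper of the test base; a plausible shape for it (shown here for a single element and not the verbatim Flink implementation, which may loop over several) is: read with the restore serializer, check the element against the matcher, then re-serialize it with the upgraded serializer into a fresh view:

    private static <T> DataInputView readAndThenWriteData(
            DataInputView originalDataInput,
            TypeSerializer<T> readSerializer,
            TypeSerializer<T> writeSerializer,
            Matcher<T> testDataMatcher) throws IOException {

        // read with the old (restore) serializer and sanity-check the element
        T data = readSerializer.deserialize(originalDataInput);
        assertThat(data, testDataMatcher);

        // re-serialize with the upgraded serializer; the caller reads it back from the returned view
        DataOutputSerializer out = new DataOutputSerializer(128);
        writeSerializer.serialize(data, out);
        return new DataInputDeserializer(out.getCopyOfBuffer());
    }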
Use of org.apache.flink.core.memory.DataInputView in project flink by apache.
The class OutputEmitterTest, method testWrongKeyClass.
@Test
public void testWrongKeyClass() throws Exception {
    // the comparator expects a DoubleValue in field 0, but the record will carry an IntValue
    final TypeComparator<Record> doubleComp =
        new RecordComparatorFactory(new int[] { 0 }, new Class[] { DoubleValue.class }).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> selector =
        createChannelSelector(ShipStrategyType.PARTITION_HASH, doubleComp, 100);
    final SerializationDelegate<Record> delegate =
        new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    // round-trip the record through a pipe so the key is read from its serialized form
    PipedInputStream pipedInput = new PipedInputStream(1024 * 1024);
    DataInputView in = new DataInputViewStreamWrapper(pipedInput);
    DataOutputView out = new DataOutputViewStreamWrapper(new PipedOutputStream(pipedInput));

    Record record = new Record(1);
    record.setField(0, new IntValue());
    record.write(out);
    record = new Record();
    record.read(in);

    try {
        delegate.setInstance(record);
        selector.selectChannel(delegate);
    } catch (DeserializationException re) {
        return;
    }
    Assert.fail("Expected a DeserializationException.");
}
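The piped-stream round trip in the middle of the test is a useful pattern on its own: it forces the Record into its serialized representation so that accessing a field has to deserialize it. A standalone sketch (the class name is illustrative):

    import java.io.PipedInputStream;
    import java.io.PipedOutputStream;
    import org.apache.flink.core.memory.DataInputViewStreamWrapper;
    import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
    import org.apache.flink.types.IntValue;
    import org.apache.flink.types.Record;

    public class RecordRoundTripExample {

        public static void main(String[] args) throws Exception {
            PipedInputStream pipedInput = new PipedInputStream(1024 * 1024);
            DataOutputViewStreamWrapper out =
                new DataOutputViewStreamWrapper(new PipedOutputStream(pipedInput));
            DataInputViewStreamWrapper in = new DataInputViewStreamWrapper(pipedInput);

            Record original = new Record(1);
            original.setField(0, new IntValue(42));
            original.write(out); // serialize into the pipe

            Record copy = new Record();
            copy.read(in);       // read back the serialized form
            System.out.println(copy.getField(0, IntValue.class)); // prints 42
        }
    }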