Use of org.apache.flink.api.common.typeutils.TypeSerializerSnapshot in project flink by apache.
From the class KryoSerializerCompatibilityTest, method testMigrationStrategyForRemovedAvroDependency:
@Test
public void testMigrationStrategyForRemovedAvroDependency() throws Exception {
    KryoSerializer<TestClass> kryoSerializerForA =
        new KryoSerializer<>(TestClass.class, new ExecutionConfig());

    // read configuration again from bytes
    TypeSerializerSnapshot kryoSerializerConfigSnapshot;
    try (InputStream in = getClass().getResourceAsStream("/kryo-serializer-flink1.3-snapshot")) {
        kryoSerializerConfigSnapshot =
            TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
                new DataInputViewStreamWrapper(in),
                Thread.currentThread().getContextClassLoader(),
                kryoSerializerForA);
    }

    @SuppressWarnings("unchecked")
    TypeSerializerSchemaCompatibility<TestClass> compatResult =
        kryoSerializerConfigSnapshot.resolveSchemaCompatibility(kryoSerializerForA);
    assertTrue(compatResult.isCompatibleAsIs());
}
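The compatibility handshake exercised by this test does not require a snapshot file on disk; the same check can be reproduced entirely in memory. Below is a minimal sketch, not part of the Flink test above, that snapshots a built-in StringSerializer and resolves compatibility against a fresh serializer instance, mirroring the resolveSchemaCompatibility call in the test. The class name and comments are illustrative; the API calls (snapshotConfiguration, resolveSchemaCompatibility, restoreSerializer) are the ones used by the code in this section.

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSchemaCompatibility;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.base.StringSerializer;

public class SnapshotCompatibilitySketch {

    public static void main(String[] args) {
        // the serializer that originally wrote the state
        TypeSerializer<String> oldSerializer = StringSerializer.INSTANCE;

        // the snapshot is what actually travels with a savepoint/checkpoint
        TypeSerializerSnapshot<String> snapshot = oldSerializer.snapshotConfiguration();

        // the serializer of the restoring job; same class here, so the result
        // is "compatible as is"
        TypeSerializer<String> newSerializer = StringSerializer.INSTANCE;

        TypeSerializerSchemaCompatibility<String> compat =
            snapshot.resolveSchemaCompatibility(newSerializer);

        if (compat.isCompatibleAsIs()) {
            // old bytes can be read directly with the new serializer
        } else if (compat.isCompatibleAfterMigration()) {
            // read old bytes with the serializer restored from the snapshot,
            // then rewrite them with the new serializer
            TypeSerializer<String> priorSerializer = snapshot.restoreSerializer();
        } else if (compat.isIncompatible()) {
            // state cannot be restored with the new serializer
        }
    }
}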
Use of org.apache.flink.api.common.typeutils.TypeSerializerSnapshot in project flink by apache.
From the class RocksDBKeyedStateBackend, method migrateStateValues:
/**
 * Migrate only the state value, that is the "value" that is stored in RocksDB. We don't migrate
 * the key here, which is made up of key group, key, namespace and map key (in case of
 * MapState).
 */
@SuppressWarnings("unchecked")
private <N, S extends State, SV> void migrateStateValues(
        StateDescriptor<S, SV> stateDesc,
        Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> stateMetaInfo)
        throws Exception {

    if (stateDesc.getType() == StateDescriptor.Type.MAP) {
        TypeSerializerSnapshot<SV> previousSerializerSnapshot =
            stateMetaInfo.f1.getPreviousStateSerializerSnapshot();
        checkState(
            previousSerializerSnapshot != null,
            "the previous serializer snapshot should exist.");
        checkState(
            previousSerializerSnapshot instanceof MapSerializerSnapshot,
            "previous serializer snapshot should be a MapSerializerSnapshot.");

        TypeSerializer<SV> newSerializer = stateMetaInfo.f1.getStateSerializer();
        checkState(newSerializer instanceof MapSerializer, "new serializer should be a MapSerializer.");

        MapSerializer<?, ?> mapSerializer = (MapSerializer<?, ?>) newSerializer;
        MapSerializerSnapshot<?, ?> mapSerializerSnapshot =
            (MapSerializerSnapshot<?, ?>) previousSerializerSnapshot;
        if (!checkMapStateKeySchemaCompatibility(mapSerializerSnapshot, mapSerializer)) {
            throw new StateMigrationException(
                "The new serializer for a MapState requires state migration in order for the job to proceed, since the key schema has changed. However, migration for MapState currently only allows value schema evolutions.");
        }
    }

    LOG.info(
        "Performing state migration for state {} because the state serializer's schema, i.e. serialization format, has changed.",
        stateDesc);

    // we need to get an actual state instance because migration is different
    // for different state types. For example, ListState needs to deal with
    // individual elements
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getType());
    if (stateFactory == null) {
        String message =
            String.format("State %s is not supported by %s", stateDesc.getClass(), this.getClass());
        throw new FlinkRuntimeException(message);
    }
    State state = stateFactory.createState(stateDesc, stateMetaInfo, RocksDBKeyedStateBackend.this);
    if (!(state instanceof AbstractRocksDBState)) {
        throw new FlinkRuntimeException("State should be an AbstractRocksDBState but is " + state);
    }

    @SuppressWarnings("unchecked")
    AbstractRocksDBState<?, ?, SV> rocksDBState = (AbstractRocksDBState<?, ?, SV>) state;

    Snapshot rocksDBSnapshot = db.getSnapshot();
    try (RocksIteratorWrapper iterator =
                RocksDBOperationUtils.getRocksIterator(db, stateMetaInfo.f0, readOptions);
         RocksDBWriteBatchWrapper batchWriter =
                new RocksDBWriteBatchWrapper(db, getWriteOptions(), getWriteBatchSize())) {
        iterator.seekToFirst();

        DataInputDeserializer serializedValueInput = new DataInputDeserializer();
        DataOutputSerializer migratedSerializedValueOutput = new DataOutputSerializer(512);
        while (iterator.isValid()) {
            serializedValueInput.setBuffer(iterator.value());

            rocksDBState.migrateSerializedValue(
                serializedValueInput,
                migratedSerializedValueOutput,
                stateMetaInfo.f1.getPreviousStateSerializer(),
                stateMetaInfo.f1.getStateSerializer());

            batchWriter.put(
                stateMetaInfo.f0, iterator.key(), migratedSerializedValueOutput.getCopyOfBuffer());
            migratedSerializedValueOutput.clear();
            iterator.next();
        }
    } finally {
        db.releaseSnapshot(rocksDBSnapshot);
        rocksDBSnapshot.close();
    }
}
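Conceptually, each iteration of the loop above rewrites one RocksDB value from the old serialization format to the new one. The following is a simplified sketch, under the assumption of a plain value state, of what that per-value step amounts to: deserialize the old bytes with the previous serializer and re-serialize the value with the new one. The class and method names here are illustrative; the real AbstractRocksDBState#migrateSerializedValue additionally handles state-specific layouts (list elements, map entries) and null values.

import java.io.IOException;

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;

public final class ValueMigrationSketch {

    private ValueMigrationSketch() {}

    public static <T> byte[] migrateSerializedValue(
            byte[] oldBytes,
            TypeSerializer<T> priorSerializer,
            TypeSerializer<T> newSerializer) throws IOException {

        DataInputDeserializer in = new DataInputDeserializer(oldBytes);
        DataOutputSerializer out = new DataOutputSerializer(oldBytes.length);

        // deserialize with the schema that wrote the bytes ...
        T value = priorSerializer.deserialize(in);
        // ... and re-serialize with the new schema
        newSerializer.serialize(value, out);

        return out.getCopyOfBuffer();
    }
}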
Use of org.apache.flink.api.common.typeutils.TypeSerializerSnapshot in project flink by apache.
From the class KeyedBackendSerializationProxy, method read:
@SuppressWarnings("unchecked")
@Override
public void read(DataInputView in) throws IOException {
    super.read(in);

    final int readVersion = getReadVersion();

    if (readVersion >= 4) {
        usingKeyGroupCompression = in.readBoolean();
    } else {
        usingKeyGroupCompression = false;
    }

    // only starting from version 3, we have the key serializer and its config snapshot written
    if (readVersion >= 6) {
        this.keySerializerSnapshot =
            TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
                in, userCodeClassLoader, null);
    } else if (readVersion >= 3) {
        Tuple2<TypeSerializer<?>, TypeSerializerSnapshot<?>> keySerializerAndConfig =
            TypeSerializerSerializationUtil.readSerializersAndConfigsWithResilience(
                in, userCodeClassLoader).get(0);
        this.keySerializerSnapshot = (TypeSerializerSnapshot<K>) keySerializerAndConfig.f1;
    } else {
        this.keySerializerSnapshot =
            new BackwardsCompatibleSerializerSnapshot<>(
                TypeSerializerSerializationUtil.tryReadSerializer(in, userCodeClassLoader, true));
    }
    this.keySerializer = null;

    Integer metaInfoSnapshotVersion = META_INFO_SNAPSHOT_FORMAT_VERSION_MAPPER.get(readVersion);
    if (metaInfoSnapshotVersion == null) {
        // this should not happen; guard for the future
        throw new IOException(
            "Cannot determine corresponding meta info snapshot version for keyed backend serialization readVersion="
                + readVersion);
    }
    final StateMetaInfoReader stateMetaInfoReader =
        StateMetaInfoSnapshotReadersWriters.getReader(
            metaInfoSnapshotVersion, StateMetaInfoSnapshotReadersWriters.StateTypeHint.KEYED_STATE);

    int numKvStates = in.readShort();
    stateMetaInfoSnapshots = new ArrayList<>(numKvStates);
    for (int i = 0; i < numKvStates; i++) {
        StateMetaInfoSnapshot snapshot =
            stateMetaInfoReader.readStateMetaInfoSnapshot(in, userCodeClassLoader);
        stateMetaInfoSnapshots.add(snapshot);
    }
}
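The read method above follows a version-gated pattern: newer fields (such as the key-group compression flag) are only read when the persisted format version says they are present. The sketch below shows the same pattern applied to a custom TypeSerializerSnapshot. The class name WidgetSerializerSnapshot, the "compressed" flag, and the version numbers are illustrative assumptions, not part of Flink; the interface methods used match the TypeSerializerSnapshot API from the same Flink versions as the code in this section.

import java.io.IOException;

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSchemaCompatibility;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;

public class WidgetSerializerSnapshot implements TypeSerializerSnapshot<String> {

    private static final int CURRENT_VERSION = 2;

    // hypothetical config flag; only written since snapshot version 2
    private boolean compressed;

    @Override
    public int getCurrentVersion() {
        return CURRENT_VERSION;
    }

    @Override
    public void writeSnapshot(DataOutputView out) throws IOException {
        out.writeBoolean(compressed);
    }

    @Override
    public void readSnapshot(int readVersion, DataInputView in, ClassLoader userCodeClassLoader)
            throws IOException {
        // same pattern as KeyedBackendSerializationProxy#read: fields added in later
        // versions are only read when the persisted version includes them
        if (readVersion >= 2) {
            compressed = in.readBoolean();
        } else {
            compressed = false;
        }
    }

    @Override
    public TypeSerializer<String> restoreSerializer() {
        // stands in for rebuilding the serializer from the snapshotted configuration
        return StringSerializer.INSTANCE;
    }

    @Override
    public TypeSerializerSchemaCompatibility<String> resolveSchemaCompatibility(
            TypeSerializer<String> newSerializer) {
        return newSerializer instanceof StringSerializer
            ? TypeSerializerSchemaCompatibility.compatibleAsIs()
            : TypeSerializerSchemaCompatibility.incompatible();
    }
}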