Usage example of org.apache.flink.connector.kafka.source.split.KafkaPartitionSplitSerializer in the Apache Flink project.
The following is the testBackwardCompatibility method of the class KafkaSourceEnumStateSerializerTest.
@Test
public void testBackwardCompatibility() throws IOException {
    // Build the reference partition set and the per-reader split assignment
    // derived from it.
    final Set<TopicPartition> topicPartitions = constructTopicPartitions();
    final Map<Integer, Set<KafkaPartitionSplit>> splitAssignments =
            toSplitAssignments(topicPartitions);

    // Produce bytes exactly the way the version-0 KafkaEnumStateSerializer
    // serialized its state.
    final byte[] serializedV0State =
            SerdeUtils.serializeSplitAssignments(
                    splitAssignments, new KafkaPartitionSplitSerializer());

    // The version-1 KafkaEnumStateSerializer must still be able to restore
    // state from version-0 bytes (backward compatibility).
    final KafkaSourceEnumState restoredState =
            new KafkaSourceEnumStateSerializer().deserialize(0, serializedV0State);

    assertEquals(topicPartitions, restoredState.assignedPartitions());
}
Usage example of org.apache.flink.connector.kafka.source.split.KafkaPartitionSplitSerializer in the Apache Flink project.
The following is the deserialize method of the class KafkaSourceEnumStateSerializer.
@Override
public KafkaSourceEnumState deserialize(int version, byte[] serialized) throws IOException {
    // Current format: the bytes are just the serialized set of assigned partitions.
    if (version == CURRENT_VERSION) {
        return new KafkaSourceEnumState(deserializeTopicPartitions(serialized));
    }

    // Backward compatibility with version 0, which stored a
    // reader-id -> assigned-splits map. Flatten the splits of every reader
    // into the plain partition set used by the current state format.
    if (version == VERSION_0) {
        final Map<Integer, Set<KafkaPartitionSplit>> legacyAssignments =
                SerdeUtils.deserializeSplitAssignments(
                        serialized, new KafkaPartitionSplitSerializer(), HashSet::new);
        final Set<TopicPartition> assignedPartitions = new HashSet<>();
        for (Set<KafkaPartitionSplit> splits : legacyAssignments.values()) {
            for (KafkaPartitionSplit split : splits) {
                assignedPartitions.add(split.getTopicPartition());
            }
        }
        return new KafkaSourceEnumState(assignedPartitions);
    }

    throw new IOException(
            String.format(
                    "The bytes are serialized with version %d, "
                            + "while this deserializer only supports version up to %d",
                    version, CURRENT_VERSION));
}
Aggregations