Use of org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionV0 in project kafka by apache:
class StopReplicaRequest, method topicStates().
/**
* Note that this method has allocation overhead per iterated element, so callers should copy the result into
* another collection if they need to iterate more than once.
*
* Implementation note: we should strive to avoid allocation overhead per element, see
* `UpdateMetadataRequest.partitionStates()` for the preferred approach. That's not possible in this case and
* StopReplicaRequest should be relatively rare in comparison to other request types.
*/
public Iterable<StopReplicaTopicState> topicStates() {
    if (version() < 1) {
        Map<String, StopReplicaTopicState> topicStates = new HashMap<>();
        for (StopReplicaPartitionV0 partition : data.ungroupedPartitions()) {
            StopReplicaTopicState topicState = topicStates.computeIfAbsent(partition.topicName(),
                topic -> new StopReplicaTopicState().setTopicName(topic));
            topicState.partitionStates().add(new StopReplicaPartitionState()
                .setPartitionIndex(partition.partitionIndex())
                .setDeletePartition(data.deletePartitions()));
        }
        return topicStates.values();
    } else if (version() < 3) {
        return () -> new MappedIterator<>(data.topics().iterator(), topic ->
            new StopReplicaTopicState()
                .setTopicName(topic.name())
                .setPartitionStates(topic.partitionIndexes().stream()
                    .map(partition -> new StopReplicaPartitionState()
                        .setPartitionIndex(partition)
                        .setDeletePartition(data.deletePartitions()))
                    .collect(Collectors.toList())));
    } else {
        return data.topicStates();
    }
}
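Because the pre-v3 branches allocate a new wrapper object per iterated element, a caller that needs several passes should copy the result once and reuse the copy. A minimal sketch of that pattern, assuming a request variable holding an already-built StopReplicaRequest:

// Minimal sketch, not taken from the Kafka sources: materialize the iterable
// once so repeated passes do not re-allocate per-element wrappers on old versions.
List<StopReplicaTopicState> copied = new ArrayList<>();
for (StopReplicaTopicState topicState : request.topicStates()) {
    copied.add(topicState);
}
// Later iterations walk the copied list instead of calling topicStates() again.
for (StopReplicaTopicState topicState : copied) {
    // inspect topicState.topicName() and topicState.partitionStates() as needed
}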
Use of org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionV0 in project kafka by apache:
class StopReplicaRequestTest, method testBuilderNormalization().
private void testBuilderNormalization(boolean deletePartitions) {
    List<StopReplicaTopicState> topicStates = topicStates(deletePartitions);
    Map<TopicPartition, StopReplicaPartitionState> expectedPartitionStates =
        StopReplicaRequestTest.partitionStates(topicStates);
    for (short version : STOP_REPLICA.allVersions()) {
        StopReplicaRequest request = new StopReplicaRequest.Builder(
            version, 0, 1, 0, deletePartitions, topicStates).build(version);
        StopReplicaRequestData data = request.data();
        if (version < 1) {
            Set<TopicPartition> partitions = new HashSet<>();
            for (StopReplicaPartitionV0 partition : data.ungroupedPartitions()) {
                partitions.add(new TopicPartition(partition.topicName(), partition.partitionIndex()));
            }
            assertEquals(expectedPartitionStates.keySet(), partitions);
            assertEquals(deletePartitions, data.deletePartitions());
        } else if (version < 3) {
            Set<TopicPartition> partitions = new HashSet<>();
            for (StopReplicaTopicV1 topic : data.topics()) {
                for (Integer partition : topic.partitionIndexes()) {
                    partitions.add(new TopicPartition(topic.name(), partition));
                }
            }
            assertEquals(expectedPartitionStates.keySet(), partitions);
            assertEquals(deletePartitions, data.deletePartitions());
        } else {
            Map<TopicPartition, StopReplicaPartitionState> partitionStates =
                StopReplicaRequestTest.partitionStates(data.topicStates());
            assertEquals(expectedPartitionStates, partitionStates);
            // The top-level deletePartitions flag is always false from version 3 onwards
            assertFalse(data.deletePartitions());
        }
    }
}
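The test leans on a partitionStates helper that flattens topic states into a map keyed by TopicPartition. The sketch below is a hypothetical reconstruction of what such a helper could look like, inferred only from how it is called above; the actual helper in StopReplicaRequestTest may differ.

// Hypothetical reconstruction of the helper used above, not copied from the Kafka tests.
private static Map<TopicPartition, StopReplicaPartitionState> partitionStates(
        Iterable<StopReplicaTopicState> topicStates) {
    Map<TopicPartition, StopReplicaPartitionState> partitionStates = new HashMap<>();
    for (StopReplicaTopicState topicState : topicStates) {
        for (StopReplicaPartitionState partitionState : topicState.partitionStates()) {
            // Key each partition state by its topic name and partition index.
            partitionStates.put(
                new TopicPartition(topicState.topicName(), partitionState.partitionIndex()),
                partitionState);
        }
    }
    return partitionStates;
}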