Usage example of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in the Apache Kafka project: class UpdateMetadataRequestTest, method testTopicPartitionGroupingSizeReduction.
/**
 * Verifies that the topic-grouped wire format introduced in version 5 serializes to fewer
 * bytes than the flat per-partition format used by version 4 for the same partition states.
 */
@Test
public void testTopicPartitionGroupingSizeReduction() {
    Set<TopicPartition> topicPartitions = TestUtils.generateRandomTopicPartitions(10, 10);
    List<UpdateMetadataPartitionState> partitionStates = new ArrayList<>();
    for (TopicPartition topicPartition : topicPartitions) {
        partitionStates.add(new UpdateMetadataPartitionState()
            .setTopicName(topicPartition.topic())
            .setPartitionIndex(topicPartition.partition()));
    }
    UpdateMetadataRequest.Builder builder = new UpdateMetadataRequest.Builder(
        (short) 5, 0, 0, 0, partitionStates, Collections.emptyList(), Collections.emptyMap());
    // Same builder, two wire versions: grouping by topic must strictly shrink the request.
    assertTrue(builder.build((short) 5).sizeInBytes() < builder.build((short) 4).sizeInBytes());
}
Usage example of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in the Apache Kafka project: class UpdateMetadataRequestTest, method testVersionLogic.
/**
 * Verifies the logic we have in UpdateMetadataRequest to present a unified interface across the various versions
 * works correctly. For example, `UpdateMetadataPartitionState.topicName` is not serialized/deserialized in
 * recent versions, but we set it manually so that we can always present the ungrouped partition states
 * independently of the version.
 */
@Test
public void testVersionLogic() {
String topic0 = "topic0";
String topic1 = "topic1";
// Exercise every supported UPDATE_METADATA version; fields introduced later are only
// populated (and later only expected back) when the version supports them.
for (short version : UPDATE_METADATA.allVersions()) {
// Three partitions across two topics, fully populated including offline replicas on the first.
List<UpdateMetadataPartitionState> partitionStates = asList(new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(0).setControllerEpoch(2).setLeader(0).setLeaderEpoch(10).setIsr(asList(0, 1)).setZkVersion(10).setReplicas(asList(0, 1, 2)).setOfflineReplicas(asList(2)), new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(1).setControllerEpoch(2).setLeader(1).setLeaderEpoch(11).setIsr(asList(1, 2, 3)).setZkVersion(11).setReplicas(asList(1, 2, 3)).setOfflineReplicas(emptyList()), new UpdateMetadataPartitionState().setTopicName(topic1).setPartitionIndex(0).setControllerEpoch(2).setLeader(2).setLeaderEpoch(11).setIsr(asList(2, 3)).setZkVersion(11).setReplicas(asList(2, 3, 4)).setOfflineReplicas(emptyList()));
List<UpdateMetadataEndpoint> broker0Endpoints = new ArrayList<>();
broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id));
// Non plaintext endpoints are only supported from version 1
if (version >= 1) {
broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9091).setSecurityProtocol(SecurityProtocol.SSL.id));
}
// Custom listeners are only supported from version 3
if (version >= 3) {
broker0Endpoints.get(0).setListener("listener0");
broker0Endpoints.get(1).setListener("listener1");
}
List<UpdateMetadataBroker> liveBrokers = asList(new UpdateMetadataBroker().setId(0).setRack("rack0").setEndpoints(broker0Endpoints), new UpdateMetadataBroker().setId(1).setEndpoints(asList(new UpdateMetadataEndpoint().setHost("host1").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setListener("PLAINTEXT"))));
Map<String, Uuid> topicIds = new HashMap<>();
topicIds.put(topic0, Uuid.randomUuid());
topicIds.put(topic1, Uuid.randomUuid());
UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(version, 1, 2, 3, partitionStates, liveBrokers, topicIds).build();
// Before serialization the request must expose exactly what we put in, for every version.
assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
assertEquals(liveBrokers, request.liveBrokers());
assertEquals(1, request.controllerId());
assertEquals(2, request.controllerEpoch());
assertEquals(3, request.brokerEpoch());
// Round-trip through the wire format, then adjust the expected values to drop whatever
// this version cannot carry before comparing against the deserialized request.
ByteBuffer byteBuffer = request.serialize();
UpdateMetadataRequest deserializedRequest = new UpdateMetadataRequest(new UpdateMetadataRequestData(new ByteBufferAccessor(byteBuffer), version), version);
// Rack is only supported from version 2
if (version < 2) {
for (UpdateMetadataBroker liveBroker : liveBrokers) liveBroker.setRack("");
}
// Non plaintext listener name is only supported from version 3
if (version < 3) {
// Older versions infer the listener name from the security protocol on deserialization.
for (UpdateMetadataBroker liveBroker : liveBrokers) {
for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
endpoint.setListener(ListenerName.forSecurityProtocol(securityProtocol).value());
}
}
}
// Offline replicas are only supported from version 4
if (version < 4)
partitionStates.get(0).setOfflineReplicas(emptyList());
assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
assertEquals(liveBrokers, deserializedRequest.liveBrokers());
assertEquals(1, deserializedRequest.controllerId());
assertEquals(2, deserializedRequest.controllerEpoch());
// Broker epoch is only supported from version 5
if (version >= 5)
assertEquals(3, deserializedRequest.brokerEpoch());
else
assertEquals(-1, deserializedRequest.brokerEpoch());
// Topic IDs are only carried on the wire from version 7; earlier versions come back as ZERO_UUID.
long topicIdCount = deserializedRequest.data().topicStates().stream().map(UpdateMetadataRequestData.UpdateMetadataTopicState::topicId).filter(topicId -> topicId != Uuid.ZERO_UUID).count();
if (version >= 7)
assertEquals(2, topicIdCount);
else
assertEquals(0, topicIdCount);
}
}
Usage example of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in the Apache Kafka project: class IntegrationTestUtils, method waitUntilMetadataIsPropagated.
/**
 * Blocks until every given broker's metadata cache contains partition info for
 * {@code topic}/{@code partition} with a valid leader broker id, or the timeout expires.
 *
 * @param servers   brokers whose metadata caches must all converge
 * @param topic     topic to look up
 * @param partition partition index to look up
 * @param timeout   maximum time to wait, in milliseconds
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
private static void waitUntilMetadataIsPropagated(final List<KafkaServer> servers, final String topic, final int partition, final long timeout) throws InterruptedException {
    final String baseReason = String.format("Metadata for topic=%s partition=%d was not propagated to all brokers within %d ms. ", topic, partition, timeout);
    retryOnExceptionWithTimeout(timeout, () -> {
        final List<KafkaServer> emptyPartitionInfos = new ArrayList<>();
        final List<KafkaServer> invalidBrokerIds = new ArrayList<>();
        for (final KafkaServer server : servers) {
            final MetadataCache metadataCache = server.dataPlaneRequestProcessor().metadataCache();
            final Option<UpdateMetadataPartitionState> partitionInfo = metadataCache.getPartitionInfo(topic, partition);
            if (partitionInfo.isEmpty()) {
                emptyPartitionInfos.add(server);
                continue;
            }
            final UpdateMetadataPartitionState metadataPartitionState = partitionInfo.get();
            if (!Request.isValidBrokerId(metadataPartitionState.leader())) {
                invalidBrokerIds.add(server);
            }
        }
        // baseReason already ends with ". "; don't prepend another separator or the
        // failure message reads "... ms. . Brokers without ...".
        final String reason = baseReason + "Brokers without partition info: " + emptyPartitionInfos + ". Brokers with invalid broker id for partition leader: " + invalidBrokerIds;
        assertThat(reason, emptyPartitionInfos.isEmpty() && invalidBrokerIds.isEmpty());
    });
}
Usage example of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in the Apache Kafka project: class UpdateMetadataRequest, method normalize.
/**
 * Rewrites the deserialized request data so callers see a uniform view across versions:
 * every live broker exposes an endpoint list with listener names filled in, and every
 * partition state carries its topic name even when the wire format grouped by topic.
 */
private void normalize() {
    // Version 3 added support for listener name, which we can infer from the security protocol for older versions
    if (version() < 3) {
        for (UpdateMetadataBroker liveBroker : data.liveBrokers()) {
            boolean v0SingleHostPort = version() == 0 && liveBroker.endpoints().isEmpty();
            if (v0SingleHostPort) {
                // Version 0 carried a bare host/port pair; synthesize the PLAINTEXT endpoint
                // so that callers can rely on endpoints() always being present.
                SecurityProtocol plaintext = SecurityProtocol.PLAINTEXT;
                UpdateMetadataEndpoint synthesized = new UpdateMetadataEndpoint()
                    .setHost(liveBroker.v0Host())
                    .setPort(liveBroker.v0Port())
                    .setSecurityProtocol(plaintext.id)
                    .setListener(ListenerName.forSecurityProtocol(plaintext).value());
                liveBroker.setEndpoints(singletonList(synthesized));
            } else {
                for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
                    // Fill in the listener so that callers can rely on it always being present
                    if (endpoint.listener().isEmpty())
                        endpoint.setListener(listenerNameFromSecurityProtocol(endpoint));
                }
            }
        }
    }
    // Version 5 onwards groups partitions under topic states on the wire; copy the topic
    // name onto each partition state so the ungrouped view can always be presented.
    if (version() >= 5) {
        for (UpdateMetadataTopicState topicState : data.topicStates()) {
            for (UpdateMetadataPartitionState partitionState : topicState.partitionStates())
                partitionState.setTopicName(topicState.topicName());
        }
    }
}
Usage example of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in the Apache Kafka project: class MetadataRequestBenchmark, method initializeMetadataCache.
/**
 * Seeds the benchmark's metadata cache: five live brokers and
 * {@code topicCount * partitionCount} partition states, applied via a single
 * UpdateMetadataRequest at the latest protocol version.
 */
private void initializeMetadataCache() {
    List<UpdateMetadataBroker> liveBrokers = new LinkedList<>();
    List<UpdateMetadataPartitionState> partitionStates = new LinkedList<>();
    for (int brokerId = 0; brokerId < 5; brokerId++) {
        liveBrokers.add(new UpdateMetadataBroker()
            .setId(brokerId)
            .setEndpoints(endpoints(brokerId))
            .setRack("rack1"));
    }
    for (int topicNum = 0; topicNum < topicCount; topicNum++) {
        String topicName = "topic-" + topicNum;
        for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
            // NOTE(review): leader is partitionCount % 5 — constant across all partitions, so
            // every partition lands on the same leader. Presumably partitionId % 5 was intended;
            // confirm before changing, as it would alter the benchmark's load distribution.
            partitionStates.add(new UpdateMetadataPartitionState()
                .setTopicName(topicName)
                .setPartitionIndex(partitionId)
                .setControllerEpoch(1)
                .setLeader(partitionCount % 5)
                .setLeaderEpoch(0)
                .setIsr(Arrays.asList(0, 1, 3))
                .setZkVersion(1)
                .setReplicas(Arrays.asList(0, 1, 3)));
        }
    }
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(
        ApiKeys.UPDATE_METADATA.latestVersion(), 1, 1, 1,
        partitionStates, liveBrokers, Collections.emptyMap()).build();
    metadataCache.updateMetadata(100, updateMetadataRequest);
}
End of aggregated usage examples.