
Example 1 with UpdateMetadataPartitionState

Use of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in project kafka by apache.

In the class UpdateMetadataRequestTest, the method testTopicPartitionGroupingSizeReduction:

@Test
public void testTopicPartitionGroupingSizeReduction() {
    Set<TopicPartition> tps = TestUtils.generateRandomTopicPartitions(10, 10);
    List<UpdateMetadataPartitionState> partitionStates = new ArrayList<>();
    for (TopicPartition tp : tps) {
        partitionStates.add(new UpdateMetadataPartitionState().setTopicName(tp.topic()).setPartitionIndex(tp.partition()));
    }
    UpdateMetadataRequest.Builder builder = new UpdateMetadataRequest.Builder((short) 5, 0, 0, 0, partitionStates, Collections.emptyList(), Collections.emptyMap());
    assertTrue(builder.build((short) 5).sizeInBytes() < builder.build((short) 4).sizeInBytes());
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState) Test(org.junit.jupiter.api.Test)
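The assertion holds because, from version 5 onward, the builder groups the flat partition states by topic so that each topic name is serialized only once. A minimal sketch that inspects the grouped form directly, reusing the partitionStates list and assertions from the test above (the data() and topicStates() accessors also appear in Example 2; variable names here are illustrative), might look like:

UpdateMetadataRequest v5Request = new UpdateMetadataRequest.Builder((short) 5, 0, 0, 0,
        partitionStates, Collections.emptyList(), Collections.emptyMap()).build((short) 5);
// Each distinct topic appears only once in topicStates(); its partitions are nested inside,
// which is why the version 5 encoding is smaller than version 4's flat partition list.
int groupedTopicCount = v5Request.data().topicStates().size();
assertTrue(groupedTopicCount <= partitionStates.size());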

Example 2 with UpdateMetadataPartitionState

Use of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in project kafka by apache.

In the class UpdateMetadataRequestTest, the method testVersionLogic:

/**
 * Verifies that the logic we have in UpdateMetadataRequest to present a unified interface across the various versions
 * works correctly. For example, `UpdateMetadataPartitionState.topicName` is not serialized/deserialized in
 * recent versions, but we set it manually so that we can always present the ungrouped partition states
 * independently of the version.
 */
@Test
public void testVersionLogic() {
    String topic0 = "topic0";
    String topic1 = "topic1";
    for (short version : UPDATE_METADATA.allVersions()) {
        List<UpdateMetadataPartitionState> partitionStates = asList(
            new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(0).setControllerEpoch(2)
                .setLeader(0).setLeaderEpoch(10).setIsr(asList(0, 1)).setZkVersion(10)
                .setReplicas(asList(0, 1, 2)).setOfflineReplicas(asList(2)),
            new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(1).setControllerEpoch(2)
                .setLeader(1).setLeaderEpoch(11).setIsr(asList(1, 2, 3)).setZkVersion(11)
                .setReplicas(asList(1, 2, 3)).setOfflineReplicas(emptyList()),
            new UpdateMetadataPartitionState().setTopicName(topic1).setPartitionIndex(0).setControllerEpoch(2)
                .setLeader(2).setLeaderEpoch(11).setIsr(asList(2, 3)).setZkVersion(11)
                .setReplicas(asList(2, 3, 4)).setOfflineReplicas(emptyList()));
        List<UpdateMetadataEndpoint> broker0Endpoints = new ArrayList<>();
        broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id));
        // Non plaintext endpoints are only supported from version 1
        if (version >= 1) {
            broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9091).setSecurityProtocol(SecurityProtocol.SSL.id));
        }
        // Custom listeners are only supported from version 3
        if (version >= 3) {
            broker0Endpoints.get(0).setListener("listener0");
            broker0Endpoints.get(1).setListener("listener1");
        }
        List<UpdateMetadataBroker> liveBrokers = asList(
            new UpdateMetadataBroker().setId(0).setRack("rack0").setEndpoints(broker0Endpoints),
            new UpdateMetadataBroker().setId(1).setEndpoints(asList(
                new UpdateMetadataEndpoint().setHost("host1").setPort(9090)
                    .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setListener("PLAINTEXT"))));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put(topic0, Uuid.randomUuid());
        topicIds.put(topic1, Uuid.randomUuid());
        UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(version, 1, 2, 3, partitionStates, liveBrokers, topicIds).build();
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveBrokers, request.liveBrokers());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        UpdateMetadataRequest deserializedRequest = new UpdateMetadataRequest(new UpdateMetadataRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Rack is only supported from version 2
        if (version < 2) {
            for (UpdateMetadataBroker liveBroker : liveBrokers) liveBroker.setRack("");
        }
        // Non plaintext listener name is only supported from version 3
        if (version < 3) {
            for (UpdateMetadataBroker liveBroker : liveBrokers) {
                for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
                    SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
                    endpoint.setListener(ListenerName.forSecurityProtocol(securityProtocol).value());
                }
            }
        }
        // Offline replicas are only supported from version 4
        if (version < 4)
            partitionStates.get(0).setOfflineReplicas(emptyList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(liveBrokers, deserializedRequest.liveBrokers());
        assertEquals(1, deserializedRequest.controllerId());
        assertEquals(2, deserializedRequest.controllerEpoch());
        // Broker epoch is only supported from version 5
        if (version >= 5)
            assertEquals(3, deserializedRequest.brokerEpoch());
        else
            assertEquals(-1, deserializedRequest.brokerEpoch());
        long topicIdCount = deserializedRequest.data().topicStates().stream()
            .map(UpdateMetadataRequestData.UpdateMetadataTopicState::topicId)
            .filter(topicId -> topicId != Uuid.ZERO_UUID)
            .count();
        if (version >= 7)
            assertEquals(2, topicIdCount);
        else
            assertEquals(0, topicIdCount);
    }
}
Also used : Uuid(org.apache.kafka.common.Uuid) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) HashMap(java.util.HashMap) UpdateMetadataBroker(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ListenerName(org.apache.kafka.common.network.ListenerName) Arrays.asList(java.util.Arrays.asList) Map(java.util.Map) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState) StreamSupport(java.util.stream.StreamSupport) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TopicPartition(org.apache.kafka.common.TopicPartition) UpdateMetadataRequestData(org.apache.kafka.common.message.UpdateMetadataRequestData) TestUtils(org.apache.kafka.test.TestUtils) Collections.emptyList(java.util.Collections.emptyList) Set(java.util.Set) UPDATE_METADATA(org.apache.kafka.common.protocol.ApiKeys.UPDATE_METADATA) Collectors(java.util.stream.Collectors) UpdateMetadataEndpoint(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) Test(org.junit.jupiter.api.Test) List(java.util.List) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Errors(org.apache.kafka.common.protocol.Errors) Collections(java.util.Collections)
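The set comparisons above rely on an iterableToSet helper that is not part of this snippet. Judging from the StreamSupport and Collectors entries in the import list, a minimal sketch of such a helper (an assumption, not the verbatim implementation from UpdateMetadataRequestTest) could be:

// Hedged sketch: collect an Iterable into a Set for order-insensitive comparison.
private static <T> Set<T> iterableToSet(Iterable<T> iterable) {
    return StreamSupport.stream(iterable.spliterator(), false).collect(Collectors.toSet());
}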

Example 3 with UpdateMetadataPartitionState

Use of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in project kafka by apache.

In the class IntegrationTestUtils, the method waitUntilMetadataIsPropagated:

private static void waitUntilMetadataIsPropagated(final List<KafkaServer> servers, final String topic, final int partition, final long timeout) throws InterruptedException {
    final String baseReason = String.format("Metadata for topic=%s partition=%d was not propagated to all brokers within %d ms. ", topic, partition, timeout);
    retryOnExceptionWithTimeout(timeout, () -> {
        final List<KafkaServer> emptyPartitionInfos = new ArrayList<>();
        final List<KafkaServer> invalidBrokerIds = new ArrayList<>();
        for (final KafkaServer server : servers) {
            final MetadataCache metadataCache = server.dataPlaneRequestProcessor().metadataCache();
            final Option<UpdateMetadataPartitionState> partitionInfo = metadataCache.getPartitionInfo(topic, partition);
            if (partitionInfo.isEmpty()) {
                emptyPartitionInfos.add(server);
                continue;
            }
            final UpdateMetadataPartitionState metadataPartitionState = partitionInfo.get();
            if (!Request.isValidBrokerId(metadataPartitionState.leader())) {
                invalidBrokerIds.add(server);
            }
        }
        final String reason = baseReason + "Brokers without partition info: " + emptyPartitionInfos
            + ". Brokers with invalid broker id for partition leader: " + invalidBrokerIds;
        assertThat(reason, emptyPartitionInfos.isEmpty() && invalidBrokerIds.isEmpty());
    });
}
Also used : KafkaServer(kafka.server.KafkaServer) ArrayList(java.util.ArrayList) MetadataCache(kafka.server.MetadataCache) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState)
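The assertion runs inside retryOnExceptionWithTimeout, another IntegrationTestUtils helper whose body is not shown here. Assuming it simply re-runs the supplied check until it stops throwing or the timeout elapses, a minimal sketch of that retry pattern (hypothetical name and back-off, not the verbatim helper) could be:

// Hedged sketch of a retry-until-timeout loop in the spirit of retryOnExceptionWithTimeout;
// the real helper's signature, exception handling and back-off may differ.
static void retryUntilTimeout(final long timeoutMs, final Runnable check) throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (true) {
        try {
            check.run();        // rethrows the assertion error if the condition is not yet met
            return;             // condition met, metadata has propagated
        } catch (final AssertionError e) {
            if (System.currentTimeMillis() > deadline) {
                throw e;        // give up and surface the last failure reason
            }
            Thread.sleep(100L); // brief pause before retrying
        }
    }
}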

Example 4 with UpdateMetadataPartitionState

Use of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in project kafka by apache.

In the class UpdateMetadataRequest, the method normalize:

private void normalize() {
    // Version 3 added support for listener name, which we can infer from the security protocol for older versions
    if (version() < 3) {
        for (UpdateMetadataBroker liveBroker : data.liveBrokers()) {
            // Set endpoints so that callers can rely on it always being present
            if (version() == 0 && liveBroker.endpoints().isEmpty()) {
                SecurityProtocol securityProtocol = SecurityProtocol.PLAINTEXT;
                liveBroker.setEndpoints(singletonList(new UpdateMetadataEndpoint().setHost(liveBroker.v0Host()).setPort(liveBroker.v0Port()).setSecurityProtocol(securityProtocol.id).setListener(ListenerName.forSecurityProtocol(securityProtocol).value())));
            } else {
                for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
                    // Set listener so that callers can rely on it always being present
                    if (endpoint.listener().isEmpty())
                        endpoint.setListener(listenerNameFromSecurityProtocol(endpoint));
                }
            }
        }
    }
    if (version() >= 5) {
        for (UpdateMetadataTopicState topicState : data.topicStates()) {
            for (UpdateMetadataPartitionState partitionState : topicState.partitionStates()) {
                // Set the topic name so that we can always present the ungrouped view to callers
                partitionState.setTopicName(topicState.topicName());
            }
        }
    }
}
Also used : UpdateMetadataBroker(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) UpdateMetadataTopicState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataTopicState) UpdateMetadataEndpoint(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState)
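The listenerNameFromSecurityProtocol helper called above is not part of this snippet. Given how the test in Example 2 derives listener names for pre-version-3 requests (SecurityProtocol.forId followed by ListenerName.forSecurityProtocol), a plausible sketch of it, offered as an assumption rather than the verbatim source, is:

// Hedged sketch: infer the listener name from the endpoint's security protocol id,
// mirroring the conversion used in UpdateMetadataRequestTest.testVersionLogic.
private static String listenerNameFromSecurityProtocol(UpdateMetadataEndpoint endpoint) {
    SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
    return ListenerName.forSecurityProtocol(securityProtocol).value();
}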

Example 5 with UpdateMetadataPartitionState

Use of org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState in project kafka by apache.

In the class MetadataRequestBenchmark, the method initializeMetadataCache:

private void initializeMetadataCache() {
    List<UpdateMetadataBroker> liveBrokers = new LinkedList<>();
    List<UpdateMetadataPartitionState> partitionStates = new LinkedList<>();
    IntStream.range(0, 5).forEach(brokerId -> liveBrokers.add(new UpdateMetadataBroker().setId(brokerId).setEndpoints(endpoints(brokerId)).setRack("rack1")));
    IntStream.range(0, topicCount).forEach(topicId -> {
        String topicName = "topic-" + topicId;
        IntStream.range(0, partitionCount).forEach(partitionId -> {
            partitionStates.add(new UpdateMetadataPartitionState().setTopicName(topicName)
                .setPartitionIndex(partitionId).setControllerEpoch(1).setLeader(partitionCount % 5)
                .setLeaderEpoch(0).setIsr(Arrays.asList(0, 1, 3)).setZkVersion(1)
                .setReplicas(Arrays.asList(0, 1, 3)));
        });
    });
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(
        ApiKeys.UPDATE_METADATA.latestVersion(), 1, 1, 1, partitionStates, liveBrokers, Collections.emptyMap()).build();
    metadataCache.updateMetadata(100, updateMetadataRequest);
}
Also used : UpdateMetadataRequest(org.apache.kafka.common.requests.UpdateMetadataRequest) UpdateMetadataBroker(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker) LinkedList(java.util.LinkedList) UpdateMetadataPartitionState(org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState)
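Once updateMetadata has been applied, the populated cache can be queried the same way Example 3 does. A minimal sketch (the "topic-0"/partition 0 values are illustrative and assume topicCount and partitionCount are at least 1; scala.Option is the return type, as in Example 3) might be:

// Hedged sketch: read a partition back out of the cache populated above,
// using the getPartitionInfo accessor shown in Example 3.
Option<UpdateMetadataPartitionState> info = metadataCache.getPartitionInfo("topic-0", 0);
if (info.isDefined()) {
    System.out.println("leader of topic-0/0: " + info.get().leader());
}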

Aggregations

UpdateMetadataPartitionState (org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState) 6
ArrayList (java.util.ArrayList) 4
UpdateMetadataBroker (org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker) 4
UpdateMetadataEndpoint (org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint) 3
SecurityProtocol (org.apache.kafka.common.security.auth.SecurityProtocol) 3
HashMap (java.util.HashMap) 2
TopicPartition (org.apache.kafka.common.TopicPartition) 2
Uuid (org.apache.kafka.common.Uuid) 2
Test (org.junit.jupiter.api.Test) 2
ByteBuffer (java.nio.ByteBuffer) 1
Arrays.asList (java.util.Arrays.asList) 1
Collections (java.util.Collections) 1
Collections.emptyList (java.util.Collections.emptyList) 1
HashSet (java.util.HashSet) 1
LinkedHashMap (java.util.LinkedHashMap) 1
LinkedList (java.util.LinkedList) 1
List (java.util.List) 1
Map (java.util.Map) 1
Set (java.util.Set) 1
Collectors (java.util.stream.Collectors) 1