
Example 31 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

From class KafkaConsumerTest, method testPartitionsForNonExistingTopic.

@Test
public void testPartitionsForNonExistingTopic() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Cluster cluster = metadata.fetch();
    // Prepare a metadata response that contains the cluster nodes but no topic metadata.
    MetadataResponse updateResponse = RequestTestUtils.metadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), Collections.emptyList());
    client.prepareResponse(updateResponse);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    // partitionsFor() should return an empty list, not null, for a topic that does not exist.
    assertEquals(Collections.emptyList(), consumer.partitionsFor("non-exist-topic"));
}
Also used: ConsumerMetadata (org.apache.kafka.clients.consumer.internals.ConsumerMetadata), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), Cluster (org.apache.kafka.common.Cluster), MockClient (org.apache.kafka.clients.MockClient), Test (org.junit.jupiter.api.Test)
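
For orientation, here is a minimal sketch of the same behavior against the public consumer API rather than the mocked client; the broker address is an assumption, and the empty-list result assumes a reasonably recent client with topic auto-creation disabled.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PartitionsForUnknownTopicSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed broker address; not part of the test above.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // As asserted in the test, an unknown topic yields an empty partition list
            // (older client versions or brokers with auto-creation enabled behave differently).
            List<PartitionInfo> partitions = consumer.partitionsFor("non-exist-topic", Duration.ofSeconds(5));
            System.out.println("Partitions: " + partitions);
        }
    }
}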

Example 32 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

From class ConsumerCoordinatorTest, method testForceMetadataDeleteForPatternSubscriptionDuringRebalance.

@Test
public void testForceMetadataDeleteForPatternSubscriptionDuringRebalance() {
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.subscribe(Pattern.compile("test.*"), rebalanceListener);
        client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, new HashMap<String, Integer>() {

            {
                put(topic1, 1);
                put(topic2, 1);
            }
        }));
        coordinator.maybeUpdateSubscriptionMetadata();
        assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), subscriptions.subscription());
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
        MetadataResponse deletedMetadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap<String, Integer>() {

            {
                put(topic1, 1);
            }
        });
        // Instrument the test so that metadata will contain only one topic after next refresh.
        client.prepareMetadataUpdate(deletedMetadataResponse);
        client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
        client.prepareResponse(body -> {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            return sync.data().memberId().equals(consumerId) && sync.data().generationId() == 1 && sync.groupAssignments().isEmpty();
        }, syncGroupResponse(singletonList(t1p), Errors.NONE));
        partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));
        // This will trigger rebalance.
        coordinator.poll(time.timer(Long.MAX_VALUE));
        // Make sure that the metadata was refreshed during the rebalance and thus subscriptions now contain only one topic.
        assertEquals(singleton(topic1), subscriptions.subscription());
        // Refresh the metadata again. Since there have been no changes since the last refresh, it won't trigger
        // rebalance again.
        metadata.requestUpdate();
        consumerClient.poll(time.timer(Long.MAX_VALUE));
        assertFalse(coordinator.rejoinNeededOrPending());
    }
}
Also used: AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Metrics (org.apache.kafka.common.metrics.Metrics), HashMap (java.util.HashMap), SyncGroupRequest (org.apache.kafka.common.requests.SyncGroupRequest), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), Test (org.junit.jupiter.api.Test)
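
The test above drives the coordinator internals directly; as a rough public-API counterpart, a pattern subscription with a rebalance listener looks like the sketch below (broker address and group id are assumptions).

import java.time.Duration;
import java.util.Collection;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PatternSubscriptionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "pattern-demo");            // assumed group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // The matched topic set is re-evaluated on every metadata refresh, so deleting a
            // topic shrinks the subscription, which is what the test above verifies.
            consumer.subscribe(Pattern.compile("test.*"), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    System.out.println("Revoked: " + partitions);
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    System.out.println("Assigned: " + partitions);
                }
            });
            consumer.poll(Duration.ofSeconds(1));
        }
    }
}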

Example 33 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

From class ConsumerMetadataTest, method testBasicSubscription.

private void testBasicSubscription(Set<String> expectedTopics, Set<String> expectedInternalTopics) {
    Set<String> allTopics = new HashSet<>();
    allTopics.addAll(expectedTopics);
    allTopics.addAll(expectedInternalTopics);
    ConsumerMetadata metadata = newConsumerMetadata(false);
    MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder();
    assertEquals(allTopics, new HashSet<>(builder.topics()));
    List<MetadataResponse.TopicMetadata> topics = new ArrayList<>();
    for (String expectedTopic : expectedTopics)
        topics.add(topicMetadata(expectedTopic, false));
    for (String expectedInternalTopic : expectedInternalTopics)
        topics.add(topicMetadata(expectedInternalTopic, true));
    // Apply a metadata response covering all expected topics, then verify the fetched cluster view.
    MetadataResponse response = RequestTestUtils.metadataResponse(singletonList(node), "clusterId", node.id(), topics);
    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());
    assertEquals(allTopics, metadata.fetch().topics());
}
Also used: MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), ArrayList (java.util.ArrayList), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), HashSet (java.util.HashSet)
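
The assertion on metadata.fetch().topics() corresponds, on the public API side, to what listTopics() reports after a metadata round trip; a minimal sketch, with the broker address as an assumption.

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ListTopicsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // listTopics() issues a metadata request and returns every topic in the response,
            // much like the metadata.fetch().topics() check in the test above.
            Map<String, List<PartitionInfo>> topics = consumer.listTopics(Duration.ofSeconds(5));
            topics.keySet().forEach(System.out::println);
        }
    }
}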

Example 34 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

From class ConsumerMetadataTest, method testPatternSubscription.

private void testPatternSubscription(boolean includeInternalTopics) {
    subscription.subscribe(Pattern.compile("__.*"), new NoOpConsumerRebalanceListener());
    ConsumerMetadata metadata = newConsumerMetadata(includeInternalTopics);
    MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder();
    assertTrue(builder.isAllTopics());
    List<MetadataResponse.TopicMetadata> topics = new ArrayList<>();
    topics.add(topicMetadata("__consumer_offsets", true));
    topics.add(topicMetadata("__matching_topic", false));
    topics.add(topicMetadata("non_matching_topic", false));
    MetadataResponse response = RequestTestUtils.metadataResponse(singletonList(node), "clusterId", node.id(), topics);
    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());
    // Only topics matching the pattern are retained; internal topics are kept only when includeInternalTopics is set.
    if (includeInternalTopics)
        assertEquals(Utils.mkSet("__matching_topic", "__consumer_offsets"), metadata.fetch().topics());
    else
        assertEquals(Collections.singleton("__matching_topic"), metadata.fetch().topics());
}
Also used: MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), ArrayList (java.util.ArrayList), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)
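
The includeInternalTopics flag exercised here surfaces in the public consumer configuration as exclude.internal.topics; a minimal sketch follows, with the broker address and group id as assumptions.

import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class InternalTopicsSubscriptionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "internal-topics-demo");    // assumed group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Setting exclude.internal.topics to false corresponds to includeInternalTopics == true
        // in the test: internal topics such as __consumer_offsets may then match the pattern.
        props.put(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG, "false");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Pattern.compile("__.*"));
        }
    }
}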

Example 35 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

From class KafkaAdminClientTest, method testAlterPartitionReassignments.

@Test
public void testAlterPartitionReassignments() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        TopicPartition tp1 = new TopicPartition("A", 0);
        TopicPartition tp2 = new TopicPartition("B", 0);
        Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments = new HashMap<>();
        reassignments.put(tp1, Optional.empty());
        reassignments.put(tp2, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        // 1. server returns fewer responses than the number of partitions we sent
        AlterPartitionReassignmentsResponseData responseData1 = new AlterPartitionReassignmentsResponseData();
        ReassignablePartitionResponse normalPartitionResponse = new ReassignablePartitionResponse().setPartitionIndex(0);
        responseData1.setResponses(Collections.singletonList(new ReassignableTopicResponse().setName("A").setPartitions(Collections.singletonList(normalPartitionResponse))));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(responseData1));
        AlterPartitionReassignmentsResult result1 = env.adminClient().alterPartitionReassignments(reassignments);
        Future<Void> future1 = result1.all();
        Future<Void> future2 = result1.values().get(tp1);
        TestUtils.assertFutureError(future1, UnknownServerException.class);
        TestUtils.assertFutureError(future2, UnknownServerException.class);
        // 2. NOT_CONTROLLER error handling
        AlterPartitionReassignmentsResponseData controllerErrResponseData = new AlterPartitionReassignmentsResponseData()
                .setErrorCode(Errors.NOT_CONTROLLER.code())
                .setErrorMessage(Errors.NOT_CONTROLLER.message())
                .setResponses(Arrays.asList(
                        new ReassignableTopicResponse().setName("A").setPartitions(Collections.singletonList(normalPartitionResponse)),
                        new ReassignableTopicResponse().setName("B").setPartitions(Collections.singletonList(normalPartitionResponse))));
        MetadataResponse controllerNodeResponse = RequestTestUtils.metadataResponse(env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 1, Collections.emptyList());
        AlterPartitionReassignmentsResponseData normalResponse = new AlterPartitionReassignmentsResponseData()
                .setResponses(Arrays.asList(
                        new ReassignableTopicResponse().setName("A").setPartitions(Collections.singletonList(normalPartitionResponse)),
                        new ReassignableTopicResponse().setName("B").setPartitions(Collections.singletonList(normalPartitionResponse))));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(controllerErrResponseData));
        env.kafkaClient().prepareResponse(controllerNodeResponse);
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(normalResponse));
        AlterPartitionReassignmentsResult controllerErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        controllerErrResult.all().get();
        controllerErrResult.values().get(tp1).get();
        controllerErrResult.values().get(tp2).get();
        // 3. partition-level error
        AlterPartitionReassignmentsResponseData partitionLevelErrData = new AlterPartitionReassignmentsResponseData()
                .setResponses(Arrays.asList(
                        new ReassignableTopicResponse().setName("A").setPartitions(Collections.singletonList(
                                new ReassignablePartitionResponse()
                                        .setPartitionIndex(0)
                                        .setErrorMessage(Errors.INVALID_REPLICA_ASSIGNMENT.message())
                                        .setErrorCode(Errors.INVALID_REPLICA_ASSIGNMENT.code()))),
                        new ReassignableTopicResponse().setName("B").setPartitions(Collections.singletonList(normalPartitionResponse))));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(partitionLevelErrData));
        AlterPartitionReassignmentsResult partitionLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        TestUtils.assertFutureError(partitionLevelErrResult.values().get(tp1), Errors.INVALID_REPLICA_ASSIGNMENT.exception().getClass());
        partitionLevelErrResult.values().get(tp2).get();
        // 4. top-level error
        String errorMessage = "this is custom error message";
        AlterPartitionReassignmentsResponseData topLevelErrResponseData = new AlterPartitionReassignmentsResponseData()
                .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())
                .setErrorMessage(errorMessage)
                .setResponses(Arrays.asList(
                        new ReassignableTopicResponse().setName("A").setPartitions(Collections.singletonList(normalPartitionResponse)),
                        new ReassignableTopicResponse().setName("B").setPartitions(Collections.singletonList(normalPartitionResponse))));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(topLevelErrResponseData));
        AlterPartitionReassignmentsResult topLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.all(), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage());
        assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp1), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage());
        assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp2), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage());
        // 5. unrepresentable topic name and invalid partition errors
        TopicPartition invalidTopicTP = new TopicPartition("", 0);
        TopicPartition invalidPartitionTP = new TopicPartition("ABC", -1);
        Map<TopicPartition, Optional<NewPartitionReassignment>> invalidTopicReassignments = new HashMap<>();
        invalidTopicReassignments.put(invalidPartitionTP, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        invalidTopicReassignments.put(invalidTopicTP, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        invalidTopicReassignments.put(tp1, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        AlterPartitionReassignmentsResponseData singlePartResponseData = new AlterPartitionReassignmentsResponseData()
                .setResponses(Collections.singletonList(
                        new ReassignableTopicResponse().setName("A").setPartitions(Collections.singletonList(normalPartitionResponse))));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(singlePartResponseData));
        AlterPartitionReassignmentsResult unrepresentableTopicResult = env.adminClient().alterPartitionReassignments(invalidTopicReassignments);
        TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidTopicTP), InvalidTopicException.class);
        TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidPartitionTP), InvalidTopicException.class);
        unrepresentableTopicResult.values().get(tp1).get();
        // 6. success scenario
        AlterPartitionReassignmentsResponseData noErrResponseData = new AlterPartitionReassignmentsResponseData()
                .setErrorCode(Errors.NONE.code())
                .setErrorMessage(Errors.NONE.message())
                .setResponses(Arrays.asList(
                        new ReassignableTopicResponse().setName("A").setPartitions(Collections.singletonList(normalPartitionResponse)),
                        new ReassignableTopicResponse().setName("B").setPartitions(Collections.singletonList(normalPartitionResponse))));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(noErrResponseData));
        AlterPartitionReassignmentsResult noErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        noErrResult.all().get();
        noErrResult.values().get(tp1).get();
        noErrResult.values().get(tp2).get();
    }
}
Also used: Optional (java.util.Optional), HashMap (java.util.HashMap), ReassignablePartitionResponse (org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse), AlterPartitionReassignmentsResponse (org.apache.kafka.common.requests.AlterPartitionReassignmentsResponse), ReassignableTopicResponse (org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse), AlterPartitionReassignmentsResponseData (org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
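
Outside the mocked environment, the same reassignment API is driven through the Admin interface; a minimal sketch, where the broker address and target replica ids are assumptions.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.common.TopicPartition;

public class AlterPartitionReassignmentsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments = new HashMap<>();
            // Move A-0 to brokers 1, 2, 3 and cancel any ongoing reassignment of B-0,
            // mirroring the Optional.of(...) / Optional.empty() pattern in the test above.
            reassignments.put(new TopicPartition("A", 0),
                    Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
            reassignments.put(new TopicPartition("B", 0), Optional.empty());
            admin.alterPartitionReassignments(reassignments).all().get();
        }
    }
}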

Aggregations

MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 107
Test (org.junit.jupiter.api.Test): 71
HashMap (java.util.HashMap): 68
TopicPartition (org.apache.kafka.common.TopicPartition): 43
MockTime (org.apache.kafka.common.utils.MockTime): 38
Time (org.apache.kafka.common.utils.Time): 37
Node (org.apache.kafka.common.Node): 33
ArrayList (java.util.ArrayList): 30
MockClient (org.apache.kafka.clients.MockClient): 29
Cluster (org.apache.kafka.common.Cluster): 29
MetadataRequest (org.apache.kafka.common.requests.MetadataRequest): 28
Errors (org.apache.kafka.common.protocol.Errors): 27
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 27
Map (java.util.Map): 26
ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata): 26
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 26
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 25
List (java.util.List): 24
LogContext (org.apache.kafka.common.utils.LogContext): 22
HashSet (java.util.HashSet): 21