Example 16 with ConsumerMetadata

Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.

The class KafkaConsumerTest, method testInvalidGroupMetadata.

@Test
public void testInvalidGroupMetadata() throws InterruptedException {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, new RoundRobinAssignor(), true, groupInstanceId);
    consumer.subscribe(singletonList(topic));
    // concurrent access is illegal
    client.enableBlockingUntilWakeup(1);
    ExecutorService service = Executors.newSingleThreadExecutor();
    service.execute(() -> consumer.poll(Duration.ofSeconds(5)));
    try {
        TimeUnit.SECONDS.sleep(1);
        assertThrows(ConcurrentModificationException.class, consumer::groupMetadata);
        client.wakeup();
        consumer.wakeup();
    } finally {
        service.shutdown();
        assertTrue(service.awaitTermination(10, TimeUnit.SECONDS));
    }
    // accessing closed consumer is illegal
    consumer.close(Duration.ZERO);
    assertThrows(IllegalStateException.class, consumer::groupMetadata);
}
Also used : ConsumerMetadata(org.apache.kafka.clients.consumer.internals.ConsumerMetadata) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ExecutorService(java.util.concurrent.ExecutorService) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.jupiter.api.Test)
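
For contrast with the illegal accesses exercised above, the following is a minimal application-side sketch (not part of the Kafka test suite; the broker address, group id, and topic name are placeholders) of the two rules the test verifies: wakeup() is the only KafkaConsumer method that may safely be called from another thread, and any call after close() fails with IllegalStateException.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerThreadSafetySketch {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("group.id", "sketch-group");            // placeholder group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("sketch-topic")); // placeholder topic

        // wakeup() is the only KafkaConsumer method that is safe to call from another thread;
        // any other call (e.g. groupMetadata(), as in the test above) risks a
        // ConcurrentModificationException.
        Thread shutdownTrigger = new Thread(() -> {
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            consumer.wakeup();
        });
        shutdownTrigger.start();

        try {
            while (true) {
                consumer.poll(Duration.ofSeconds(1)); // owning thread: all calls are legal here
            }
        } catch (WakeupException e) {
            // expected: the other thread requested shutdown via wakeup()
        } finally {
            consumer.close(); // close on the owning thread
        }
        shutdownTrigger.join();

        // After close(), every method (again including groupMetadata()) throws
        // IllegalStateException, which is the second rule the test asserts.
        try {
            consumer.groupMetadata();
        } catch (IllegalStateException expected) {
            System.out.println("consumer already closed: " + expected.getMessage());
        }
    }
}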

Example 17 with ConsumerMetadata

Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.

The class KafkaConsumerTest, method testSubscriptionChangesWithAutoCommitEnabled.

/**
 * Verify that when a consumer changes its topic subscription its assigned partitions
 * do not immediately change, and the latest consumed offsets of its to-be-revoked
 * partitions are properly committed (when auto-commit is enabled).
 * Upon unsubscribing from subscribed topics the consumer subscription and assignment
 * are both updated right away but its consumed offsets are not auto committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitEnabled() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    tpCounts.put(topic3, 1);
    initMetadata(client, tpCounts);
    Node node = metadata.fetch().nodes().get(0);
    ConsumerPartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    // initial subscription
    consumer.subscribe(Arrays.asList(topic, topic2), getConsumerRebalanceListener(consumer));
    // verify that subscription has changed but assignment is still unchanged
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic2));
    assertTrue(consumer.assignment().isEmpty());
    // mock rebalance responses
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0, t2p0), null);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    // verify that subscription is still the same, and now assignment has caught up
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic2));
    assertEquals(2, consumer.assignment().size());
    assertTrue(consumer.assignment().contains(tp0) && consumer.assignment().contains(t2p0));
    // mock a response to the outstanding fetch so that we have data available on the next poll
    Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
    fetches1.put(tp0, new FetchInfo(0, 1));
    fetches1.put(t2p0, new FetchInfo(0, 10));
    client.respondFrom(fetchResponse(fetches1), node);
    client.poll(0, time.milliseconds());
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    // clear out the prefetch so it doesn't interfere with the rest of the test
    fetches1.put(tp0, new FetchInfo(1, 0));
    fetches1.put(t2p0, new FetchInfo(10, 0));
    client.respondFrom(fetchResponse(fetches1), node);
    client.poll(0, time.milliseconds());
    // verify that the fetch occurred as expected
    assertEquals(11, records.count());
    assertEquals(1L, consumer.position(tp0));
    assertEquals(10L, consumer.position(t2p0));
    // subscription change
    consumer.subscribe(Arrays.asList(topic, topic3), getConsumerRebalanceListener(consumer));
    // verify that subscription has changed but assignment is still unchanged
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic3));
    assertEquals(2, consumer.assignment().size());
    assertTrue(consumer.assignment().contains(tp0) && consumer.assignment().contains(t2p0));
    // mock the offset commit response for the to-be-revoked partitions
    Map<TopicPartition, Long> partitionOffsets1 = new HashMap<>();
    partitionOffsets1.put(tp0, 1L);
    partitionOffsets1.put(t2p0, 10L);
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, partitionOffsets1);
    // mock rebalance responses
    prepareRebalance(client, node, assignor, Arrays.asList(tp0, t3p0), coordinator);
    // mock a response to the next fetch from the new assignment
    Map<TopicPartition, FetchInfo> fetches2 = new HashMap<>();
    fetches2.put(tp0, new FetchInfo(1, 1));
    fetches2.put(t3p0, new FetchInfo(0, 100));
    client.prepareResponse(fetchResponse(fetches2));
    records = consumer.poll(Duration.ofMillis(1));
    // verify that the fetch occurred as expected
    assertEquals(101, records.count());
    assertEquals(2L, consumer.position(tp0));
    assertEquals(100L, consumer.position(t3p0));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());
    // verify that subscription is still the same, and now assignment has caught up
    assertEquals(2, consumer.subscription().size());
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic3));
    assertEquals(2, consumer.assignment().size());
    assertTrue(consumer.assignment().contains(tp0) && consumer.assignment().contains(t3p0));
    consumer.unsubscribe();
    // verify that subscription and assignment are both cleared
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    client.requests().clear();
    consumer.close();
}
Also used : ConsumerMetadata(org.apache.kafka.clients.consumer.internals.ConsumerMetadata) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) Node(org.apache.kafka.common.Node) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TopicPartition(org.apache.kafka.common.TopicPartition) OptionalLong(java.util.OptionalLong) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.jupiter.api.Test)
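
The test above drives this behaviour through MockClient; as a rough application-side sketch (the broker, group id, and topic names are placeholders, and a single poll() is used where a real rebalance may need several), the following shows how a subscription change with enable.auto.commit=true plays out: the assignment lags the new subscription until the next poll(), the latest consumed offsets of revoked partitions are committed during the rebalance, and unsubscribe() clears everything immediately without committing.

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class SubscriptionChangeSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("group.id", "sketch-group");            // placeholder group id
        props.put("enable.auto.commit", "true");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // initial subscription; the assignment stays empty until a poll() completes the rebalance
            consumer.subscribe(Arrays.asList("topic-a", "topic-b")); // placeholder topics
            consumer.poll(Duration.ofSeconds(1));                    // may need several polls in practice
            System.out.println("assignment after first poll: " + consumer.assignment());

            // changing the subscription only records the new topics; the assignment is unchanged
            // until the rebalance completes on a later poll(), at which point the latest consumed
            // offsets of the revoked partitions are auto-committed
            consumer.subscribe(Arrays.asList("topic-a", "topic-c"));
            System.out.println("assignment right after re-subscribe: " + consumer.assignment());
            consumer.poll(Duration.ofSeconds(1));
            System.out.println("assignment after rebalance: " + consumer.assignment());

            // unsubscribe() clears subscription and assignment right away and does not auto-commit
            consumer.unsubscribe();
            System.out.println("assignment after unsubscribe: " + consumer.assignment());
        }
    }
}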

Example 18 with ConsumerMetadata

Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.

The class KafkaConsumerTest, method verifyPollTimesOutDuringMetadataUpdate.

@Test
public void verifyPollTimesOutDuringMetadataUpdate() {
    final ConsumerMetadata metadata = createMetadata(subscription);
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    // Since the heartbeat thread is enabled after the join-group response is received, and it
    // could send the sync-group request on behalf of the consumer if one is enqueued, the
    // rebalance might still complete and a fetch might be sent; to avoid that, we do not
    // prepare a sync-group response here.
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
    final Queue<ClientRequest> requests = client.requests();
    assertEquals(0, requests.stream().filter(request -> request.apiKey().equals(ApiKeys.FETCH)).count());
}
Also used : ConsumerMetadata(org.apache.kafka.clients.consumer.internals.ConsumerMetadata) Node(org.apache.kafka.common.Node) ClientRequest(org.apache.kafka.clients.ClientRequest) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.jupiter.api.Test)
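
As a small illustrative sketch (placeholder configuration, not the test's MockClient setup), this is the application-facing consequence of the contract checked above: poll(Duration) returns within its timeout even while the group join or a metadata update is still in flight, so an empty batch is a normal result and no fetch is issued before the rebalance completes.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class BoundedPollSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("group.id", "sketch-group");            // placeholder group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("sketch-topic")); // placeholder topic
            for (int i = 0; i < 10; i++) {
                // returns within roughly 500 ms even if the group join is still in progress;
                // until the rebalance completes the batch is empty because no fetch has been sent
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                System.out.printf("poll %d returned %d records%n", i, records.count());
            }
        }
    }
}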

Example 19 with ConsumerMetadata

Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.

The class KafkaConsumerTest, method testNoCommittedOffsets.

@Test
public void testNoCommittedOffsets() {
    long offset1 = 10000;
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.assign(Arrays.asList(tp0, tp1));
    // lookup coordinator
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Utils.mkMap(Utils.mkEntry(tp0, offset1), Utils.mkEntry(tp1, -1L)), Errors.NONE), coordinator);
    final Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(Utils.mkSet(tp0, tp1));
    assertEquals(2, committed.size());
    assertEquals(offset1, committed.get(tp0).offset());
    assertNull(committed.get(tp1));
    consumer.close(Duration.ofMillis(0));
}
Also used : ConsumerMetadata(org.apache.kafka.clients.consumer.internals.ConsumerMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) Node(org.apache.kafka.common.Node) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.jupiter.api.Test)
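
A short application-side sketch of the API behaviour asserted above (topic, partitions, and configuration are placeholders): committed(Set) returns an entry for every requested partition, with a null value for any partition that has no committed offset yet.

import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommittedOffsetsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("group.id", "sketch-group");            // placeholder group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            Set<TopicPartition> partitions = new HashSet<>();
            partitions.add(new TopicPartition("sketch-topic", 0)); // placeholder partitions
            partitions.add(new TopicPartition("sketch-topic", 1));

            Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(partitions);
            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : committed.entrySet()) {
                if (entry.getValue() == null) {
                    // no offset has ever been committed for this partition
                    System.out.println(entry.getKey() + ": no committed offset");
                } else {
                    System.out.println(entry.getKey() + ": " + entry.getValue().offset());
                }
            }
        }
    }
}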

Example 20 with ConsumerMetadata

Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.

The class KafkaConsumerTest, method fetchResponseWithUnexpectedPartitionIsIgnored.

@Test
public void fetchResponseWithUnexpectedPartitionIsIgnored() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singletonList(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
    fetches1.put(tp0, new FetchInfo(0, 1));
    // not assigned and not fetched
    fetches1.put(t2p0, new FetchInfo(0, 10));
    client.prepareResponseFrom(fetchResponse(fetches1), node);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ZERO);
    assertEquals(0, records.count());
    consumer.close(Duration.ofMillis(0));
}
Also used : ConsumerMetadata(org.apache.kafka.clients.consumer.internals.ConsumerMetadata) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) Node(org.apache.kafka.common.Node) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.jupiter.api.Test)
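
As a hedged application-side sketch (placeholder configuration), this is the guarantee the test exercises from the caller's point of view: records returned by poll() only come from partitions in consumer.assignment(), because the client drops fetch response data for any partition it did not request, so the defensive check below should never fire.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class AssignedPartitionsOnlySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("group.id", "sketch-group");            // placeholder group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("sketch-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                TopicPartition source = new TopicPartition(record.topic(), record.partition());
                if (!consumer.assignment().contains(source)) {
                    // should be unreachable: the client filters out unassigned partitions
                    throw new IllegalStateException("record from unassigned partition " + source);
                }
            }
        }
    }
}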

Aggregations

MockClient (org.apache.kafka.clients.MockClient) 46
ConsumerMetadata (org.apache.kafka.clients.consumer.internals.ConsumerMetadata) 46
Test (org.junit.jupiter.api.Test) 41
Node (org.apache.kafka.common.Node) 35
HashMap (java.util.HashMap) 10
LinkedHashMap (java.util.LinkedHashMap) 10
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 7
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 7
TopicPartition (org.apache.kafka.common.TopicPartition) 7
SubscriptionState (org.apache.kafka.clients.consumer.internals.SubscriptionState) 6
LogContext (org.apache.kafka.common.utils.LogContext) 6
MockTime (org.apache.kafka.common.utils.MockTime) 5
Time (org.apache.kafka.common.utils.Time) 5
OptionalLong (java.util.OptionalLong) 4
ClientRequest (org.apache.kafka.clients.ClientRequest) 4
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService) 3
Cluster (org.apache.kafka.common.Cluster) 3
FetchRequest (org.apache.kafka.common.requests.FetchRequest) 3
ExecutorService (java.util.concurrent.ExecutorService) 2
ConsumerMetrics (org.apache.kafka.clients.consumer.internals.ConsumerMetrics) 2