Search in sources :

Example 46 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.

Source: class KafkaProducerTest, method testMeasureAbortTransactionDuration.

@Test
public void testMeasureAbortTransactionDuration() {
    // Configure a transactional producer against a single mocked broker.
    Map<String, Object> producerConfigs = new HashMap<>();
    producerConfigs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    Time mockTime = new MockTime(1);
    MetadataResponse bootstrapMetadata = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);
    mockClient.updateMetadata(bootstrapMetadata);
    // Responses consumed by initTransactions(): coordinator lookup, then InitProducerId.
    mockClient.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    mockClient.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    try (KafkaProducer<String, String> producer = kafkaProducer(producerConfigs, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        producer.initTransactions();
        // First aborted transaction: the cumulative abort-time metric must become positive.
        mockClient.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        producer.abortTransaction();
        double firstAbortTotal = getMetricValue(producer, "txn-abort-time-ns-total");
        assertTrue(firstAbortTotal > 0);
        // Second aborted transaction: the total is cumulative, so it must strictly increase.
        mockClient.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        producer.abortTransaction();
        assertTrue(getMetricValue(producer, "txn-abort-time-ns-total") > firstAbortTotal);
    }
}
Also used : ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) HashMap(java.util.HashMap) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 47 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.

Source: class KafkaProducerTest, method testCommitTransactionWithRecordTooLargeException.

@Test
public void testCommitTransactionWithRecordTooLargeException() throws Exception {
    // Cap max.request.size at 1000 bytes so the record built below is guaranteed to be rejected.
    Map<String, Object> producerConfigs = new HashMap<>();
    producerConfigs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    producerConfigs.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1000);
    Time mockTime = new MockTime(1);
    MetadataResponse bootstrapMetadata = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata mockedMetadata = mock(ProducerMetadata.class);
    MockClient mockClient = new MockClient(mockTime, mockedMetadata);
    mockClient.updateMetadata(bootstrapMetadata);
    // Responses consumed by initTransactions(): coordinator lookup, then InitProducerId.
    mockClient.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    mockClient.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    when(mockedMetadata.fetch()).thenReturn(onePartitionCluster);
    // A 1000-character value cannot fit in a 1000-byte request once headers are added.
    StringBuilder oversized = new StringBuilder(1000);
    for (int i = 0; i < 1000; i++) {
        oversized.append('*');
    }
    ProducerRecord<String, String> largeRecord = new ProducerRecord<>(topic, "large string", oversized.toString());
    try (KafkaProducer<String, String> producer = kafkaProducer(producerConfigs, new StringSerializer(), new StringSerializer(), mockedMetadata, mockClient, null, mockTime)) {
        producer.initTransactions();
        mockClient.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        // The oversized send fails, which poisons the transaction; commit must then throw.
        TestUtils.assertFutureError(producer.send(largeRecord), RecordTooLargeException.class);
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
Also used : Selectable(org.apache.kafka.common.network.Selectable) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) ArgumentMatchers.eq(org.mockito.ArgumentMatchers.eq) Sender(org.apache.kafka.clients.producer.internals.Sender) KafkaException(org.apache.kafka.common.KafkaException) Cluster(org.apache.kafka.common.Cluster) Future(java.util.concurrent.Future) RecordBatch(org.apache.kafka.common.record.RecordBatch) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockProducerInterceptor(org.apache.kafka.test.MockProducerInterceptor) CommonClientConfigs(org.apache.kafka.clients.CommonClientConfigs) Sensor(org.apache.kafka.common.metrics.Sensor) TestUtils(org.apache.kafka.test.TestUtils) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) PartitionInfo(org.apache.kafka.common.PartitionInfo) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) CountDownLatch(java.util.concurrent.CountDownLatch) Metrics(org.apache.kafka.common.metrics.Metrics) Stream(java.util.stream.Stream) MockMetricsReporter(org.apache.kafka.test.MockMetricsReporter) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) ArgumentMatchers.notNull(org.mockito.ArgumentMatchers.notNull) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Mockito.mock(org.mockito.Mockito.mock) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) 
Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ArrayList(java.util.ArrayList) InterruptException(org.apache.kafka.common.errors.InterruptException) EndTxnResponseData(org.apache.kafka.common.message.EndTxnResponseData) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) FindCoordinatorRequest(org.apache.kafka.common.requests.FindCoordinatorRequest) ManagementFactory(java.lang.management.ManagementFactory) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) SslConfigs(org.apache.kafka.common.config.SslConfigs) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) ProducerInterceptors(org.apache.kafka.clients.producer.internals.ProducerInterceptors) ValueSource(org.junit.jupiter.params.provider.ValueSource) TxnOffsetCommitResponse(org.apache.kafka.common.requests.TxnOffsetCommitResponse) Properties(java.util.Properties) Mockito.times(org.mockito.Mockito.times) ExecutionException(java.util.concurrent.ExecutionException) Assertions.assertArrayEquals(org.junit.jupiter.api.Assertions.assertArrayEquals) AddOffsetsToTxnResponse(org.apache.kafka.common.requests.AddOffsetsToTxnResponse) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Serializer(org.apache.kafka.common.serialization.Serializer) Avg(org.apache.kafka.common.metrics.stats.Avg) TxnOffsetCommitRequestData(org.apache.kafka.common.message.TxnOffsetCommitRequestData) Exchanger(java.util.concurrent.Exchanger) FindCoordinatorResponse(org.apache.kafka.common.requests.FindCoordinatorResponse) JoinGroupRequest(org.apache.kafka.common.requests.JoinGroupRequest) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) 
KafkaClient(org.apache.kafka.clients.KafkaClient) Metric(org.apache.kafka.common.Metric) MetricName(org.apache.kafka.common.MetricName) TopicPartition(org.apache.kafka.common.TopicPartition) InitProducerIdResponseData(org.apache.kafka.common.message.InitProducerIdResponseData) Time(org.apache.kafka.common.utils.Time) EndTxnResponse(org.apache.kafka.common.requests.EndTxnResponse) Collection(java.util.Collection) ObjectName(javax.management.ObjectName) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) Test(org.junit.jupiter.api.Test) List(java.util.List) InitProducerIdResponse(org.apache.kafka.common.requests.InitProducerIdResponse) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Optional(java.util.Optional) IntStream(java.util.stream.IntStream) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) MBeanServer(javax.management.MBeanServer) Collections.singletonMap(java.util.Collections.singletonMap) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) ExecutorService(java.util.concurrent.ExecutorService) MockPartitioner(org.apache.kafka.test.MockPartitioner) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) MockClient(org.apache.kafka.clients.MockClient) TxnOffsetCommitRequest(org.apache.kafka.common.requests.TxnOffsetCommitRequest) Mockito.when(org.mockito.Mockito.when) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) ConfigException(org.apache.kafka.common.config.ConfigException) Mockito.verify(org.mockito.Mockito.verify) 
TimeUnit(java.util.concurrent.TimeUnit) MockSerializer(org.apache.kafka.test.MockSerializer) Collections(java.util.Collections) AddOffsetsToTxnResponseData(org.apache.kafka.common.message.AddOffsetsToTxnResponseData) ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) HashMap(java.util.HashMap) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 48 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.

Source: class KafkaProducerTest, method testCloseIsForcedOnPendingInitProducerId.

@Test
public void testCloseIsForcedOnPendingInitProducerId() throws InterruptedException {
    // Verifies that close() forcibly aborts a blocked initTransactions() call:
    // only the FindCoordinator response is prepared, so InitProducerId never completes
    // and initTransactions() stays pending until close() interrupts it.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "this-is-a-transactional-id");
    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("testTopic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    CountDownLatch assertionDoneLatch = new CountDownLatch(1);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "this-is-a-transactional-id", NODE));
    try {
        executorService.submit(() -> {
            // close() below should abort the pending request, surfacing a KafkaException here.
            assertThrows(KafkaException.class, producer::initTransactions);
            assertionDoneLatch.countDown();
        });
        // Wait until the background thread has actually issued a request before closing.
        client.waitForRequests(1, 2000);
        producer.close(Duration.ofMillis(1000));
        // FIX: the original ignored await()'s return value, so the test could pass even if
        // the background assertion never ran. Fail loudly on timeout instead.
        assertTrue(assertionDoneLatch.await(5000, TimeUnit.MILLISECONDS),
            "Timed out waiting for initTransactions() to be aborted by close()");
    } finally {
        // FIX: the original leaked the executor's thread across tests.
        executorService.shutdownNow();
    }
}
Also used : ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) HashMap(java.util.HashMap) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) CountDownLatch(java.util.concurrent.CountDownLatch) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) ExecutorService(java.util.concurrent.ExecutorService) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 49 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.

Source: class FetcherTest, method testGetOffsetsForTimesWhenSomeTopicPartitionLeadersNotKnownInitially.

@Test
public void testGetOffsetsForTimesWhenSomeTopicPartitionLeadersNotKnownInitially() {
    // Verifies that offsetsForTimes() triggers a metadata refresh when the leader for
    // one of the requested partitions (t2p0) is unknown at first, and still returns
    // offsets for all requested partitions once the second refresh supplies it.
    buildFetcher();
    subscriptions.assignFromUser(mkSet(tp0, tp1));
    final String anotherTopic = "another-topic";
    final TopicPartition t2p0 = new TopicPartition(anotherTopic, 0);
    client.reset();
    // Metadata initially has one topic
    MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWithIds(3, singletonMap(topicName, 2), topicIds);
    client.updateMetadata(initialMetadata);
    // The first metadata refresh should contain one topic
    client.prepareMetadataUpdate(initialMetadata);
    client.prepareResponseFrom(listOffsetResponse(tp0, Errors.NONE, 1000L, 11L), metadata.fetch().leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, 1000L, 32L), metadata.fetch().leaderFor(tp1));
    // Second metadata refresh should contain two topics
    Map<String, Integer> partitionNumByTopic = new HashMap<>();
    partitionNumByTopic.put(topicName, 2);
    partitionNumByTopic.put(anotherTopic, 1);
    // FIX: use the anotherTopic constant instead of repeating the "another-topic" literal,
    // so the topic-id key cannot drift from the TopicPartition above.
    topicIds.put(anotherTopic, Uuid.randomUuid());
    MetadataResponse updatedMetadata = RequestTestUtils.metadataUpdateWithIds(3, partitionNumByTopic, topicIds);
    client.prepareMetadataUpdate(updatedMetadata);
    client.prepareResponseFrom(listOffsetResponse(t2p0, Errors.NONE, 1000L, 54L), metadata.fetch().leaderFor(t2p0));
    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, ListOffsetsRequest.LATEST_TIMESTAMP);
    timestampToSearch.put(tp1, ListOffsetsRequest.LATEST_TIMESTAMP);
    timestampToSearch.put(t2p0, ListOffsetsRequest.LATEST_TIMESTAMP);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));
    assertNotNull(offsetAndTimestampMap.get(tp0), "Expect Fetcher.offsetsForTimes() to return non-null result for " + tp0);
    assertNotNull(offsetAndTimestampMap.get(tp1), "Expect Fetcher.offsetsForTimes() to return non-null result for " + tp1);
    assertNotNull(offsetAndTimestampMap.get(t2p0), "Expect Fetcher.offsetsForTimes() to return non-null result for " + t2p0);
    // Offsets match the stubbed listOffsetResponse values above.
    assertEquals(11L, offsetAndTimestampMap.get(tp0).offset());
    assertEquals(32L, offsetAndTimestampMap.get(tp1).offset());
    assertEquals(54L, offsetAndTimestampMap.get(t2p0).offset());
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) Test(org.junit.jupiter.api.Test)

Example 50 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.

Source: class FetcherTest, method testGetOffsetsForTimesWithError.

/**
 * Shared driver for offsetsForTimes() error-handling tests: prepares one failing
 * ListOffsets response per partition (with the given error), then one succeeding
 * retry, and asserts the final result per partition.
 *
 * @param errorForP0 error returned on the first ListOffsets attempt for t2p0
 * @param errorForP1 error returned on the first ListOffsets attempt for tp1
 * @param offsetForP0 value used as BOTH timestamp and offset in t2p0's stubbed responses
 * @param offsetForP1 value used as BOTH timestamp and offset in tp1's stubbed responses
 * @param expectedOffsetForP0 expected result for t2p0; null means "no entry expected"
 * @param expectedOffsetForP1 expected result for tp1; null means "no entry expected"
 */
private void testGetOffsetsForTimesWithError(Errors errorForP0, Errors errorForP1, long offsetForP0, long offsetForP1, Long expectedOffsetForP0, Long expectedOffsetForP1) {
    client.reset();
    String topicName2 = "topic2";
    TopicPartition t2p0 = new TopicPartition(topicName2, 0);
    // Expect a metadata refresh.
    metadata.bootstrap(ClientUtils.parseAndValidateAddresses(Collections.singletonList("1.1.1.1:1111"), ClientDnsLookup.USE_ALL_DNS_IPS));
    Map<String, Integer> partitionNumByTopic = new HashMap<>();
    partitionNumByTopic.put(topicName, 2);
    partitionNumByTopic.put(topicName2, 1);
    MetadataResponse updateMetadataResponse = RequestTestUtils.metadataUpdateWithIds(2, partitionNumByTopic, topicIds);
    // Build the cluster view up front so responses can be routed to each partition's leader.
    Cluster updatedCluster = updateMetadataResponse.buildCluster();
    // The metadata refresh should contain all the topics.
    client.prepareMetadataUpdate(updateMetadataResponse, true);
    // First try should fail due to metadata error.
    client.prepareResponseFrom(listOffsetResponse(t2p0, errorForP0, offsetForP0, offsetForP0), updatedCluster.leaderFor(t2p0));
    client.prepareResponseFrom(listOffsetResponse(tp1, errorForP1, offsetForP1, offsetForP1), updatedCluster.leaderFor(tp1));
    // Second try should succeed.
    client.prepareResponseFrom(listOffsetResponse(t2p0, Errors.NONE, offsetForP0, offsetForP0), updatedCluster.leaderFor(t2p0));
    client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, offsetForP1, offsetForP1), updatedCluster.leaderFor(tp1));
    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(t2p0, 0L);
    timestampToSearch.put(tp1, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));
    if (expectedOffsetForP0 == null)
        assertNull(offsetAndTimestampMap.get(t2p0));
    else {
        // The same expected value checks both fields because the stubbed responses
        // above pass offsetForP0 as both timestamp and offset.
        assertEquals(expectedOffsetForP0.longValue(), offsetAndTimestampMap.get(t2p0).timestamp());
        assertEquals(expectedOffsetForP0.longValue(), offsetAndTimestampMap.get(t2p0).offset());
    }
    if (expectedOffsetForP1 == null)
        assertNull(offsetAndTimestampMap.get(tp1));
    else {
        // Same duplication rationale as for t2p0 above.
        assertEquals(expectedOffsetForP1.longValue(), offsetAndTimestampMap.get(tp1).timestamp());
        assertEquals(expectedOffsetForP1.longValue(), offsetAndTimestampMap.get(tp1).offset());
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Cluster(org.apache.kafka.common.Cluster) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp)

Aggregations

MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)107 Test (org.junit.jupiter.api.Test)71 HashMap (java.util.HashMap)68 TopicPartition (org.apache.kafka.common.TopicPartition)43 MockTime (org.apache.kafka.common.utils.MockTime)38 Time (org.apache.kafka.common.utils.Time)37 Node (org.apache.kafka.common.Node)33 ArrayList (java.util.ArrayList)30 MockClient (org.apache.kafka.clients.MockClient)29 Cluster (org.apache.kafka.common.Cluster)29 MetadataRequest (org.apache.kafka.common.requests.MetadataRequest)28 Errors (org.apache.kafka.common.protocol.Errors)27 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)27 Map (java.util.Map)26 ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata)26 InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException)26 StringSerializer (org.apache.kafka.common.serialization.StringSerializer)25 List (java.util.List)24 LogContext (org.apache.kafka.common.utils.LogContext)22 HashSet (java.util.HashSet)21