Search in sources:

Example 1 with ProducerMetadata

use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.

From class KafkaProducerTest, method testInitTransactionWhileThrottled.

@Test
public void testInitTransactionWhileThrottled() {
    // Transactional producer with a generous max-block budget so that
    // initTransactions() can ride out the simulated broker throttle below.
    Map<String, Object> producerConfigs = new HashMap<>();
    producerConfigs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    producerConfigs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000);
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);

    // Seed the client with a one-partition cluster, then throttle its only broker.
    MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    mockClient.updateMetadata(initialMetadata);
    Node broker = producerMetadata.fetch().nodes().get(0);
    mockClient.throttle(broker, 5000);

    // Responses are consumed in order by the transaction bootstrap:
    // coordinator lookup first, then InitProducerId.
    mockClient.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    mockClient.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));

    // initTransactions() must still complete despite the throttle; the
    // try-with-resources closes the producer afterwards.
    try (Producer<String, String> producer = kafkaProducer(producerConfigs, new StringSerializer(),
            new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        producer.initTransactions();
    }
}
Also used : ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) HashMap(java.util.HashMap) Node(org.apache.kafka.common.Node) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 2 with ProducerMetadata

use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.

From class KafkaProducerTest, method testCloseIsForcedOnPendingFindCoordinator.

@Test
public void testCloseIsForcedOnPendingFindCoordinator() throws InterruptedException {
    // Configure a transactional producer so initTransactions() issues a
    // FindCoordinator request that this test deliberately never answers;
    // close() must then abort the pending call rather than hang.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "this-is-a-transactional-id");
    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("testTopic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    CountDownLatch assertionDoneLatch = new CountDownLatch(1);
    try {
        executorService.submit(() -> {
            // The forced close below should surface as a KafkaException here.
            assertThrows(KafkaException.class, producer::initTransactions);
            assertionDoneLatch.countDown();
        });
        // Wait until the FindCoordinator request is actually in flight before closing.
        client.waitForRequests(1, 2000);
        producer.close(Duration.ofMillis(1000));
        // Fail loudly if the background assertion never completed; the original
        // ignored await()'s result, letting a hung initTransactions pass silently.
        assertTrue(assertionDoneLatch.await(5000, TimeUnit.MILLISECONDS),
            "initTransactions was not aborted by close()");
    } finally {
        // Always release the executor thread; previously it leaked per test run.
        executorService.shutdownNow();
    }
}
Also used : ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) HashMap(java.util.HashMap) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) CountDownLatch(java.util.concurrent.CountDownLatch) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) ExecutorService(java.util.concurrent.ExecutorService) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 3 with ProducerMetadata

use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.

From class KafkaProducerTest, method testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange.

@Test
public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() throws Exception {
    // Transactional producer with a 60s max-block budget; the mocked clock
    // below is advanced past that budget to force a metadata timeout.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);
    // Create a record with a partition higher than the initial (outdated) partition range
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, 2, null, "value");
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    MockTime mockTime = new MockTime();
    MockClient client = new MockClient(mockTime, metadata);
    // Responses consumed in order by initTransactions(): coordinator lookup,
    // then InitProducerId.
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    AtomicInteger invocationCount = new AtomicInteger(0);
    // Every metadata fetch returns a single-partition cluster, so partition 2
    // never materializes; after the fifth fetch the clock jumps 70s, exceeding
    // max.block.ms so the pending send() gives up with a timeout.
    when(metadata.fetch()).then(invocation -> {
        invocationCount.incrementAndGet();
        if (invocationCount.get() > 5) {
            mockTime.setCurrentTimeMs(mockTime.milliseconds() + 70000);
        }
        return onePartitionCluster;
    });
    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, mockTime)) {
        producer.initTransactions();
        producer.beginTransaction();
        // The send fails with TimeoutException, which poisons the transaction:
        // commitTransaction() must then throw rather than commit.
        TestUtils.assertFutureError(producer.send(record), TimeoutException.class);
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
Also used : ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) HashMap(java.util.HashMap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 4 with ProducerMetadata

use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.

From class KafkaProducerTest, method testTopicExpiryInMetadata.

@Test
public void testTopicExpiryInMetadata() throws InterruptedException {
    // Verifies that an idle topic is expired from ProducerMetadata: once the
    // idle window passes, partitionsFor() must block waiting on a fresh update
    // and time out rather than serve the stale (expired) entry.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "30000");
    long refreshBackoffMs = 500L;
    long metadataExpireMs = 60000L;
    long metadataIdleMs = 60000L;
    final Time time = new MockTime();
    final ProducerMetadata metadata = new ProducerMetadata(refreshBackoffMs, metadataExpireMs, metadataIdleMs, new LogContext(), new ClusterResourceListeners(), time);
    final String topic = "topic";
    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, new MockClient(time, metadata), null, time)) {
        // The exchanger lock-steps this driver thread with the updater thread
        // below; matching "// n" markers show which exchange calls pair up.
        Exchanger<Void> exchanger = new Exchanger<>();
        Thread t = new Thread(() -> {
            try {
                // 1
                exchanger.exchange(null);
                // Busy-wait until partitionsFor() requests an update, then serve it.
                while (!metadata.updateRequested()) Thread.sleep(100);
                MetadataResponse updateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
                metadata.updateWithCurrentRequestVersion(updateResponse, false, time.milliseconds());
                // 2
                exchanger.exchange(null);
                // Advance well past metadataIdleMs so the topic becomes idle-expired.
                time.sleep(120 * 1000L);
                // Update the metadata again, but it should be expired at this point.
                updateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
                metadata.updateWithCurrentRequestVersion(updateResponse, false, time.milliseconds());
                // 3
                exchanger.exchange(null);
                // Let the second partitionsFor() request an update, then burn its
                // entire max.block.ms (30s) without ever satisfying the request.
                while (!metadata.updateRequested()) Thread.sleep(100);
                time.sleep(30 * 1000L);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        t.start();
        // 1
        exchanger.exchange(null);
        // First lookup succeeds once the updater thread delivers metadata.
        assertNotNull(producer.partitionsFor(topic));
        // 2
        exchanger.exchange(null);
        // 3
        exchanger.exchange(null);
        // Topic entry has expired; the blocking wait exhausts max.block.ms.
        assertThrows(TimeoutException.class, () -> producer.partitionsFor(topic));
        t.join();
    }
}
Also used : ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) HashMap(java.util.HashMap) Exchanger(java.util.concurrent.Exchanger) LogContext(org.apache.kafka.common.utils.LogContext) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) KafkaException(org.apache.kafka.common.KafkaException) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) InterruptException(org.apache.kafka.common.errors.InterruptException) ExecutionException(java.util.concurrent.ExecutionException) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) ConfigException(org.apache.kafka.common.config.ConfigException) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 5 with ProducerMetadata

use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.

From class KafkaProducerTest, method testMetadataTimeoutWithPartitionOutOfRange.

@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMetadataTimeoutWithPartitionOutOfRange(boolean isIdempotenceEnabled) throws Exception {
    // Producer may block up to 60s waiting for metadata covering partition 2.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);
    configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, isIdempotenceEnabled);
    // Create a record with a partition higher than the initial (outdated) partition range
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, 2, null, "value");
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    MockTime mockTime = new MockTime();
    AtomicInteger invocationCount = new AtomicInteger(0);
    // Metadata never learns about partition 2; on the fifth fetch the clock
    // jumps 70s, exhausting the 60s max.block.ms budget.
    when(metadata.fetch()).then(invocation -> {
        invocationCount.incrementAndGet();
        if (invocationCount.get() == 5) {
            mockTime.setCurrentTimeMs(mockTime.milliseconds() + 70000);
        }
        return onePartitionCluster;
    });
    KafkaProducer<String, String> producer = producerWithOverrideNewSender(configs, metadata, mockTime);
    // Four request updates where the requested partition is out of range, at which point the timeout expires
    // and a TimeoutException is thrown
    // For idempotence enabled case, the first and last metadata.fetch will be called in Sender#maybeSendAndPollTransactionalRequest,
    // before the producer#send and after it finished
    Future<RecordMetadata> future = producer.send(record);
    verify(metadata, times(4)).requestUpdateForTopic(topic);
    verify(metadata, times(4)).awaitUpdate(anyInt(), anyLong());
    verify(metadata, times(5)).fetch();
    try {
        // The future MUST complete exceptionally with a TimeoutException cause.
        // The original try/catch only asserted inside the catch block, so a
        // send that unexpectedly succeeded would pass the test silently.
        ExecutionException e = assertThrows(ExecutionException.class, future::get);
        assertTrue(e.getCause() instanceof TimeoutException,
            "expected TimeoutException cause but was " + e.getCause());
    } finally {
        producer.close(Duration.ofMillis(0));
    }
}
Also used : ProducerMetadata(org.apache.kafka.clients.producer.internals.ProducerMetadata) HashMap(java.util.HashMap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutionException(java.util.concurrent.ExecutionException) MockTime(org.apache.kafka.common.utils.MockTime) TimeoutException(org.apache.kafka.common.errors.TimeoutException) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Aggregations

HashMap (java.util.HashMap)34 ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata)34 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)32 MockTime (org.apache.kafka.common.utils.MockTime)30 StringSerializer (org.apache.kafka.common.serialization.StringSerializer)28 MockClient (org.apache.kafka.clients.MockClient)27 MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)27 Test (org.junit.jupiter.api.Test)27 Time (org.apache.kafka.common.utils.Time)25 ExecutorService (java.util.concurrent.ExecutorService)8 ExecutionException (java.util.concurrent.ExecutionException)7 TimeoutException (org.apache.kafka.common.errors.TimeoutException)7 ValueSource (org.junit.jupiter.params.provider.ValueSource)7 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)6 Node (org.apache.kafka.common.Node)6 ArrayList (java.util.ArrayList)5 CountDownLatch (java.util.concurrent.CountDownLatch)5 ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata)5 KafkaException (org.apache.kafka.common.KafkaException)5 TopicPartition (org.apache.kafka.common.TopicPartition)5