Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
Class KafkaProducerTest, method testInitTransactionWhileThrottled.
@Test
public void testInitTransactionWhileThrottled() {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000);
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);

    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);

    // Throttle the broker node so its responses are delayed; initTransactions()
    // must still complete within max.block.ms (10s) once the 5s throttle elapses.
    Node node = metadata.fetch().nodes().get(0);
    client.throttle(node, 5000);

    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));

    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
    }
}
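These snippets call a newMetadata(refreshBackoffMs, expirationMs) helper that the excerpt never shows. A plausible reconstruction, assuming it simply forwards to the same ProducerMetadata constructor used directly in testTopicExpiryInMetadata below (the 60s idle-timeout constant and the use of Time.SYSTEM are assumptions):

// Hypothetical sketch of the test helper; the idle-timeout value is assumed.
private static ProducerMetadata newMetadata(long refreshBackoffMs, long expirationMs) {
    return new ProducerMetadata(refreshBackoffMs, expirationMs, 60_000L,
            new LogContext(), new ClusterResourceListeners(), Time.SYSTEM);
}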
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
Class KafkaProducerTest, method testCloseIsForcedOnPendingFindCoordinator.
@Test
public void testCloseIsForcedOnPendingFindCoordinator() throws InterruptedException {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "this-is-a-transactional-id");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("testTopic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time);

    ExecutorService executorService = Executors.newSingleThreadExecutor();
    CountDownLatch assertionDoneLatch = new CountDownLatch(1);
    executorService.submit(() -> {
        // initTransactions() blocks on a FindCoordinator request that is never answered;
        // the forced close below should fail it with a KafkaException.
        assertThrows(KafkaException.class, producer::initTransactions);
        assertionDoneLatch.countDown();
    });

    client.waitForRequests(1, 2000);
    producer.close(Duration.ofMillis(1000));
    assertTrue(assertionDoneLatch.await(5000, TimeUnit.MILLISECONDS));
}
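The excerpt never shuts down the single-thread executor. If you reuse this pattern in a standalone test, a small cleanup at the end of the method avoids leaking the thread (a minimal sketch; the original test may handle this elsewhere):

// The background task has already completed once the latch await returns,
// so a prompt shutdown is safe here.
executorService.shutdownNow();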
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
Class KafkaProducerTest, method testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange.
@Test
public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);

    // Create a record with a partition higher than the initial (outdated) partition range
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, 2, null, "value");
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    MockTime mockTime = new MockTime();

    MockClient client = new MockClient(mockTime, metadata);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));

    // Keep returning the stale one-partition cluster; after the fifth fetch, jump the
    // mock clock past max.block.ms (60s) so the metadata wait times out.
    AtomicInteger invocationCount = new AtomicInteger(0);
    when(metadata.fetch()).then(invocation -> {
        invocationCount.incrementAndGet();
        if (invocationCount.get() > 5) {
            mockTime.setCurrentTimeMs(mockTime.milliseconds() + 70000);
        }
        return onePartitionCluster;
    });

    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, mockTime)) {
        producer.initTransactions();
        producer.beginTransaction();
        TestUtils.assertFutureError(producer.send(record), TimeoutException.class);
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
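The initProducerIdResponse(producerId, epoch, error) helper is also omitted from the excerpt. A sketch of what it plausibly builds with the public InitProducerIdResponseData message type (treat the exact setter chain as an assumption):

// Hypothetical reconstruction: the broker's reply to an InitProducerId request.
private static InitProducerIdResponse initProducerIdResponse(long producerId, short producerEpoch, Errors error) {
    InitProducerIdResponseData responseData = new InitProducerIdResponseData()
            .setErrorCode(error.code())
            .setProducerEpoch(producerEpoch)
            .setProducerId(producerId)
            .setThrottleTimeMs(0);
    return new InitProducerIdResponse(responseData);
}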
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
Class KafkaProducerTest, method testTopicExpiryInMetadata.
@Test
public void testTopicExpiryInMetadata() throws InterruptedException {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "30000");

    long refreshBackoffMs = 500L;
    long metadataExpireMs = 60000L;
    long metadataIdleMs = 60000L;
    final Time time = new MockTime();
    final ProducerMetadata metadata = new ProducerMetadata(refreshBackoffMs, metadataExpireMs,
            metadataIdleMs, new LogContext(), new ClusterResourceListeners(), time);
    final String topic = "topic";

    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, new MockClient(time, metadata), null, time)) {
        Exchanger<Void> exchanger = new Exchanger<>();

        Thread t = new Thread(() -> {
            try {
                exchanger.exchange(null);  // 1
                while (!metadata.updateRequested())
                    Thread.sleep(100);
                MetadataResponse updateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
                metadata.updateWithCurrentRequestVersion(updateResponse, false, time.milliseconds());
                exchanger.exchange(null);  // 2
                time.sleep(120 * 1000L);

                // Update the metadata again, but it should be expired at this point.
                updateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
                metadata.updateWithCurrentRequestVersion(updateResponse, false, time.milliseconds());
                exchanger.exchange(null);  // 3
                while (!metadata.updateRequested())
                    Thread.sleep(100);
                time.sleep(30 * 1000L);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        t.start();

        exchanger.exchange(null);  // 1
        assertNotNull(producer.partitionsFor(topic));
        exchanger.exchange(null);  // 2
        exchanger.exchange(null);  // 3
        assertThrows(TimeoutException.class, () -> producer.partitionsFor(topic));
        t.join();
    }
}
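The two-thread choreography above leans on java.util.concurrent.Exchanger, where each numbered exchange(null) call is a pure rendezvous barrier: both threads block until the other arrives, then proceed together. A minimal standalone illustration of the pattern:

import java.util.concurrent.Exchanger;

public class ExchangerBarrierDemo {
    public static void main(String[] args) throws InterruptedException {
        Exchanger<Void> rendezvous = new Exchanger<>();
        Thread worker = new Thread(() -> {
            try {
                rendezvous.exchange(null); // blocks until main also reaches the barrier
                System.out.println("worker: passed the barrier");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        rendezvous.exchange(null); // both threads pass the barrier together
        worker.join();
    }
}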
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
Class KafkaProducerTest, method testMetadataTimeoutWithPartitionOutOfRange.
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMetadataTimeoutWithPartitionOutOfRange(boolean isIdempotenceEnabled) throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);
    configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, isIdempotenceEnabled);

    // Create a record with a partition higher than the initial (outdated) partition range
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, 2, null, "value");
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    MockTime mockTime = new MockTime();

    // Keep returning the stale one-partition cluster; on the fifth fetch, jump the mock
    // clock past max.block.ms (60s) so the metadata wait times out.
    AtomicInteger invocationCount = new AtomicInteger(0);
    when(metadata.fetch()).then(invocation -> {
        invocationCount.incrementAndGet();
        if (invocationCount.get() == 5) {
            mockTime.setCurrentTimeMs(mockTime.milliseconds() + 70000);
        }
        return onePartitionCluster;
    });

    KafkaProducer<String, String> producer = producerWithOverrideNewSender(configs, metadata, mockTime);

    // Four metadata update rounds in which the requested partition stays out of range, after
    // which the timeout expires and a TimeoutException is thrown.
    // When idempotence is enabled, the first and last metadata.fetch() calls happen in
    // Sender#maybeSendAndPollTransactionalRequest, before producer.send() starts and after it finishes.
    Future<RecordMetadata> future = producer.send(record);
    verify(metadata, times(4)).requestUpdateForTopic(topic);
    verify(metadata, times(4)).awaitUpdate(anyInt(), anyLong());
    verify(metadata, times(5)).fetch();

    try {
        future.get();
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    } finally {
        producer.close(Duration.ofMillis(0));
    }
}
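Both out-of-range tests have the mocked metadata.fetch() return a onePartitionCluster fixture that the excerpt does not define. A sketch of such a fixture, so that a record addressed to partition 2 stays permanently out of range (the node and constructor arguments here are assumptions, not the test class's actual field):

// Hypothetical fixture: a cluster that only knows partition 0 of the topic.
Node node = new Node(0, "localhost", 9999);
Cluster onePartitionCluster = new Cluster(
        "dummy",
        Collections.singletonList(node),
        Collections.singletonList(new PartitionInfo(topic, 0, node, null, null)),
        Collections.emptySet(),
        Collections.emptySet());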