Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
From the class KafkaProducerTest, method testInitTransactionTimeout, which verifies that initTransactions() times out when the coordinator handshake cannot finish within max.block.ms, and that a later retry succeeds:
@Test
public void testInitTransactionTimeout() {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "bad-transaction");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 500);
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());

    MockClient client = new MockClient(time, metadata);

    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        client.prepareResponse(
            request -> request instanceof FindCoordinatorRequest &&
                ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(),
            FindCoordinatorResponse.prepareResponse(Errors.NONE, "bad-transaction", NODE));

        // No InitProducerId response is prepared, so initTransactions() blocks
        // until max.block.ms (500 ms) elapses and then throws.
        assertThrows(TimeoutException.class, producer::initTransactions);

        client.prepareResponse(
            request -> request instanceof FindCoordinatorRequest &&
                ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(),
            FindCoordinatorResponse.prepareResponse(Errors.NONE, "bad-transaction", NODE));
        client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));

        // Retrying initialization should work.
        producer.initTransactions();
    }
}
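For comparison outside the test harness, here is a minimal application-side sketch of the retry behavior the test exercises. The helper name initWithRetry is hypothetical, and the KafkaProducer javadoc notes that a TimeoutException from initTransactions() is safe to retry:

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.common.errors.TimeoutException;
    import org.apache.kafka.common.serialization.StringSerializer;

    // Hypothetical helper: create a transactional producer and retry the
    // coordinator handshake until it completes.
    private static KafkaProducer<String, String> initWithRetry(Properties props) {
        KafkaProducer<String, String> producer =
            new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
        while (true) {
            try {
                producer.initTransactions();
                return producer;
            } catch (TimeoutException e) {
                // Retriable per the KafkaProducer javadoc: calling
                // initTransactions() again resumes the handshake.
            }
        }
    }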
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
From the class KafkaProducerTest, method testMetadataExpiry, which verifies that a send only requests a metadata update once the cached topic metadata has expired:
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMetadataExpiry(boolean isIdempotenceEnabled) throws InterruptedException {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, isIdempotenceEnabled);

    ProducerMetadata metadata = mock(ProducerMetadata.class);
    // fetch() returns the topic's cluster, then an expired (empty) cluster,
    // then the refreshed cluster, one value per call.
    when(metadata.fetch()).thenReturn(onePartitionCluster, emptyCluster, onePartitionCluster);

    KafkaProducer<String, String> producer = producerWithOverrideNewSender(configs, metadata);
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value");
    producer.send(record);

    // Verify the topic's metadata isn't requested since it's already present.
    verify(metadata, times(0)).requestUpdateForTopic(topic);
    verify(metadata, times(0)).awaitUpdate(anyInt(), anyLong());
    verify(metadata, times(1)).fetch();

    // The metadata has been expired. Verify the producer requests the topic's metadata.
    producer.send(record, null);
    verify(metadata, times(1)).requestUpdateForTopic(topic);
    verify(metadata, times(1)).awaitUpdate(anyInt(), anyLong());
    verify(metadata, times(3)).fetch();

    producer.close(Duration.ofMillis(0));
}
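The single thenReturn(onePartitionCluster, emptyCluster, onePartitionCluster) call is Mockito's consecutive stubbing: each invocation of the stubbed method consumes the next value, which is how the test simulates metadata expiring between the two sends. A standalone sketch of the idiom, using a hypothetical Iterator mock rather than ProducerMetadata:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;
    import java.util.Iterator;

    @SuppressWarnings("unchecked")
    Iterator<String> iterator = mock(Iterator.class);
    when(iterator.next()).thenReturn("first", "second", "third");

    iterator.next(); // "first"
    iterator.next(); // "second"
    iterator.next(); // "third", repeated for every call after this one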
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
From the class KafkaProducerTest, helper method verifyInvalidGroupMetadata, which asserts that sendOffsetsToTransaction() rejects invalid consumer group metadata with an IllegalArgumentException:
private void verifyInvalidGroupMetadata(ConsumerGroupMetadata groupMetadata) {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000);
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);

    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);

    Node node = metadata.fetch().nodes().get(0);
    client.throttle(node, 5000);

    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));

    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        producer.beginTransaction();
        assertThrows(IllegalArgumentException.class,
            () -> producer.sendOffsetsToTransaction(Collections.emptyMap(), groupMetadata));
    }
}
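For contrast with the rejected input above, a sketch of the intended usage, where the group metadata comes from a live consumer's groupMetadata(). The producer and consumer variables, topic names, and offset are illustrative assumptions:

    import static java.util.Collections.singletonMap;
    import java.util.Map;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.TopicPartition;

    // Commit the consumed offset atomically with the produced record.
    Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(
        new TopicPartition("input-topic", 0), new OffsetAndMetadata(42L)); // illustrative
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("output-topic", "key", "value"));
    producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
    producer.commitTransaction();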
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
From the class KafkaProducerTest, method shouldCloseProperlyAndThrowIfInterrupted, which verifies that a close() blocked behind an in-flight send throws InterruptException when its thread is interrupted:
@Test
public void shouldCloseProperlyAndThrowIfInterrupted() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, MockPartitioner.class.getName());
    configs.put(ProducerConfig.BATCH_SIZE_CONFIG, "1");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);

    final Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
        new StringSerializer(), metadata, client, null, time);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Exception> closeException = new AtomicReference<>();
    try {
        Future<?> future = executor.submit(() -> {
            producer.send(new ProducerRecord<>("topic", "key", "value"));
            try {
                producer.close();
                fail("Close should block and throw.");
            } catch (Exception e) {
                closeException.set(e);
            }
        });

        // Close producer should not complete until send succeeds
        try {
            future.get(100, TimeUnit.MILLISECONDS);
            fail("Close completed without waiting for send");
        } catch (java.util.concurrent.TimeoutException expected) {
            /* ignore */
        }

        // Ensure send has started
        client.waitForRequests(1, 1000);

        assertTrue(future.cancel(true), "Close terminated prematurely");
        TestUtils.waitForCondition(() -> closeException.get() != null,
            "InterruptException did not occur within timeout.");
        assertTrue(closeException.get() instanceof InterruptException,
            "Expected exception not thrown " + closeException);
    } finally {
        executor.shutdownNow();
    }
}
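On the application side, the takeaway is that close() can throw the unchecked InterruptException if the closing thread is interrupted while waiting on in-flight sends. A minimal handling sketch, assuming a producer variable in scope; the timeout value is illustrative:

    import java.time.Duration;
    import org.apache.kafka.common.errors.InterruptException;

    try {
        producer.close(Duration.ofSeconds(5)); // bound the wait for in-flight sends
    } catch (InterruptException e) {
        // Kafka's InterruptException is unchecked and restores the thread's
        // interrupt status, so callers can log it and let shutdown proceed.
        System.err.println("Interrupted while closing producer: " + e);
    }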
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in project kafka by apache.
From the class KafkaProducerTest, method testCommitTransactionWithSendToInvalidTopic, which verifies that a transactional send to an invalid topic name fails and makes commitTransaction() throw:
@Test
public void testCommitTransactionWithSendToInvalidTopic() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, emptyMap());
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));

    // Invalid topic name due to space
    String invalidTopicName = "topic abc";
    ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION,
        invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = RequestTestUtils.metadataResponse(
        new ArrayList<>(initialUpdateResponse.brokers()),
        initialUpdateResponse.clusterId(),
        initialUpdateResponse.controller().id(),
        topicMetadata);
    client.prepareMetadataUpdate(updateResponse);

    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        producer.beginTransaction();

        TestUtils.assertFutureError(producer.send(record), InvalidTopicException.class);
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
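The KafkaException from commitTransaction() is the general signal that the transaction must be aborted. A sketch of the canonical handling pattern from the KafkaProducer javadoc; the record contents are illustrative:

    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.KafkaException;
    import org.apache.kafka.common.errors.AuthorizationException;
    import org.apache.kafka.common.errors.OutOfOrderSequenceException;
    import org.apache.kafka.common.errors.ProducerFencedException;

    try {
        producer.beginTransaction();
        producer.send(new ProducerRecord<>("output-topic", "key", "value")); // illustrative
        producer.commitTransaction();
    } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
        // Fatal errors: the producer can no longer be used.
        producer.close();
    } catch (KafkaException e) {
        // Abortable errors, including an invalid topic as above: roll back and retry.
        producer.abortTransaction();
    }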