Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testCommitTransactionWithMetadataTimeoutForMissingTopic.
// Verifies that when a send to a not-yet-created topic times out waiting for
// metadata, the transaction is poisoned and commitTransaction() throws.
@Test
public void testCommitTransactionWithMetadataTimeoutForMissingTopic() throws Exception {
Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
// max.block.ms bounds how long send() may wait for topic metadata.
configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);
// Create a record for a not-yet-created topic
ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value");
ProducerMetadata metadata = mock(ProducerMetadata.class);
MockTime mockTime = new MockTime();
MockClient client = new MockClient(mockTime, metadata);
// Canned responses so initTransactions() (coordinator lookup + InitProducerId)
// completes normally; only the metadata fetch is made to fail.
client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
AtomicInteger invocationCount = new AtomicInteger(0);
// Always report an empty cluster so the topic is never found; after five
// fetches, advance the mock clock by 70s (> max.block.ms of 60s) so the
// metadata wait inside send() times out deterministically.
when(metadata.fetch()).then(invocation -> {
invocationCount.incrementAndGet();
if (invocationCount.get() > 5) {
mockTime.setCurrentTimeMs(mockTime.milliseconds() + 70000);
}
return emptyCluster;
});
try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, mockTime)) {
producer.initTransactions();
producer.beginTransaction();
// The send's future fails with TimeoutException (metadata never arrived)...
TestUtils.assertFutureError(producer.send(record), TimeoutException.class);
// ...which leaves the transaction in an error state, so commit must throw.
assertThrows(KafkaException.class, producer::commitTransaction);
}
}
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testFlushMeasureLatency.
// Verifies that the flush-time-ns-total metric accumulates latency: it is
// positive after one flush() and strictly larger after a second flush().
@Test
public void testFlushMeasureLatency() {
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    // Auto-ticking mock clock so each flush observes non-zero elapsed time.
    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);
    // Seed the client with a single-broker view holding one partition of "topic".
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)));
    try (KafkaProducer<String, String> producer = kafkaProducer(producerProps, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        producer.flush();
        double latencyAfterFirstFlush = getMetricValue(producer, "flush-time-ns-total");
        assertTrue(latencyAfterFirstFlush > 0);
        // A second flush must strictly increase the cumulative total.
        producer.flush();
        assertTrue(getMetricValue(producer, "flush-time-ns-total") > latencyAfterFirstFlush);
    }
}
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testFlushCompleteSendOfInflightBatches.
// Verifies that flush() blocks until every record that was sent but not yet
// acknowledged has its future completed.
@Test
public void testFlushCompleteSendOfInflightBatches() {
    Map<String, Object> producerConfigs = new HashMap<>();
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    // only test in idempotence disabled producer for simplicity
    // flush operation acts the same for idempotence enabled and disabled cases
    producerConfigs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);
    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);
    // Seed a single-broker cluster holding one partition of "topic".
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)));
    try (Producer<String, String> producer = kafkaProducer(producerConfigs, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        ArrayList<Future<RecordMetadata>> pendingSends = new ArrayList<>();
        for (int recordIndex = 0; recordIndex < 50; recordIndex++) {
            pendingSends.add(producer.send(new ProducerRecord<>("topic", "value" + recordIndex)));
        }
        // Nothing should have completed before the flush...
        pendingSends.forEach(future -> assertFalse(future.isDone()));
        producer.flush();
        // ...and every send must be completed once flush() returns.
        pendingSends.forEach(future -> assertTrue(future.isDone()));
    }
}
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testSendTxnOffsetsWithGroupMetadata.
// Verifies that sendOffsetsToTransaction() propagates the full consumer group
// metadata (group id, member id, generation id, group instance id) into the
// TxnOffsetCommit request when the broker supports the newer request version.
// NOTE: the prepareResponse() calls below are strictly ordered to match the
// producer's request sequence — do not reorder them.
@Test
public void testSendTxnOffsetsWithGroupMetadata() {
final short maxVersion = (short) 3;
Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000);
configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
Time time = new MockTime(1);
MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
MockClient client = new MockClient(time, metadata);
client.updateMetadata(initialUpdateResponse);
// Advertise TxnOffsetCommit up to v3 so the group metadata fields are sent.
client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.TXN_OFFSET_COMMIT.id, (short) 0, maxVersion));
Node node = metadata.fetch().nodes().get(0);
// Throttle the broker node to exercise the request path under backoff.
client.throttle(node, 5000);
// Responses in producer request order: coordinator lookup, InitProducerId,
// AddOffsetsToTxn, a second coordinator lookup for the group coordinator.
client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
client.prepareResponse(addOffsetsToTxnResponse(Errors.NONE));
client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
String groupId = "group";
String memberId = "member";
int generationId = 5;
String groupInstanceId = "instance";
// Matcher asserts the outgoing TxnOffsetCommit request carries exactly the
// group metadata supplied to sendOffsetsToTransaction() below.
client.prepareResponse(request -> {
TxnOffsetCommitRequestData data = ((TxnOffsetCommitRequest) request).data();
return data.groupId().equals(groupId) && data.memberId().equals(memberId) && data.generationId() == generationId && data.groupInstanceId().equals(groupInstanceId);
}, txnOffsetsCommitResponse(Collections.singletonMap(new TopicPartition("topic", 0), Errors.NONE)));
client.prepareResponse(endTxnResponse(Errors.NONE));
try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time)) {
producer.initTransactions();
producer.beginTransaction();
ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata(groupId, generationId, memberId, Optional.of(groupInstanceId));
producer.sendOffsetsToTransaction(Collections.emptyMap(), groupMetadata);
producer.commitTransaction();
}
}
Use of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testMetadataFetch.
// Verifies the producer's metadata-fetch behavior around the first send for a
// topic: while the cluster view is empty it requests an update and awaits it
// once per empty fetch, and once partition info is known, subsequent send()
// and partitionsFor() calls fetch without requesting further updates.
// Fix: the producer was only closed on the success path; a failing verify()
// would leak the producer (and its threads) into later tests. The assertions
// are now wrapped in try/finally so close() always runs.
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMetadataFetch(boolean isIdempotenceEnabled) throws InterruptedException {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, isIdempotenceEnabled);
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    // Return empty cluster 4 times and cluster from then on
    when(metadata.fetch()).thenReturn(emptyCluster, emptyCluster, emptyCluster, emptyCluster, onePartitionCluster);
    KafkaProducer<String, String> producer = producerWithOverrideNewSender(configs, metadata);
    try {
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value");
        producer.send(record);
        // One request update for each empty cluster returned
        verify(metadata, times(4)).requestUpdateForTopic(topic);
        verify(metadata, times(4)).awaitUpdate(anyInt(), anyLong());
        verify(metadata, times(5)).fetch();
        // Should not request update for subsequent `send`
        producer.send(record, null);
        verify(metadata, times(4)).requestUpdateForTopic(topic);
        verify(metadata, times(4)).awaitUpdate(anyInt(), anyLong());
        verify(metadata, times(6)).fetch();
        // Should not request update for subsequent `partitionsFor`
        producer.partitionsFor(topic);
        verify(metadata, times(4)).requestUpdateForTopic(topic);
        verify(metadata, times(4)).awaitUpdate(anyInt(), anyLong());
        verify(metadata, times(7)).fetch();
    } finally {
        // Close with zero timeout regardless of assertion outcome.
        producer.close(Duration.ofMillis(0));
    }
}
Aggregations