Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
Class KafkaProducerTest, method testMeasureAbortTransactionDuration.
@Test
public void testMeasureAbortTransactionDuration() {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);
    // Coordinator discovery and InitProducerId must succeed before transactions can start.
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();

        // First abort: the txn-abort-time-ns-total metric should become positive.
        client.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        producer.abortTransaction();
        double first = getMetricValue(producer, "txn-abort-time-ns-total");
        assertTrue(first > 0);

        // Second abort: the cumulative metric should keep growing.
        client.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        producer.abortTransaction();
        assertTrue(getMetricValue(producer, "txn-abort-time-ns-total") > first);
    }
}
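The snippet above depends on test helpers defined elsewhere in KafkaProducerTest (kafkaProducer, newMetadata, initProducerIdResponse, endTxnResponse, getMetricValue). As a rough sketch, a getMetricValue helper could be written against the public KafkaProducer.metrics() map; the method name and error handling here are assumptions, not the helper actually used in the test.

    // Assumed helper: find a producer metric by name and return its numeric value.
    // Requires imports of org.apache.kafka.common.Metric and org.apache.kafka.common.MetricName.
    private static double getMetricValue(KafkaProducer<?, ?> producer, String name) {
        for (Map.Entry<MetricName, ? extends Metric> entry : producer.metrics().entrySet()) {
            if (entry.getKey().name().equals(name))
                return ((Number) entry.getValue().metricValue()).doubleValue();
        }
        throw new IllegalStateException("Metric not found: " + name);
    }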
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
Class KafkaProducerTest, method testCommitTransactionWithRecordTooLargeException.
@Test
public void testCommitTransactionWithRecordTooLargeException() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1000);
    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    when(metadata.fetch()).thenReturn(onePartitionCluster);

    // A value larger than max.request.size (1000 bytes) forces a RecordTooLargeException on send.
    String largeString = IntStream.range(0, 1000).mapToObj(i -> "*").collect(Collectors.joining());
    ProducerRecord<String, String> largeRecord = new ProducerRecord<>(topic, "large string", largeString);

    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        client.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        TestUtils.assertFutureError(producer.send(largeRecord), RecordTooLargeException.class);
        // The failed send puts the transaction into an error state, so commit must throw.
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
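Both producer tests above prepare InitProducerId and EndTxn responses on the MockClient. A plausible shape for those helpers, built on the generated message classes in org.apache.kafka.common.message, is sketched below; the field defaults are assumptions and may differ from the helpers in KafkaProducerTest.

    // Assumed helper: an InitProducerId response with the given id, epoch, and error code.
    private InitProducerIdResponse initProducerIdResponse(long producerId, short producerEpoch, Errors error) {
        InitProducerIdResponseData responseData = new InitProducerIdResponseData()
                .setErrorCode(error.code())
                .setProducerId(producerId)
                .setProducerEpoch(producerEpoch)
                .setThrottleTimeMs(0);
        return new InitProducerIdResponse(responseData);
    }

    // Assumed helper: an EndTxn response carrying only an error code.
    private EndTxnResponse endTxnResponse(Errors error) {
        return new EndTxnResponse(new EndTxnResponseData()
                .setErrorCode(error.code())
                .setThrottleTimeMs(0));
    }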
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
Class KafkaProducerTest, method testCloseIsForcedOnPendingInitProducerId.
@Test
public void testCloseIsForcedOnPendingInitProducerId() throws InterruptedException {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "this-is-a-transactional-id");
    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("testTopic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    CountDownLatch assertionDoneLatch = new CountDownLatch(1);
    // Only the coordinator lookup is answered; InitProducerId stays pending, so initTransactions() blocks.
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "this-is-a-transactional-id", NODE));
    executorService.submit(() -> {
        assertThrows(KafkaException.class, producer::initTransactions);
        assertionDoneLatch.countDown();
    });
    client.waitForRequests(1, 2000);
    // Closing while InitProducerId is still in flight should force the blocked call to fail.
    producer.close(Duration.ofMillis(1000));
    assertionDoneLatch.await(5000, TimeUnit.MILLISECONDS);
}
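The newMetadata helper used by these producer tests is not shown. A minimal sketch, assuming a ProducerMetadata constructor that takes refresh backoff, expiry, idle time, a LogContext, ClusterResourceListeners, and a Time source; the idle-time constant is a placeholder and the actual test helper may differ.

    // Assumed helper: build a ProducerMetadata with the given refresh backoff and expiry.
    private static ProducerMetadata newMetadata(long refreshBackoffMs, long expirationMs) {
        return new ProducerMetadata(refreshBackoffMs, expirationMs, 5 * 60 * 1000L,
                new LogContext(), new ClusterResourceListeners(), Time.SYSTEM);
    }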
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
Class FetcherTest, method testGetOffsetsForTimesWhenSomeTopicPartitionLeadersNotKnownInitially.
@Test
public void testGetOffsetsForTimesWhenSomeTopicPartitionLeadersNotKnownInitially() {
    buildFetcher();
    subscriptions.assignFromUser(mkSet(tp0, tp1));
    final String anotherTopic = "another-topic";
    final TopicPartition t2p0 = new TopicPartition(anotherTopic, 0);
    client.reset();

    // Metadata initially has one topic
    MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWithIds(3, singletonMap(topicName, 2), topicIds);
    client.updateMetadata(initialMetadata);

    // The first metadata refresh should contain one topic
    client.prepareMetadataUpdate(initialMetadata);
    client.prepareResponseFrom(listOffsetResponse(tp0, Errors.NONE, 1000L, 11L), metadata.fetch().leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, 1000L, 32L), metadata.fetch().leaderFor(tp1));

    // Second metadata refresh should contain two topics
    Map<String, Integer> partitionNumByTopic = new HashMap<>();
    partitionNumByTopic.put(topicName, 2);
    partitionNumByTopic.put(anotherTopic, 1);
    topicIds.put("another-topic", Uuid.randomUuid());
    MetadataResponse updatedMetadata = RequestTestUtils.metadataUpdateWithIds(3, partitionNumByTopic, topicIds);
    client.prepareMetadataUpdate(updatedMetadata);
    client.prepareResponseFrom(listOffsetResponse(t2p0, Errors.NONE, 1000L, 54L), metadata.fetch().leaderFor(t2p0));

    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, ListOffsetsRequest.LATEST_TIMESTAMP);
    timestampToSearch.put(tp1, ListOffsetsRequest.LATEST_TIMESTAMP);
    timestampToSearch.put(t2p0, ListOffsetsRequest.LATEST_TIMESTAMP);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap =
            fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));

    assertNotNull(offsetAndTimestampMap.get(tp0), "Expect Fetcher.offsetsForTimes() to return non-null result for " + tp0);
    assertNotNull(offsetAndTimestampMap.get(tp1), "Expect Fetcher.offsetsForTimes() to return non-null result for " + tp1);
    assertNotNull(offsetAndTimestampMap.get(t2p0), "Expect Fetcher.offsetsForTimes() to return non-null result for " + t2p0);
    assertEquals(11L, offsetAndTimestampMap.get(tp0).offset());
    assertEquals(32L, offsetAndTimestampMap.get(tp1).offset());
    assertEquals(54L, offsetAndTimestampMap.get(t2p0).offset());
}
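For comparison, the same kind of lookup goes through KafkaConsumer.offsetsForTimes() in application code. The sketch below is illustrative only; the bootstrap address, group id, and topic are placeholders, and a null OffsetAndTimestamp in the result means no record exists at or after the requested timestamp.

    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "offsets-for-times-demo");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
        Map<TopicPartition, Long> query = new HashMap<>();
        // Look up the first offset with a timestamp at or after one minute ago.
        query.put(new TopicPartition("topic", 0), System.currentTimeMillis() - 60_000L);
        Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(query);
        offsets.forEach((tp, ot) ->
                System.out.println(tp + " -> " + (ot == null ? "no offset" : ot.offset())));
    }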
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
Class FetcherTest, method testGetOffsetsForTimesWithError.
private void testGetOffsetsForTimesWithError(Errors errorForP0,
                                             Errors errorForP1,
                                             long offsetForP0,
                                             long offsetForP1,
                                             Long expectedOffsetForP0,
                                             Long expectedOffsetForP1) {
    client.reset();
    String topicName2 = "topic2";
    TopicPartition t2p0 = new TopicPartition(topicName2, 0);
    // Expect a metadata refresh.
    metadata.bootstrap(ClientUtils.parseAndValidateAddresses(Collections.singletonList("1.1.1.1:1111"),
            ClientDnsLookup.USE_ALL_DNS_IPS));
    Map<String, Integer> partitionNumByTopic = new HashMap<>();
    partitionNumByTopic.put(topicName, 2);
    partitionNumByTopic.put(topicName2, 1);
    MetadataResponse updateMetadataResponse = RequestTestUtils.metadataUpdateWithIds(2, partitionNumByTopic, topicIds);
    Cluster updatedCluster = updateMetadataResponse.buildCluster();

    // The metadata refresh should contain all the topics.
    client.prepareMetadataUpdate(updateMetadataResponse, true);

    // First try should fail due to metadata error.
    client.prepareResponseFrom(listOffsetResponse(t2p0, errorForP0, offsetForP0, offsetForP0), updatedCluster.leaderFor(t2p0));
    client.prepareResponseFrom(listOffsetResponse(tp1, errorForP1, offsetForP1, offsetForP1), updatedCluster.leaderFor(tp1));
    // Second try should succeed.
    client.prepareResponseFrom(listOffsetResponse(t2p0, Errors.NONE, offsetForP0, offsetForP0), updatedCluster.leaderFor(t2p0));
    client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, offsetForP1, offsetForP1), updatedCluster.leaderFor(tp1));

    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(t2p0, 0L);
    timestampToSearch.put(tp1, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap =
            fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));

    if (expectedOffsetForP0 == null)
        assertNull(offsetAndTimestampMap.get(t2p0));
    else {
        assertEquals(expectedOffsetForP0.longValue(), offsetAndTimestampMap.get(t2p0).timestamp());
        assertEquals(expectedOffsetForP0.longValue(), offsetAndTimestampMap.get(t2p0).offset());
    }
    if (expectedOffsetForP1 == null)
        assertNull(offsetAndTimestampMap.get(tp1));
    else {
        assertEquals(expectedOffsetForP1.longValue(), offsetAndTimestampMap.get(tp1).timestamp());
        assertEquals(expectedOffsetForP1.longValue(), offsetAndTimestampMap.get(tp1).offset());
    }
}
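This helper is parameterized so concrete tests can drive it with different error codes and expected offsets. A hypothetical caller (the test name, error code, and offsets below are placeholders, not taken from FetcherTest) might look like:

    @Test
    public void testGetOffsetsForTimesWithRetriableError() {
        buildFetcher();
        // First attempt hits a retriable error on t2p0, second attempt succeeds for both partitions.
        testGetOffsetsForTimesWithError(Errors.LEADER_NOT_AVAILABLE, Errors.NONE, 10L, 100L, 10L, 100L);
    }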