Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class FetcherTest, method testFetcherSessionEpochUpdate.
@Test
public void testFetcherSessionEpochUpdate() throws Exception {
    buildFetcher(2);

    MetadataResponse initialMetadataResponse =
            RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
    client.updateMetadata(initialMetadataResponse);
    assignFromUser(Collections.singleton(tp0));
    subscriptions.seek(tp0, 0L);

    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    // Answer fetch requests from a background thread, checking the session epoch of every request.
    Future<?> future = executorService.submit(() -> {
        long nextOffset = 0;
        long nextEpoch = 0;
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    int epoch = fetchRequest.metadata().epoch();
                    assertTrue(epoch == 0 || epoch == nextEpoch,
                            String.format("Unexpected epoch: expected %d, got %d", nextEpoch, epoch));
                    nextEpoch++;
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    responseMap.put(tidp0, new FetchResponseData.PartitionData()
                            .setPartitionIndex(tp0.partition())
                            .setHighWatermark(nextOffset + 2)
                            .setLastStableOffset(nextOffset + 2)
                            .setLogStartOffset(0)
                            .setRecords(buildRecords(nextOffset, 2, nextOffset)));
                    nextOffset += 2;
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });

    // Drive fetches from the test thread and verify the records arrive in order.
    long nextFetchOffset = 0;
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                List<ConsumerRecord<byte[], byte[]>> records = fetchedRecords.get(tp0);
                assertEquals(2, records.size());
                assertEquals(nextFetchOffset, records.get(0).offset());
                assertEquals(nextFetchOffset + 1, records.get(1).offset());
                nextFetchOffset += 2;
            }
            assertTrue(fetchedRecords().isEmpty());
        }
    }
    assertEquals(0, future.get());
}
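The invariant the responder thread enforces is the incremental fetch session rule from KIP-227: each FetchRequest either opens a new session with epoch 0 or continues the current session with the next epoch. Distilled into a standalone check (the helper name is ours, not Kafka's):

// Illustrative helper, not part of FetcherTest: a fetch request's session epoch
// is valid if it opens a new session (0) or advances the current session by one.
static boolean isValidSessionEpoch(int requestEpoch, long expectedNextEpoch) {
    return requestEpoch == 0 || requestEpoch == expectedNextEpoch;
}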
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class FetcherTest, method testGetOffsetsForTimesWithUnknownOffsetV0.
@Test
public void testGetOffsetsForTimesWithUnknownOffsetV0() {
    buildFetcher();
    // Empty map
    assertTrue(fetcher.offsetsForTimes(new HashMap<>(), time.timer(100L)).isEmpty());

    // Unknown offset
    client.reset();
    // Ensure the metadata contains the partition.
    MetadataResponse initialMetadataUpdate =
            RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
    client.updateMetadata(initialMetadataUpdate);

    // Force LIST_OFFSETS version 0
    Node node = metadata.fetch().nodes().get(0);
    apiVersions.update(node.idString(),
            NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 0));

    ListOffsetsResponseData data = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
                    .setName(tp0.topic())
                    .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
                            .setPartitionIndex(tp0.partition())
                            .setErrorCode(Errors.NONE.code())
                            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
                            .setOldStyleOffsets(Collections.emptyList())))));
    client.prepareResponseFrom(new ListOffsetsResponse(data), metadata.fetch().leaderFor(tp0));

    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap =
            fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));

    assertTrue(offsetAndTimestampMap.containsKey(tp0));
    assertNull(offsetAndTimestampMap.get(tp0));
}
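The same null-on-unknown contract surfaces through the public consumer API: offsetsForTimes returns an entry for every requested partition, mapped to null when no offset can be resolved for the timestamp. A minimal usage sketch against KafkaConsumer; the bootstrap address, group id, and topic name are illustrative:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative
props.put(ConsumerConfig.GROUP_ID_CONFIG, "offsets-for-times-demo");  // illustrative
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
    TopicPartition tp = new TopicPartition("my-topic", 0); // illustrative topic
    Map<TopicPartition, OffsetAndTimestamp> byTime =
            consumer.offsetsForTimes(Collections.singletonMap(tp, 0L));
    if (byTime.get(tp) == null) {
        // The entry is present but null: no offset/timestamp pair could be
        // resolved, matching the assertNull in the test above.
        System.out.println("No offset found for timestamp 0 in " + tp);
    }
}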
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class FetcherTest, method testListOffsetUpdateEpoch.
@Test
public void testListOffsetUpdateEpoch() {
    buildFetcher();

    // Set up metadata with leaderEpoch=1
    subscriptions.assignFromUser(singleton(tp0));
    MetadataResponse metadataWithLeaderEpochs = RequestTestUtils.metadataUpdateWithIds(
            "kafka-cluster", 1, Collections.emptyMap(), singletonMap(topicName, 4), tp -> 1, topicIds);
    client.updateMetadata(metadataWithLeaderEpochs);

    // Reset offsets to trigger a ListOffsets call
    subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);

    // When the ListOffsets response carries leaderEpoch=2, a metadata update should be triggered
    client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, 1),
            listOffsetResponse(tp0, Errors.NONE, 1L, 5L, 2));
    fetcher.resetOffsetsIfNeeded();
    consumerClient.pollNoWakeup();

    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    assertTrue(metadata.updateRequested());
    assertOptional(metadata.lastSeenLeaderEpoch(tp0), epoch -> assertEquals(2L, (long) epoch));
}
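The lastSeenLeaderEpoch bookkeeping asserted here is the same machinery an application feeds when restoring a position together with the leader epoch it was read under; if that epoch is newer than what the consumer's metadata has seen, the client refreshes metadata before fetching. A sketch against the public API (consumer construction is omitted, and the topic, offset, and epoch values are illustrative):

// Restore a position along with its leader epoch via the OffsetAndMetadata
// overload of seek; a newer epoch prompts a metadata refresh, the same
// lastSeenLeaderEpoch update the test asserts on.
TopicPartition tp = new TopicPartition("my-topic", 0);
consumer.seek(tp, new OffsetAndMetadata(5L, Optional.of(2), ""));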
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class FetcherTest, method testGetOffsetsIncludesLeaderEpoch.
@Test
public void testGetOffsetsIncludesLeaderEpoch() {
    buildFetcher();
    subscriptions.assignFromUser(singleton(tp0));
    client.updateMetadata(initialUpdateResponse);

    // Metadata update with leader epochs
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds(
            "dummy", 1, Collections.emptyMap(), Collections.singletonMap(topicName, 4), tp -> 99, topicIds);
    client.updateMetadata(metadataResponse);

    // Request the latest offset
    subscriptions.requestOffsetReset(tp0);
    fetcher.resetOffsetsIfNeeded();

    // Check for the epoch in the outgoing request
    MockClient.RequestMatcher matcher = body -> {
        if (body instanceof ListOffsetsRequest) {
            ListOffsetsRequest offsetRequest = (ListOffsetsRequest) body;
            int epoch = offsetRequest.topics().get(0).partitions().get(0).currentLeaderEpoch();
            assertTrue(epoch != ListOffsetsResponse.UNKNOWN_EPOCH,
                    "Expected Fetcher to set leader epoch in request");
            assertEquals(99, epoch, "Expected leader epoch to match epoch from metadata update");
            return true;
        } else {
            fail("Should have seen ListOffsetsRequest");
            return false;
        }
    };
    client.prepareResponse(matcher, listOffsetResponse(Errors.NONE, 1L, 5L));
    consumerClient.pollNoWakeup();
}
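The RequestMatcher passed to client.prepareResponse is how MockClient lets a test assert on the outgoing request before handing back the canned response: the matcher sees the deserialized request body, and returning false causes MockClient to reject the request and fail the test. The same idiom in a more compact form, reusing the test's own fields and helpers (it drops the explicit fail() diagnostics of the original):

MockClient.RequestMatcher expectsEpoch99 = body ->
        body instanceof ListOffsetsRequest
                && ((ListOffsetsRequest) body).topics().get(0)
                        .partitions().get(0).currentLeaderEpoch() == 99;
client.prepareResponse(expectsEpoch99, listOffsetResponse(Errors.NONE, 1L, 5L));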
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class KafkaProducerTest, method testCommitTransactionWithSendToInvalidTopic.
@Test
public void testCommitTransactionWithSendToInvalidTopic() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, emptyMap());
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));

    // Invalid topic name due to space
    String invalidTopicName = "topic abc";
    ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION,
            invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = RequestTestUtils.metadataResponse(
            new ArrayList<>(initialUpdateResponse.brokers()),
            initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(),
            topicMetadata);
    client.prepareMetadataUpdate(updateResponse);

    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        producer.beginTransaction();

        TestUtils.assertFutureError(producer.send(record), InvalidTopicException.class);
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
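What the test pins down is the transactional contract applications see: once a send inside a transaction fails (here because "topic abc" contains an illegal character), the transaction is in an error state and commitTransaction throws. The recovery pattern recommended in the KafkaProducer javadoc is to abort on non-fatal KafkaExceptions and close on fatal ones; sketched with the producer and record from the test:

try {
    producer.beginTransaction();
    producer.send(record);
    producer.commitTransaction();
} catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
    // Fatal errors: the producer cannot continue; close it and create a new instance.
    producer.close();
} catch (KafkaException e) {
    // Abortable errors, such as the InvalidTopicException above: roll back and retry.
    producer.abortTransaction();
}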