Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method testCommitsFetchedDuringAssign.
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.assign(singletonList(tp0));
    // look up the coordinator
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // fetch the committed offset for one partition
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(Collections.singleton(tp0)).get(tp0).offset());
    consumer.assign(Arrays.asList(tp0, tp1));
    // fetch committed offsets for two partitions
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(Collections.singleton(tp0)).get(tp0).offset());
    offsets.remove(tp0);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset2, consumer.committed(Collections.singleton(tp1)).get(tp1).offset());
    consumer.close(Duration.ofMillis(0));
}
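For context, here is a minimal sketch of the public-API pattern the test exercises: manually assigning a partition and reading its committed offset. The broker address, topic name, and group id are placeholders, not taken from the test.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommittedOffsetLookup {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp0 = new TopicPartition("example-topic", 0); // placeholder topic
            consumer.assign(Collections.singletonList(tp0));
            // committed() performs the coordinator lookup and offset fetch
            // that the test above mocks with prepareResponseFrom().
            Map<TopicPartition, OffsetAndMetadata> committed =
                    consumer.committed(Collections.singleton(tp0));
            OffsetAndMetadata om = committed.get(tp0);
            System.out.println(om == null ? "no committed offset" : "committed offset: " + om.offset());
        }
    }
}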
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method testFetchProgressWithMissingPartitionPosition.
@Test
public void testFetchProgressWithMissingPartitionPosition() {
    // Verifies that we can make progress on one partition while we are awaiting
    // a reset on another partition.
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumerNoAutoCommit(time, client, subscription, metadata);
    consumer.assign(Arrays.asList(tp0, tp1));
    consumer.seekToEnd(singleton(tp0));
    consumer.seekToBeginning(singleton(tp1));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    // Expect a ListOffsets request covering both partitions: answer with a valid
    // offset for tp0 and NOT_LEADER_OR_FOLLOWER for tp1, so tp1's reset stays pending.
    client.prepareResponse(body -> {
        ListOffsetsRequest request = (ListOffsetsRequest) body;
        List<ListOffsetsPartition> partitions = request.topics().stream().flatMap(t -> {
            if (t.name().equals(topic))
                return Stream.of(t.partitions());
            else
                return Stream.empty();
        }).flatMap(List::stream).collect(Collectors.toList());
        ListOffsetsPartition expectedTp0 = new ListOffsetsPartition()
                .setPartitionIndex(tp0.partition())
                .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP);
        ListOffsetsPartition expectedTp1 = new ListOffsetsPartition()
                .setPartitionIndex(tp1.partition())
                .setTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP);
        return partitions.contains(expectedTp0) && partitions.contains(expectedTp1);
    }, listOffsetsResponse(Collections.singletonMap(tp0, 50L), Collections.singletonMap(tp1, Errors.NOT_LEADER_OR_FOLLOWER)));
    // The fetch that follows should target only tp0, starting at the freshly reset offset 50.
    client.prepareResponse(body -> {
        FetchRequest request = (FetchRequest) body;
        Map<TopicIdPartition, FetchRequest.PartitionData> fetchData = request.fetchData(topicNames);
        TopicIdPartition tidp0 = new TopicIdPartition(topicIds.get(tp0.topic()), tp0);
        return fetchData.keySet().equals(singleton(tidp0)) && fetchData.get(tidp0).fetchOffset == 50L;
    }, fetchResponse(tp0, 50L, 5));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertEquals(5, records.count());
    assertEquals(singleton(tp0), records.partitions());
}
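Seen from the application side, the behavior the test verifies looks roughly like the sketch below: after seeking, poll() can return records for partitions whose positions are already known while other partitions are still resolving theirs. This is a sketch under assumed names; the topic and the surrounding consumer setup are placeholders.

import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PartialProgressExample {
    // Assumes a consumer configured as in the earlier sketch.
    static void pollWhilePositionsResolve(KafkaConsumer<String, String> consumer) {
        TopicPartition tp0 = new TopicPartition("example-topic", 0); // placeholders
        TopicPartition tp1 = new TopicPartition("example-topic", 1);
        consumer.assign(Arrays.asList(tp0, tp1));
        consumer.seekToEnd(Collections.singleton(tp0));
        consumer.seekToBeginning(Collections.singleton(tp1));
        // A short poll may return data only for the partitions whose offset
        // lookups have completed; the rest simply remain pending.
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        for (TopicPartition tp : records.partitions())
            System.out.println(tp + ": " + records.records(tp).size() + " records");
    }
}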
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method setupThrowableConsumer.
private KafkaConsumer<String, String> setupThrowableConsumer() {
    long offset1 = 10000;
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    // Pin OFFSET_FETCH on the mock node to versions 0 through 6.
    client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.OFFSET_FETCH.id, (short) 0, (short) 6));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, true);
    consumer.assign(singletonList(tp0));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator);
    return consumer;
}
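The helper is private, so for illustration only, here is a hypothetical caller. It is not taken from the source; it merely assumes the tp0 fixture and the offset response (10000) that the helper queues on the MockClient.

@Test
public void exampleUsageOfThrowableConsumer() {
    KafkaConsumer<String, String> consumer = setupThrowableConsumer();
    // The helper has already queued a FindCoordinator response and an
    // OffsetFetch response mapping tp0 to offset 10000, so committed()
    // can complete against the MockClient without a real broker.
    assertEquals(10000L, consumer.committed(Collections.singleton(tp0)).get(tp0).offset());
}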
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method verifyNoCoordinatorLookupForManualAssignmentWithSeek.
@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, null, groupInstanceId, false);
    consumer.assign(singleton(tp0));
    consumer.seekToBeginning(singleton(tp0));
    // There shouldn't be any need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));
    consumer.close(Duration.ofMillis(0));
}
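Outside the test harness, the coordinator-free pattern this test verifies looks roughly like the sketch below: with no group.id configured and auto-commit disabled, manual assignment plus a seek lets the consumer fetch without ever contacting a group coordinator. Broker and topic names are placeholders.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CoordinatorFreeConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // No ConsumerConfig.GROUP_ID_CONFIG: auto-commit must then be off.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp0 = new TopicPartition("example-topic", 0); // placeholder
            consumer.assign(Collections.singleton(tp0));
            consumer.seekToBeginning(Collections.singleton(tp0));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            System.out.println("fetched " + records.count()
                    + " records, next position " + consumer.position(tp0));
        }
    }
}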
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method verifyHeartbeatSentWhenFetchedDataReady.
@Test
public void verifyHeartbeatSentWhenFetchedDataReady() throws Exception {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());
    client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node);
    AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator, Errors.NONE);
    // advance the mock clock past the heartbeat interval, then give the
    // background heartbeat thread real time in which to run
    time.sleep(heartbeatIntervalMs);
    Thread.sleep(heartbeatIntervalMs);
    consumer.poll(Duration.ZERO);
    assertTrue(heartbeatReceived.get());
    consumer.close(Duration.ofMillis(0));
}
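The heartbeat interval the test advances past comes from consumer configuration. As a reference point, the related settings and their approximate defaults in recent Kafka releases (values here are illustrative, not taken from the test) are:

// heartbeat.interval.ms: how often the background thread sends heartbeats (default 3s)
props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000");
// session.timeout.ms: how long the coordinator waits for a heartbeat
// before evicting the member (default 45s since Kafka 3.0)
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "45000");
// max.poll.interval.ms: maximum gap between poll() calls before a rebalance (default 5min)
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "300000");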