Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, the method testPartitionsForNonExistingTopic:
@Test
public void testPartitionsForNonExistingTopic() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));

    Cluster cluster = metadata.fetch();
    MetadataResponse updateResponse = RequestTestUtils.metadataResponse(cluster.nodes(),
            cluster.clusterResource().clusterId(),
            cluster.controller().id(),
            Collections.emptyList());
    client.prepareResponse(updateResponse);

    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    assertEquals(Collections.emptyList(), consumer.partitionsFor("non-exist-topic"));
}
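The createMetadata, initMetadata, and newConsumer calls above are test-local helpers in KafkaConsumerTest, not public consumer API. As a rough sketch of what initMetadata might do, assuming the metadataUpdateWith overload from Kafka's RequestTestUtils test utilities and MockClient.updateMetadata (the real helper may differ, for example by threading topic IDs through the update):

import java.util.Map;
import org.apache.kafka.clients.MockClient;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RequestTestUtils;

// Hypothetical reconstruction of the test-local helper.
private void initMetadata(MockClient mockClient, Map<String, Integer> partitionCounts) {
    // Build a one-broker metadata response covering the requested topics...
    MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWith(1, partitionCounts);
    // ...and seed the mock client so metadata.fetch() sees a live cluster.
    mockClient.updateMetadata(initialMetadata);
}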
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, the method testOffsetOfPausedPartitions:
@Test
public void testOffsetOfPausedPartitions() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 2));
    Node node = metadata.fetch().nodes().get(0);

    ConsumerPartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);

    // lookup coordinator
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // manual assignment
    Set<TopicPartition> partitions = Utils.mkSet(tp0, tp1);
    consumer.assign(partitions);

    // verify consumer's assignment
    assertEquals(partitions, consumer.assignment());

    consumer.pause(partitions);
    consumer.seekToEnd(partitions);

    // fetch and verify committed offset of two partitions
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, 0L);
    offsets.put(tp1, 0L);

    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset());

    offsets.remove(tp0);
    offsets.put(tp1, 0L);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(Collections.singleton(tp1)).get(tp1).offset());

    // fetch and verify consumer's position in the two partitions
    final Map<TopicPartition, Long> offsetResponse = new HashMap<>();
    offsetResponse.put(tp0, 3L);
    offsetResponse.put(tp1, 3L);
    client.prepareResponse(listOffsetsResponse(offsetResponse));
    assertEquals(3L, consumer.position(tp0));
    assertEquals(3L, consumer.position(tp1));

    client.requests().clear();
    consumer.unsubscribe();
    consumer.close();
}
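The pause() before seekToEnd() is what this test exercises: a paused partition fetches no records, yet committed() and position() still work because offset lookups go through the coordinator and list-offsets paths rather than through fetches. A minimal usage sketch against the public consumer API (the helper name is ours, not Kafka's):

import java.util.Collections;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

// Pause a partition, seek to its end, and read the resolved position
// without consuming any records.
static long endPositionWhilePaused(KafkaConsumer<String, String> consumer, TopicPartition tp) {
    consumer.assign(Collections.singleton(tp));
    consumer.pause(Collections.singleton(tp));     // poll() returns nothing for tp while paused
    consumer.seekToEnd(Collections.singleton(tp)); // lazy: resolved on the next position()/poll()
    long end = consumer.position(tp);              // forces the end-offset lookup
    consumer.resume(Collections.singleton(tp));    // tp becomes fetchable again
    return end;
}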
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, the private helper method consumerCloseTest:
private void consumerCloseTest(final long closeTimeoutMs,
                               List<? extends AbstractResponse> responses,
                               long waitMs,
                               boolean interrupt) throws Exception {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, false, Optional.empty());
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null);
    client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(topic, 1), topicIds));
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));

    // Poll with responses
    client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node);
    client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node);
    consumer.poll(Duration.ZERO);

    // Initiate close() after a commit request on another thread.
    // Kafka consumer is single-threaded, but the implementation allows calls on a
    // different thread as long as the calls are not executed concurrently. So this is safe.
    ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Exception> closeException = new AtomicReference<>();
    try {
        Future<?> future = executor.submit(() -> {
            consumer.commitAsync();
            try {
                consumer.close(Duration.ofMillis(closeTimeoutMs));
            } catch (Exception e) {
                closeException.set(e);
            }
        });
        // Close task should not complete until commit succeeds or close times out,
        // if close timeout is not zero.
        try {
            future.get(100, TimeUnit.MILLISECONDS);
            if (closeTimeoutMs != 0)
                fail("Close completed without waiting for commit or leave response");
        } catch (TimeoutException e) {
            // Expected exception
        }

        // Ensure close has started and queued at least one more request after commitAsync
        client.waitForRequests(2, 1000);

        // In non-graceful mode, close() times out without an exception even though commit response is pending
        for (int i = 0; i < responses.size(); i++) {
            client.waitForRequests(1, 1000);
            client.respondFrom(responses.get(i), coordinator);
            if (i != responses.size() - 1) {
                try {
                    future.get(100, TimeUnit.MILLISECONDS);
                    fail("Close completed without waiting for response");
                } catch (TimeoutException e) {
                    // Expected exception
                }
            }
        }

        if (waitMs > 0)
            time.sleep(waitMs);
        if (interrupt) {
            assertTrue(future.cancel(true), "Close terminated prematurely");
            TestUtils.waitForCondition(() -> closeException.get() != null,
                "InterruptException did not occur within timeout.");
            assertTrue(closeException.get() instanceof InterruptException,
                "Expected exception not thrown " + closeException);
        } else {
            // Should succeed without TimeoutException or ExecutionException
            future.get(500, TimeUnit.MILLISECONDS);
            assertNull(closeException.get(), "Unexpected exception during close");
        }
    } finally {
        executor.shutdownNow();
    }
}
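consumerCloseTest is a parameterized helper rather than a test itself; concrete tests drive it with different timeouts, response lists, and interrupt flags. A hedged sketch of how a graceful-close test might invoke it, reusing the offsetCommitResponse helper seen in the other snippets (the exact response construction in Kafka's tests may differ):

// Hypothetical caller of the helper above.
@Test
public void testGracefulClose() throws Exception {
    OffsetCommitResponse commitResponse = offsetCommitResponse(Collections.singletonMap(tp0, Errors.NONE));
    LeaveGroupResponse leaveGroupResponse = new LeaveGroupResponse(
        new LeaveGroupResponseData().setErrorCode(Errors.NONE.code()));
    // close() should block until both responses arrive, then complete without error.
    consumerCloseTest(5000, Arrays.asList(commitResponse, leaveGroupResponse), 0, false);
}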
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, the method verifyNoCoordinatorLookupForManualAssignmentWithOffsetCommit:
@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithOffsetCommit() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    // create a consumer with groupID with manual assignment
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.assign(singleton(tp0));

    // 1st coordinator error should cause coordinator unknown
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.COORDINATOR_NOT_AVAILABLE, groupId, node), node);
    consumer.poll(Duration.ofMillis(0));

    // 2nd coordinator error should find the correct coordinator and clear the findCoordinatorFuture
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    client.prepareResponse(offsetResponse(Collections.singletonMap(tp0, 50L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(0));
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));

    // after coordinator found, consumer should be able to commit the offset successfully
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp0, Errors.NONE)));
    consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(55L)));

    // verify the offset is committed
    client.prepareResponse(offsetResponse(Collections.singletonMap(tp0, 55L), Errors.NONE));
    assertEquals(55, consumer.committed(Collections.singleton(tp0), Duration.ZERO).get(tp0).offset());
    consumer.close(Duration.ofMillis(0));
}
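The point of the test is that assign() never triggers a rebalance, so the coordinator is only looked up lazily when an offset operation needs it. A plain usage sketch of the same pattern against a real broker (broker address, group id, and topic name below are illustrative):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ManualAssignmentExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("events", 0);
            consumer.assign(Collections.singleton(tp)); // manual assignment: no rebalance protocol
            consumer.poll(Duration.ofMillis(100));
            // Committing still needs the group coordinator, which is looked up lazily here.
            consumer.commitSync(Collections.singletonMap(tp,
                new OffsetAndMetadata(consumer.position(tp))));
        }
    }
}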
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, the method testListOffsetShouldUpateSubscriptions:
@Test
public void testListOffsetShouldUpateSubscriptions() {
    final ConsumerMetadata metadata = createMetadata(subscription);
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, singletonMap(topic, 1));

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.assign(singleton(tp0));

    // poll once to update with the current metadata
    consumer.poll(Duration.ofMillis(0));
    client.respond(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, metadata.fetch().nodes().get(0)));

    consumer.seek(tp0, 50L);
    client.prepareResponse(listOffsetsResponse(singletonMap(tp0, 90L)));

    assertEquals(singletonMap(tp0, 90L), consumer.endOffsets(Collections.singleton(tp0)));
    // correct lag result should be returned as well
    assertEquals(OptionalLong.of(40L), consumer.currentLag(tp0));

    consumer.close(Duration.ZERO);
}
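The asserted lag of 40 is just the list-offsets result minus the seeked position (90 - 50); currentLag() can answer from client-side state once the end offset is known. A minimal sketch computing the same quantity from public API calls (the helper name is ours):

import java.util.Collections;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

// Lag for one assigned partition: log-end offset minus the next fetch position.
static long lagOf(KafkaConsumer<String, String> consumer, TopicPartition tp) {
    long end = consumer.endOffsets(Collections.singleton(tp)).get(tp); // log-end offset via list-offsets
    long pos = consumer.position(tp);                                  // next offset the consumer will fetch
    return end - pos;                                                  // e.g. 90 - 50 = 40
}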