Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, method testWakeupWithFetchDataAvailable.
@Test
public void testWakeupWithFetchDataAvailable() throws Exception {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());
    consumer.wakeup();
    assertThrows(WakeupException.class, () -> consumer.poll(Duration.ZERO));
    // make sure the position hasn't been updated
    assertEquals(0, consumer.position(tp0));
    // the next poll should return the completed fetch
    ConsumerRecords<String, String> records = consumer.poll(Duration.ZERO);
    assertEquals(5, records.count());
    // Increment time asynchronously to clear timeouts in closing the consumer
    final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
    exec.scheduleAtFixedRate(() -> time.sleep(sessionTimeoutMs), 0L, 10L, TimeUnit.MILLISECONDS);
    consumer.close();
    exec.shutdownNow();
    exec.awaitTermination(5L, TimeUnit.SECONDS);
}
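The behavior this test covers is the public wakeup/shutdown idiom of KafkaConsumer: another thread calls wakeup(), and a blocked (or the next) poll() throws WakeupException without losing the completed fetch. A minimal sketch of that idiom follows; the class name, topic name, and configuration passed in are placeholders, not part of the test above.

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConsumerLoop implements Runnable {
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final KafkaConsumer<String, String> consumer;

    public ConsumerLoop(Properties props) {
        this.consumer = new KafkaConsumer<>(props);
    }

    @Override
    public void run() {
        try {
            consumer.subscribe(Collections.singleton("example-topic")); // placeholder topic
            while (!closed.get()) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                records.forEach(r -> System.out.println(r.offset() + ": " + r.value()));
            }
        } catch (WakeupException e) {
            // wakeup() makes poll() throw; only rethrow if we are not shutting down
            if (!closed.get()) throw e;
        } finally {
            consumer.close();
        }
    }

    // Called from another thread to stop the loop.
    public void shutdown() {
        closed.set(true);
        consumer.wakeup();
    }
}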
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, method testMissingOffsetNoResetPolicy.
@Test
public void testMissingOffsetNoResetPolicy() {
    SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false);
    consumer.assign(singletonList(tp0));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // lookup committed offset and find nothing
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    assertThrows(NoOffsetForPartitionException.class, () -> consumer.poll(Duration.ZERO));
}
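From the application side, OffsetResetStrategy.NONE corresponds to setting auto.offset.reset to "none": when there is no committed offset, poll() throws NoOffsetForPartitionException instead of silently resetting. A minimal sketch of handling that case; the broker address, group id, topic name, and class name are placeholders.

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class NoResetPolicyExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        // config equivalent of OffsetResetStrategy.NONE used in the test
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(Collections.singletonList(new TopicPartition("example-topic", 0)));
            try {
                consumer.poll(Duration.ofMillis(100));
            } catch (NoOffsetForPartitionException e) {
                // No committed offset and no reset policy: pick a starting point explicitly.
                consumer.seekToBeginning(consumer.assignment());
            }
        }
    }
}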
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, method testChangingRegexSubscription.
@Test
public void testChangingRegexSubscription() {
    String otherTopic = "other";
    TopicPartition otherTopicPartition = new TopicPartition(otherTopic, 0);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic, 1);
    partitionCounts.put(otherTopic, 1);
    topicIds.put(otherTopic, Uuid.randomUuid());
    initMetadata(client, partitionCounts);
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, false, groupInstanceId);
    Node coordinator = prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    assertEquals(singleton(topic), consumer.subscription());
    consumer.subscribe(Pattern.compile(otherTopic), getConsumerRebalanceListener(consumer));
    client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, topicIds));
    prepareRebalance(client, node, singleton(otherTopic), assignor, singletonList(otherTopicPartition), coordinator);
    consumer.poll(Duration.ZERO);
    assertEquals(singleton(otherTopic), consumer.subscription());
    consumer.close(Duration.ofMillis(0));
}
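The public API exercised here is pattern subscription: subscribe(Pattern, ConsumerRebalanceListener) matches topics against the metadata the consumer sees, and a later subscribe() call replaces the previous pattern, which is what the assertions on consumer.subscription() verify. A minimal sketch of that usage; the patterns, topic prefixes, and configuration are illustrative placeholders.

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
import java.util.Properties;
import java.util.regex.Pattern;

public class RegexSubscriptionExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        ConsumerRebalanceListener listener = new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) { }
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) { }
        };

        // Pattern subscriptions are re-evaluated against metadata, so newly created
        // topics that match the pattern are picked up automatically.
        consumer.subscribe(Pattern.compile("orders-.*"), listener);

        // A subsequent subscribe() replaces the previous pattern entirely.
        consumer.subscribe(Pattern.compile("payments-.*"), listener);

        consumer.close();
    }
}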
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, method testResetToCommittedOffset.
@Test
public void testResetToCommittedOffset() {
    SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false);
    consumer.assign(singletonList(tp0));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
    assertEquals(539L, consumer.position(tp0));
}
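The behavior verified above is that a manually assigned consumer with no explicit seek() resumes from the group's committed offset. A minimal application-side sketch of the same idea; the broker address, group id, topic name, and class name are placeholders.

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Properties;

public class ResumeFromCommittedExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

        TopicPartition tp = new TopicPartition("example-topic", 0);           // placeholder
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(Collections.singletonList(tp));
            // With no seek(), the consumer resolves the committed offset from the
            // group coordinator and uses it as the starting position.
            System.out.println("resuming from offset " + consumer.position(tp));
        }
    }
}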
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
From the class KafkaConsumerTest, method testRebalanceException.
@Test
public void testRebalanceException() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(topic), getExceptionConsumerRebalanceListener());
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator);
    client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator);
    // assign throws
    try {
        consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
        fail("Should throw exception");
    } catch (Throwable e) {
        assertEquals(partitionAssigned + singleTopicPartition, e.getCause().getMessage());
    }
    // the assignment is still updated regardless of the exception
    assertEquals(singleton(tp0), subscription.assignedPartitions());
    // close's revoke throws
    try {
        consumer.close(Duration.ofMillis(0));
        fail("Should throw exception");
    } catch (Throwable e) {
        assertEquals(partitionRevoked + singleTopicPartition, e.getCause().getCause().getMessage());
    }
    consumer.close(Duration.ofMillis(0));
    // the assignment is still cleared regardless of the exception
    assertTrue(subscription.assignedPartitions().isEmpty());
}
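The listener used in this test is deliberately built to throw from its callbacks; the test asserts that such exceptions propagate out of poll()/close() while the assignment is still updated. For contrast, a typical production ConsumerRebalanceListener looks more like the following sketch; the class name and the commit-on-revoke behavior are illustrative, not taken from the test.

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;

public class CommittingRebalanceListener implements ConsumerRebalanceListener {
    private final KafkaConsumer<?, ?> consumer;

    public CommittingRebalanceListener(KafkaConsumer<?, ?> consumer) {
        this.consumer = consumer;
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Commit whatever has been processed before ownership of these partitions moves.
        // Any exception thrown here would surface to the caller, as the test demonstrates.
        consumer.commitSync();
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        System.out.println("Assigned: " + partitions);
    }
}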