Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method testReturnRecordsDuringRebalance.
@Test
public void testReturnRecordsDuringRebalance() throws InterruptedException {
    Time time = new MockTime(1L);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    ConsumerPartitionAssignor assignor = new CooperativeStickyAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    initMetadata(client, Utils.mkMap(Utils.mkEntry(topic, 1), Utils.mkEntry(topic2, 1), Utils.mkEntry(topic3, 1)));
    consumer.subscribe(Arrays.asList(topic, topic2), getConsumerRebalanceListener(consumer));
    Node node = metadata.fetch().nodes().get(0);
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0, t2p0), null);

    // a poll with a non-zero timeout would complete three round-trips (discover, join, sync)
    TestUtils.waitForCondition(() -> {
        consumer.poll(Duration.ofMillis(100L));
        return consumer.assignment().equals(Utils.mkSet(tp0, t2p0));
    }, "Does not complete rebalance in time");

    assertEquals(Utils.mkSet(topic, topic2), consumer.subscription());
    assertEquals(Utils.mkSet(tp0, t2p0), consumer.assignment());

    // prepare a response for the outstanding fetch so that we have data available on the next poll
    Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
    fetches1.put(tp0, new FetchInfo(0, 1));
    fetches1.put(t2p0, new FetchInfo(0, 10));
    client.respondFrom(fetchResponse(fetches1), node);

    ConsumerRecords<String, String> records = consumer.poll(Duration.ZERO);

    // verify that the fetch occurred as expected
    assertEquals(11, records.count());
    assertEquals(1L, consumer.position(tp0));
    assertEquals(10L, consumer.position(t2p0));

    // prepare the next prefetch response
    fetches1.clear();
    fetches1.put(tp0, new FetchInfo(1, 1));
    fetches1.put(t2p0, new FetchInfo(10, 20));
    client.respondFrom(fetchResponse(fetches1), node);

    // subscription change
    consumer.subscribe(Arrays.asList(topic, topic3), getConsumerRebalanceListener(consumer));

    // verify that the subscription has changed but the assignment is still unchanged
    assertEquals(Utils.mkSet(topic, topic3), consumer.subscription());
    assertEquals(Utils.mkSet(tp0, t2p0), consumer.assignment());

    // mock the offset commit response for the partitions that are about to be revoked
    Map<TopicPartition, Long> partitionOffsets1 = new HashMap<>();
    partitionOffsets1.put(t2p0, 10L);
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, partitionOffsets1);

    // poll once; this does not complete the rebalance
    records = consumer.poll(Duration.ZERO);

    // clear out the prefetch so it doesn't interfere with the rest of the test
    fetches1.clear();
    fetches1.put(tp0, new FetchInfo(2, 1));
    client.respondFrom(fetchResponse(fetches1), node);

    // verify that the fetch still occurred as expected
    assertEquals(Utils.mkSet(topic, topic3), consumer.subscription());
    assertEquals(Collections.singleton(tp0), consumer.assignment());
    assertEquals(1, records.count());
    assertEquals(2L, consumer.position(tp0));

    // verify that the offset commit occurred as expected
    assertTrue(commitReceived.get());

    // mock the rebalance responses
    client.respondFrom(joinGroupFollowerResponse(assignor, 2, "memberId", "leaderId", Errors.NONE), coordinator);

    // we need to poll 1) to receive the join response and send the sync request,
    // and 2) to receive the sync response
    records = consumer.poll(Duration.ZERO);

    // the rebalance should not have completed yet
    assertEquals(Utils.mkSet(topic, topic3), consumer.subscription());
    assertEquals(Collections.singleton(tp0), consumer.assignment());
    assertEquals(1, records.count());
    assertEquals(3L, consumer.position(tp0));

    fetches1.clear();
    fetches1.put(tp0, new FetchInfo(3, 1));
    client.respondFrom(fetchResponse(fetches1), node);

    // now complete the rebalance
    client.respondFrom(syncGroupResponse(Arrays.asList(tp0, t3p0), Errors.NONE), coordinator);

    AtomicInteger count = new AtomicInteger(0);
    TestUtils.waitForCondition(() -> {
        ConsumerRecords<String, String> recs = consumer.poll(Duration.ofMillis(100L));
        return consumer.assignment().equals(Utils.mkSet(tp0, t3p0)) && count.addAndGet(recs.count()) == 1;
    }, "Does not complete rebalance in time");

    // t3p0 should now be assigned, but its records have not been fetched yet
    assertEquals(Utils.mkSet(topic, topic3), consumer.subscription());
    assertEquals(Utils.mkSet(tp0, t3p0), consumer.assignment());
    assertEquals(4L, consumer.position(tp0));
    assertEquals(0L, consumer.position(t3p0));

    fetches1.clear();
    fetches1.put(tp0, new FetchInfo(4, 1));
    fetches1.put(t3p0, new FetchInfo(0, 100));
    client.respondFrom(fetchResponse(fetches1), node);

    count.set(0);
    TestUtils.waitForCondition(() -> {
        ConsumerRecords<String, String> recs = consumer.poll(Duration.ofMillis(100L));
        return count.addAndGet(recs.count()) == 101;
    }, "Does not fetch all records in time");
    assertEquals(5L, consumer.position(tp0));
    assertEquals(100L, consumer.position(t3p0));

    client.requests().clear();
    consumer.unsubscribe();
    consumer.close();
}
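
What this test demonstrates, in production terms: with the CooperativeStickyAssignor, a rebalance no longer pauses consumption of every partition; the consumer keeps returning records from the partitions it retains while the protocol's join/sync round-trips are still in flight. Below is a minimal, hedged sketch of configuring a real consumer for this incremental rebalancing; the broker address, group id, and topic names are illustrative, not taken from the test.

    import java.time.Duration;
    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class CooperativeConsumerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // hypothetical group
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            // the cooperative assignor revokes only the partitions that actually move,
            // so retained partitions keep delivering records during a rebalance
            props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                    CooperativeStickyAssignor.class.getName());

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Arrays.asList("topic-a", "topic-b"));
                while (true) {
                    // poll() may return records even while a rebalance is in progress
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                    records.forEach(r -> System.out.printf("%s-%d@%d%n", r.topic(), r.partition(), r.offset()));
                }
            }
        }
    }
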
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method verifyDeprecatedPollDoesNotTimeOutDuringMetadataUpdate.
@SuppressWarnings("deprecation")
@Test
public void verifyDeprecatedPollDoesNotTimeOutDuringMetadataUpdate() {
    final ConsumerMetadata metadata = createMetadata(subscription);
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.poll(0L);

    // The underlying client SHOULD get a fetch request
    final Queue<ClientRequest> requests = client.requests();
    assertEquals(1, requests.size());
    final Class<? extends AbstractRequest.Builder> aClass = requests.peek().requestBuilder().getClass();
    assertEquals(FetchRequest.Builder.class, aClass);
}
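
The distinction being tested: for the deprecated poll(long), the timeout only bounds the wait for data, so the call may block indefinitely on metadata updates rather than timing out; poll(Duration), introduced by KIP-266, bounds the entire call, including metadata updates and coordinator communication. A hedged sketch of the two calls side by side, assuming an already-configured consumer:

    @SuppressWarnings("deprecation")
    static void pollVariants(KafkaConsumer<String, String> consumer) {
        // deprecated: the timeout does not cover metadata updates, which may
        // block indefinitely; hence "does not time out during metadata update"
        consumer.poll(0L);
        // preferred: the Duration bounds the whole call, metadata updates included,
        // so poll(Duration.ZERO) returns promptly even without fresh metadata
        consumer.poll(Duration.ZERO);
    }
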
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method testOffsetIsValidAfterSeek.
@Test
public void testOffsetIsValidAfterSeek() {
    SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.LATEST);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupId, Optional.empty(), false);
    consumer.assign(singletonList(tp0));
    consumer.seek(tp0, 20L);
    consumer.poll(Duration.ZERO);
    assertEquals(20L, subscription.validPosition(tp0).offset);
}
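
In other words, after assign() plus seek(), the sought offset becomes the consumer's valid position once poll() runs. A minimal sketch of the same flow against the public API, with an illustrative topic name and an already-configured consumer assumed:

    // assumes an already-configured KafkaConsumer<String, String> named consumer
    TopicPartition tp = new TopicPartition("example-topic", 0);
    consumer.assign(Collections.singletonList(tp));
    consumer.seek(tp, 20L);
    consumer.poll(Duration.ZERO);
    long pos = consumer.position(tp); // 20L: the next record fetched will be offset 20
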
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method testPollThrowsInterruptExceptionIfInterrupted.
@Test
public void testPollThrowsInterruptExceptionIfInterrupted() {
    final ConsumerMetadata metadata = createMetadata(subscription);
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, false, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);

    // interrupt the thread and call poll
    try {
        Thread.currentThread().interrupt();
        assertThrows(InterruptException.class, () -> consumer.poll(Duration.ZERO));
    } finally {
        // clear interrupted state again since this thread may be reused by JUnit
        Thread.interrupted();
        consumer.close(Duration.ofMillis(0));
    }
}
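
KafkaConsumer.poll() translates the thread's interrupt flag into Kafka's unchecked org.apache.kafka.common.errors.InterruptException, restoring the interrupt status as it does so. A hedged sketch of the shutdown pattern this enables, assuming the consumer is configured elsewhere:

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // process records ...
        }
    } catch (InterruptException e) {
        // poll() observed the interrupt; fall through to close the consumer
    } finally {
        consumer.close();
    }

If shutdown is triggered by another application thread rather than a thread interrupt, consumer.wakeup() with a WakeupException catch is the usual alternative, since wakeup() is safe to call from outside the polling thread.
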
Use of org.apache.kafka.clients.consumer.internals.ConsumerMetadata in project kafka by apache.
The class KafkaConsumerTest, method testManualAssignmentChangeWithAutoCommitEnabled.
@Test
public void testManualAssignmentChangeWithAutoCommitEnabled() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    initMetadata(client, tpCounts);
    Node node = metadata.fetch().nodes().get(0);
    ConsumerPartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);

    // look up the coordinator
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // manual assignment
    consumer.assign(singleton(tp0));
    consumer.seekToBeginning(singleton(tp0));

    // fetch the committed offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset());

    // verify that the assignment changes immediately
    assertEquals(singleton(tp0), consumer.assignment());

    // there shouldn't be any need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L)));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));

    // mock the offset commit response for the partitions that are about to be revoked
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11);

    // new manual assignment
    consumer.assign(singleton(t2p0));

    // verify that the assignment changes immediately
    assertEquals(singleton(t2p0), consumer.assignment());
    // verify that the offset commit occurred as expected
    assertTrue(commitReceived.get());

    client.requests().clear();
    consumer.close();
}
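
The behavior under test: when enable.auto.commit is true, calling assign() with a new partition set first commits the consumed offsets for the partitions being dropped, which is why the test sees a commit at offset 11 before the switch to t2p0. A minimal sketch of the same pattern against the public API, with illustrative topic names:

    // assumes enable.auto.commit=true in the consumer's configuration
    consumer.assign(Collections.singleton(new TopicPartition("topic-a", 0)));
    // ... poll and process; the position on topic-a/0 advances ...
    consumer.assign(Collections.singleton(new TopicPartition("topic-b", 0)));
    // the offsets consumed so far on topic-a/0 were auto-committed before the reassignment
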