use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class ConsumerNetworkClient method checkDisconnects.
private void checkDisconnects(long now) {
    // any disconnects affecting requests that have already been transmitted will be handled
    // by NetworkClient, so we just need to check whether connections for any of the unsent
    // requests have been disconnected; if they have, then we complete the corresponding future
    // and set the disconnect flag in the ClientResponse
    for (Node node : unsent.nodes()) {
        if (client.connectionFailed(node)) {
            // Remove entry before invoking request callback to avoid callbacks handling
            // coordinator failures traversing the unsent list again.
            Collection<ClientRequest> requests = unsent.remove(node);
            for (ClientRequest request : requests) {
                RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) request.callback();
                AuthenticationException authenticationException = client.authenticationException(node);
                handler.onComplete(new ClientResponse(request.makeHeader(request.requestBuilder().latestAllowedVersion()),
                        request.callback(), request.destination(), request.createdTimeMs(), now, true,
                        null, authenticationException, null));
            }
        }
    }
}
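The ClientResponse constructed here carries the disconnect flag (the `true` argument) and a null response body, so it is the completion handler's job to branch on that flag. A minimal sketch of such a handler, using only public ClientResponse accessors (the class name LoggingCompletionHandler is hypothetical):

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.RequestCompletionHandler;

// Hypothetical handler showing how the disconnect flag set by checkDisconnects
// is typically consumed on the callback side.
public class LoggingCompletionHandler implements RequestCompletionHandler {
    @Override
    public void onComplete(ClientResponse response) {
        if (response.authenticationException() != null) {
            // the connection failed during authentication
            System.err.println("Authentication failed: " + response.authenticationException().getMessage());
        } else if (response.wasDisconnected()) {
            // the connection dropped before a response arrived; responseBody() is null here
            System.err.println("Request to node " + response.destination() + " was disconnected");
        } else {
            System.out.println("Received: " + response.responseBody());
        }
    }
}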
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class KafkaConsumerTest method testManualAssignmentChangeWithAutoCommitDisabled.
@Test
public void testManualAssignmentChangeWithAutoCommitDisabled() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    initMetadata(client, tpCounts);
    Node node = metadata.fetch().nodes().get(0);
    ConsumerPartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, false, groupInstanceId);
    // look up the coordinator
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    consumer.assign(singleton(tp0));
    consumer.seekToBeginning(singleton(tp0));
    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset());
    // verify that the assignment immediately changes
    assertEquals(consumer.assignment(), singleton(tp0));
    // there shouldn't be any need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L)));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1));
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));
    // new manual assignment
    consumer.assign(singleton(t2p0));
    // verify that the assignment immediately changes
    assertEquals(consumer.assignment(), singleton(t2p0));
    // auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests())
        assertNotSame(req.requestBuilder().apiKey(), ApiKeys.OFFSET_COMMIT);
    client.requests().clear();
    consumer.close();
}
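The loop over client.requests() is the ClientRequest usage of interest here: with auto-commit disabled, no queued request may target the OffsetCommit API. The same assertion can be written as a stream predicate; a sketch, assuming the same JUnit assertions are in scope:

// Equivalent to the loop above: none of the queued requests is an offset commit.
assertTrue(client.requests().stream()
        .noneMatch(req -> req.requestBuilder().apiKey() == ApiKeys.OFFSET_COMMIT));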
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class KafkaConsumerTest method verifyDeprecatedPollDoesNotTimeOutDuringMetadataUpdate.
@SuppressWarnings("deprecation")
@Test
public void verifyDeprecatedPollDoesNotTimeOutDuringMetadataUpdate() {
    final ConsumerMetadata metadata = createMetadata(subscription);
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    consumer.poll(0L);
    // The underlying client SHOULD get a fetch request
    final Queue<ClientRequest> requests = client.requests();
    assertEquals(1, requests.size());
    final Class<? extends AbstractRequest.Builder> aClass = requests.peek().requestBuilder().getClass();
    assertEquals(FetchRequest.Builder.class, aClass);
}
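Comparing builder classes works, but ClientRequest also exposes the API key directly (as the next test uses); an equivalent final assertion would be, as a sketch:

// Same check via the request's API key instead of its builder class.
assertEquals(ApiKeys.FETCH, requests.peek().apiKey());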
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class KafkaConsumerTest method verifyPollTimesOutDuringMetadataUpdate.
@Test
public void verifyPollTimesOutDuringMetadataUpdate() {
    final ConsumerMetadata metadata = createMetadata(subscription);
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    // The heartbeat thread is enabled once the join-group response is received, and it could
    // then send the sync-group request on behalf of the consumer, completing the rebalance
    // and sending out the fetch. To avoid that, we do not prepare a sync-group response here.
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
    final Queue<ClientRequest> requests = client.requests();
    assertEquals(0, requests.stream().filter(request -> request.apiKey().equals(ApiKeys.FETCH)).count());
}
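When a test needs to reason about more than one request type, the queued ClientRequest objects can be tallied by API key; a sketch using java.util.stream.Collectors alongside the assertions already in scope:

// Sketch: bucket the queued requests per API so assertions can name exactly
// what was (or was not) sent.
Map<ApiKeys, Long> requestsByApi = requests.stream()
        .collect(Collectors.groupingBy(ClientRequest::apiKey, Collectors.counting()));
assertEquals(0L, requestsByApi.getOrDefault(ApiKeys.FETCH, 0L).longValue());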
use of org.apache.kafka.clients.ClientRequest in project kafka by apache.
the class FetcherTest method testFetcherSessionEpochUpdate.
@Test
public void testFetcherSessionEpochUpdate() throws Exception {
    buildFetcher(2);
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
    client.updateMetadata(initialMetadataResponse);
    assignFromUser(Collections.singleton(tp0));
    subscriptions.seek(tp0, 0L);
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        long nextOffset = 0;
        long nextEpoch = 0;
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    int epoch = fetchRequest.metadata().epoch();
                    assertTrue(epoch == 0 || epoch == nextEpoch, String.format("Unexpected epoch expected %d got %d", nextEpoch, epoch));
                    nextEpoch++;
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    responseMap.put(tidp0, new FetchResponseData.PartitionData()
                            .setPartitionIndex(tp0.partition())
                            .setHighWatermark(nextOffset + 2)
                            .setLastStableOffset(nextOffset + 2)
                            .setLogStartOffset(0)
                            .setRecords(buildRecords(nextOffset, 2, nextOffset)));
                    nextOffset += 2;
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    long nextFetchOffset = 0;
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                List<ConsumerRecord<byte[], byte[]>> records = fetchedRecords.get(tp0);
                assertEquals(2, records.size());
                assertEquals(nextFetchOffset, records.get(0).offset());
                assertEquals(nextFetchOffset + 1, records.get(1).offset());
                nextFetchOffset += 2;
            }
            assertTrue(fetchedRecords().isEmpty());
        }
    }
    assertEquals(0, future.get());
}
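The epoch assertion in the responder thread encodes the fetch-session contract: a full fetch carries epoch 0, and each incremental fetch bumps the epoch by one. Reading those fields off a queued ClientRequest looks like this (a sketch; FetchMetadata is the type returned by FetchRequest.metadata() in the Kafka codebase):

// Sketch: rebuild the queued request and read its fetch-session metadata,
// as the responder thread above does before answering.
ClientRequest request = client.requests().peek();
FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
FetchMetadata sessionMetadata = fetchRequest.metadata();
int sessionId = sessionMetadata.sessionId();
int sessionEpoch = sessionMetadata.epoch();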