
Example 1 with FetchResponse

Use of org.apache.kafka.common.requests.FetchResponse in project kafka by apache.

From class KafkaConsumerTest, method fetchResponse.

private FetchResponse fetchResponse(Map<TopicPartition, FetchInfo> fetches) {
    LinkedHashMap<TopicPartition, PartitionData> tpResponses = new LinkedHashMap<>();
    for (Map.Entry<TopicPartition, FetchInfo> fetchEntry : fetches.entrySet()) {
        TopicPartition partition = fetchEntry.getKey();
        long fetchOffset = fetchEntry.getValue().offset;
        int fetchCount = fetchEntry.getValue().count;
        // Build an in-memory, uncompressed record batch starting at the requested fetch offset.
        MemoryRecordsBuilder records = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, fetchOffset);
        for (int i = 0; i < fetchCount; i++)
            records.append(0L, ("key-" + i).getBytes(), ("value-" + i).getBytes());
        // Partition data with no error, a high watermark of 0, and the records built above.
        tpResponses.put(partition, new FetchResponse.PartitionData(Errors.NONE, 0, records.build()));
    }
    // Throttle time of 0 ms.
    return new FetchResponse(tpResponses, 0);
}
Also used: PartitionData (org.apache.kafka.common.requests.FetchResponse.PartitionData), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), FetchResponse (org.apache.kafka.common.requests.FetchResponse), OffsetFetchResponse (org.apache.kafka.common.requests.OffsetFetchResponse), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Collections.singletonMap (java.util.Collections.singletonMap)
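
As a rough sketch of how a test might drive this helper, the snippet below builds a response carrying five records for a single partition. The FetchInfo class shown here is only a stand-in for the test class's own private offset/count holder, and the topic name "test", the start offset, and the record count are illustrative assumptions rather than values taken from the source.

// Stand-in for KafkaConsumerTest's private FetchInfo holder (start offset + record count).
class FetchInfo {
    final long offset;
    final int count;
    FetchInfo(long offset, int count) {
        this.offset = offset;
        this.count = count;
    }
}

// Illustrative call: a FetchResponse with five records for partition test-0, starting at offset 0.
TopicPartition tp = new TopicPartition("test", 0);
FetchResponse response = fetchResponse(Collections.singletonMap(tp, new FetchInfo(0L, 5)));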

Example 2 with FetchResponse

Use of org.apache.kafka.common.requests.FetchResponse in project kafka by apache.

From class Fetcher, method sendFetches.

/**
 * Set up a fetch request for any node to which we have assigned partitions and which doesn't
 * already have an in-flight fetch or pending fetch data.
 * @return number of fetches sent
 */
public int sendFetches() {
    Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests();
    for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) {
        final FetchRequest.Builder request = fetchEntry.getValue();
        final Node fetchTarget = fetchEntry.getKey();
        log.debug("Sending fetch for partitions {} to broker {}", request.fetchData().keySet(), fetchTarget);
        client.send(fetchTarget, request).addListener(new RequestFutureListener<ClientResponse>() {

            @Override
            public void onSuccess(ClientResponse resp) {
                FetchResponse response = (FetchResponse) resp.responseBody();
                if (!matchesRequestedPartitions(request, response)) {
                    // obviously we expect the broker to always send us valid responses, so this check
                    // is mainly for test cases where mock fetch responses must be manually crafted.
                    log.warn("Ignoring fetch response containing partitions {} since it does not match " + "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet());
                    return;
                }
                Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet());
                FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions);
                for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) {
                    TopicPartition partition = entry.getKey();
                    long fetchOffset = request.fetchData().get(partition).offset;
                    FetchResponse.PartitionData fetchData = entry.getValue();
                    completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator, resp.requestHeader().apiVersion()));
                }
                sensors.fetchLatency.record(resp.requestLatencyMs());
                sensors.fetchThrottleTimeSensor.record(response.throttleTimeMs());
            }

            @Override
            public void onFailure(RuntimeException e) {
                log.debug("Fetch request to {} for partitions {} failed", fetchTarget, request.fetchData().keySet(), e);
            }
        });
    }
    return fetchRequestMap.size();
}
Also used: ClientResponse (org.apache.kafka.clients.ClientResponse), Set (java.util.Set), HashSet (java.util.HashSet), Node (org.apache.kafka.common.Node), FetchResponse (org.apache.kafka.common.requests.FetchResponse), LogEntry (org.apache.kafka.common.record.LogEntry), TopicPartition (org.apache.kafka.common.TopicPartition), FetchRequest (org.apache.kafka.common.requests.FetchRequest), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap)
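
For orientation, the sketch below shows roughly where sendFetches() sits in the consumer's poll path in this version of the client. Here fetcher, client, and pollTimeout stand for the KafkaConsumer's internal Fetcher, ConsumerNetworkClient, and poll timeout, so treat it as an illustration of the flow rather than a drop-in snippet.

// Illustrative poll-loop flow (names assumed from the 0.10.x-era consumer internals):
fetcher.sendFetches();                  // queue a FetchRequest for each eligible node
client.poll(pollTimeout);               // transmit the requests; the RequestFutureListener above
                                        // turns each FetchResponse into CompletedFetch entries
Map<TopicPartition, List<ConsumerRecord<K, V>>> records = fetcher.fetchedRecords();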

Aggregations

HashMap (java.util.HashMap): 2
LinkedHashMap (java.util.LinkedHashMap): 2
Map (java.util.Map): 2
TopicPartition (org.apache.kafka.common.TopicPartition): 2
FetchResponse (org.apache.kafka.common.requests.FetchResponse): 2
Collections.singletonMap (java.util.Collections.singletonMap): 1
HashSet (java.util.HashSet): 1
Set (java.util.Set): 1
ClientResponse (org.apache.kafka.clients.ClientResponse): 1
Node (org.apache.kafka.common.Node): 1
LogEntry (org.apache.kafka.common.record.LogEntry): 1
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 1
FetchRequest (org.apache.kafka.common.requests.FetchRequest): 1
PartitionData (org.apache.kafka.common.requests.FetchResponse.PartitionData): 1
OffsetFetchResponse (org.apache.kafka.common.requests.OffsetFetchResponse): 1