
Example 1 with FetchPosition

Use of org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition in project kafka by apache.

In the class Fetcher, method initializeCompletedFetch:

/**
 * Initialize a CompletedFetch object.
 */
private CompletedFetch initializeCompletedFetch(CompletedFetch nextCompletedFetch) {
    TopicPartition tp = nextCompletedFetch.partition;
    FetchResponseData.PartitionData partition = nextCompletedFetch.partitionData;
    long fetchOffset = nextCompletedFetch.nextFetchOffset;
    CompletedFetch completedFetch = null;
    Errors error = Errors.forCode(partition.errorCode());
    try {
        if (!subscriptions.hasValidPosition(tp)) {
            // this can happen when a rebalance occurred while the fetch was still in flight
            log.debug("Ignoring fetched records for partition {} since it no longer has a valid position", tp);
        } else if (error == Errors.NONE) {
            // we are interested in this fetch only if the beginning offset matches the
            // current consumed position
            FetchPosition position = subscriptions.position(tp);
            if (position == null || position.offset != fetchOffset) {
                log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " + "the expected offset {}", tp, fetchOffset, position);
                return null;
            }
            log.trace("Preparing to read {} bytes of data for partition {} with offset {}", FetchResponse.recordsSize(partition), tp, position);
            Iterator<? extends RecordBatch> batches = FetchResponse.recordsOrFail(partition).batches().iterator();
            completedFetch = nextCompletedFetch;
            if (!batches.hasNext() && FetchResponse.recordsSize(partition) > 0) {
                if (completedFetch.responseVersion < 3) {
                    // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                    Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                    throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions + " whose size is larger than the fetch size " + this.fetchSize + " and hence cannot be returned. Please consider upgrading your broker to 0.10.1.0 or newer to avoid this issue. Alternatively, increase the fetch size on the client (using " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions);
                } else {
                    // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74)
                    throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset + ". Received a non-empty fetch response from the server, but no " + "complete records were found.");
                }
            }
            if (partition.highWatermark() >= 0) {
                log.trace("Updating high watermark for partition {} to {}", tp, partition.highWatermark());
                subscriptions.updateHighWatermark(tp, partition.highWatermark());
            }
            if (partition.logStartOffset() >= 0) {
                log.trace("Updating log start offset for partition {} to {}", tp, partition.logStartOffset());
                subscriptions.updateLogStartOffset(tp, partition.logStartOffset());
            }
            if (partition.lastStableOffset() >= 0) {
                log.trace("Updating last stable offset for partition {} to {}", tp, partition.lastStableOffset());
                subscriptions.updateLastStableOffset(tp, partition.lastStableOffset());
            }
            if (FetchResponse.isPreferredReplica(partition)) {
                subscriptions.updatePreferredReadReplica(completedFetch.partition, partition.preferredReadReplica(), () -> {
                    long expireTimeMs = time.milliseconds() + metadata.metadataExpireMs();
                    log.debug("Updating preferred read replica for partition {} to {}, set to expire at {}", tp, partition.preferredReadReplica(), expireTimeMs);
                    return expireTimeMs;
                });
            }
            nextCompletedFetch.initialized = true;
        } else if (error == Errors.NOT_LEADER_OR_FOLLOWER || error == Errors.REPLICA_NOT_AVAILABLE || error == Errors.KAFKA_STORAGE_ERROR || error == Errors.FENCED_LEADER_EPOCH || error == Errors.OFFSET_NOT_AVAILABLE) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_ID) {
            log.warn("Received unknown topic ID error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.INCONSISTENT_TOPIC_ID) {
            log.warn("Received inconsistent topic ID error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            Optional<Integer> clearedReplicaId = subscriptions.clearPreferredReadReplica(tp);
            if (!clearedReplicaId.isPresent()) {
                // If there's no preferred replica to clear, we're fetching from the leader so handle this error normally
                FetchPosition position = subscriptions.position(tp);
                if (position == null || fetchOffset != position.offset) {
                    log.debug("Discarding stale fetch response for partition {} since the fetched offset {} " + "does not match the current offset {}", tp, fetchOffset, position);
                } else {
                    handleOffsetOutOfRange(position, tp);
                }
            } else {
                log.debug("Unset the preferred read replica {} for partition {} since we got {} when fetching {}", clearedReplicaId.get(), tp, error, fetchOffset);
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            // we log the actual partition and not just the topic to help with ACL propagation issues in large clusters
            log.warn("Not authorized to read from partition {}.", tp);
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN_LEADER_EPOCH) {
            log.debug("Received unknown leader epoch error in fetch for partition {}", tp);
        } else if (error == Errors.UNKNOWN_SERVER_ERROR) {
            log.warn("Unknown server error while fetching offset {} for topic-partition {}", fetchOffset, tp);
        } else if (error == Errors.CORRUPT_MESSAGE) {
            throw new KafkaException("Encountered corrupt message when fetching offset " + fetchOffset + " for topic-partition " + tp);
        } else {
            throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching at offset " + fetchOffset + " from topic-partition " + tp);
        }
    } finally {
        if (completedFetch == null)
            nextCompletedFetch.metricAggregator.record(tp, 0, 0);
        if (error != Errors.NONE)
            // we move the partition to the end if there was an error. This way, it's more likely that partitions for
            // the same topic can remain together (allowing for more efficient serialization).
            subscriptions.movePartitionToEnd(tp);
    }
    return completedFetch;
}
Also used : RecordBatch(org.apache.kafka.common.record.RecordBatch) FetchPosition(org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Errors(org.apache.kafka.common.protocol.Errors) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) CloseableIterator(org.apache.kafka.common.utils.CloseableIterator) Iterator(java.util.Iterator) KafkaException(org.apache.kafka.common.KafkaException) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException)
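
The core guard in this method is the offset check: a completed fetch is only consumed if its starting offset still equals the subscription's current position, otherwise the response is from an obsolete request. Below is a minimal, self-contained sketch of that pattern; Position and Completed are hypothetical stand-ins, not Kafka's internal classes.

import java.util.Optional;

public class StaleFetchGuardSketch {

    // Hypothetical stand-in for SubscriptionState.FetchPosition; only the offset matters here.
    record Position(long offset) { }

    // Hypothetical stand-in for a CompletedFetch: the partition and the offset the fetch started at.
    record Completed(String partition, long fetchOffset) { }

    // Mirrors the `position == null || position.offset != fetchOffset` guard in Example 1.
    static boolean isUsable(Completed fetch, Optional<Position> current) {
        return current.map(p -> p.offset() == fetch.fetchOffset()).orElse(false);
    }

    public static void main(String[] args) {
        Completed fetch = new Completed("topic-0", 42L);
        System.out.println(isUsable(fetch, Optional.of(new Position(42L)))); // true: offsets line up
        System.out.println(isUsable(fetch, Optional.of(new Position(45L)))); // false: position moved on
        System.out.println(isUsable(fetch, Optional.empty()));               // false: no valid position
    }
}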

Example 2 with FetchPosition

Use of org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition in project kafka by apache.

In the class Fetcher, method fetchRecords:

private Fetch<K, V> fetchRecords(CompletedFetch completedFetch, int maxRecords) {
    if (!subscriptions.isAssigned(completedFetch.partition)) {
        // this can happen when a rebalance happened before fetched records are returned to the consumer's poll call
        log.debug("Not returning fetched records for partition {} since it is no longer assigned", completedFetch.partition);
    } else if (!subscriptions.isFetchable(completedFetch.partition)) {
        // this can happen when a partition is paused before fetched records are returned to the consumer's
        // poll call or if the offset is being reset
        log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", completedFetch.partition);
    } else {
        FetchPosition position = subscriptions.position(completedFetch.partition);
        if (position == null) {
            throw new IllegalStateException("Missing position for fetchable partition " + completedFetch.partition);
        }
        if (completedFetch.nextFetchOffset == position.offset) {
            List<ConsumerRecord<K, V>> partRecords = completedFetch.fetchRecords(maxRecords);
            log.trace("Returning {} fetched records at offset {} for assigned partition {}", partRecords.size(), position, completedFetch.partition);
            boolean positionAdvanced = false;
            if (completedFetch.nextFetchOffset > position.offset) {
                FetchPosition nextPosition = new FetchPosition(completedFetch.nextFetchOffset, completedFetch.lastEpoch, position.currentLeader);
                log.trace("Updating fetch position from {} to {} for partition {} and returning {} records from `poll()`", position, nextPosition, completedFetch.partition, partRecords.size());
                subscriptions.position(completedFetch.partition, nextPosition);
                positionAdvanced = true;
            }
            Long partitionLag = subscriptions.partitionLag(completedFetch.partition, isolationLevel);
            if (partitionLag != null)
                this.sensors.recordPartitionLag(completedFetch.partition, partitionLag);
            Long lead = subscriptions.partitionLead(completedFetch.partition);
            if (lead != null) {
                this.sensors.recordPartitionLead(completedFetch.partition, lead);
            }
            return Fetch.forPartition(completedFetch.partition, partRecords, positionAdvanced);
        } else {
            // these records aren't next in line based on the last consumed position, ignore them
            // they must be from an obsolete request
            log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", completedFetch.partition, completedFetch.nextFetchOffset, position);
        }
    }
    log.trace("Draining fetched records for partition {}", completedFetch.partition);
    completedFetch.drain();
    return Fetch.empty();
}
Also used : FetchPosition(org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition) List(java.util.List) ArrayList(java.util.ArrayList)
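
The interesting step here is that the position is advanced only when the fetch actually made progress: nextFetchOffset moves past position.offset once records are drained. Here is a tiny sketch of that rule, under the simplifying assumption that a plain long offset captures the position (Kafka's FetchPosition also carries an epoch and the current leader).

public class PositionAdvanceSketch {

    // Hypothetical stand-in for FetchPosition, reduced to the offset.
    record Position(long offset) { }

    // Mirrors the `completedFetch.nextFetchOffset > position.offset` branch in Example 2:
    // only move forward, and only when records were actually handed back.
    static Position maybeAdvance(Position current, long nextFetchOffset) {
        return nextFetchOffset > current.offset() ? new Position(nextFetchOffset) : current;
    }

    public static void main(String[] args) {
        Position p = new Position(100L);
        System.out.println(maybeAdvance(p, 105L)); // Position[offset=105] after consuming 100..104
        System.out.println(maybeAdvance(p, 100L)); // unchanged: no records were returned
    }
}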

Example 3 with FetchPosition

Use of org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition in project kafka by apache.

In the class Fetcher, method resetOffsetIfNeeded:

// Visible for testing
void resetOffsetIfNeeded(TopicPartition partition, OffsetResetStrategy requestedResetStrategy, ListOffsetData offsetData) {
    FetchPosition position = new FetchPosition(offsetData.offset,
            // This will ensure we skip validation
            Optional.empty(),
            metadata.currentLeader(partition));
    offsetData.leaderEpoch.ifPresent(epoch -> metadata.updateLastSeenEpochIfNewer(partition, epoch));
    subscriptions.maybeSeekUnvalidated(partition, position, requestedResetStrategy);
}
Also used : FetchPosition(org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition)
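
The empty Optional passed as the epoch is what makes the subsequent seek skip validation: with no offset epoch recorded, there is nothing to compare against the leader's epoch. A hypothetical sketch of that rule (Position here is a stand-in, not Kafka's class):

import java.util.Optional;

public class SkipValidationSketch {

    // Hypothetical stand-in for FetchPosition: offset plus the epoch it was read under, if any.
    record Position(long offset, Optional<Integer> offsetEpoch) { }

    // An empty epoch means there is nothing to validate against the leader, so the
    // position can be used immediately, as in Example 3's reset path.
    static boolean needsValidation(Position p) {
        return p.offsetEpoch().isPresent();
    }

    public static void main(String[] args) {
        Position afterReset = new Position(0L, Optional.empty());
        Position fetched = new Position(10L, Optional.of(5));
        System.out.println(needsValidation(afterReset)); // false: validation skipped
        System.out.println(needsValidation(fetched));    // true: must check against the leader epoch
    }
}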

Example 4 with FetchPosition

Use of org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition in project kafka by apache.

In the class Fetcher, method validateOffsetsAsync:

/**
 * For each partition which needs validation, make an asynchronous request to get the end-offsets for the partition
 * with the epoch less than or equal to the epoch the partition last saw.
 *
 * Requests are grouped by Node for efficiency.
 */
private void validateOffsetsAsync(Map<TopicPartition, FetchPosition> partitionsToValidate) {
    final Map<Node, Map<TopicPartition, FetchPosition>> regrouped = regroupFetchPositionsByLeader(partitionsToValidate);
    long nextResetTimeMs = time.milliseconds() + requestTimeoutMs;
    regrouped.forEach((node, fetchPositions) -> {
        if (node.isEmpty()) {
            metadata.requestUpdate();
            return;
        }
        NodeApiVersions nodeApiVersions = apiVersions.get(node.idString());
        if (nodeApiVersions == null) {
            client.tryConnect(node);
            return;
        }
        if (!hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) {
            log.debug("Skipping validation of fetch offsets for partitions {} since the broker does not " + "support the required protocol version (introduced in Kafka 2.3)", fetchPositions.keySet());
            for (TopicPartition partition : fetchPositions.keySet()) {
                subscriptions.completeValidation(partition);
            }
            return;
        }
        subscriptions.setNextAllowedRetry(fetchPositions.keySet(), nextResetTimeMs);
        RequestFuture<OffsetForEpochResult> future = offsetsForLeaderEpochClient.sendAsyncRequest(node, fetchPositions);
        future.addListener(new RequestFutureListener<OffsetForEpochResult>() {

            @Override
            public void onSuccess(OffsetForEpochResult offsetsResult) {
                List<SubscriptionState.LogTruncation> truncations = new ArrayList<>();
                if (!offsetsResult.partitionsToRetry().isEmpty()) {
                    subscriptions.setNextAllowedRetry(offsetsResult.partitionsToRetry(), time.milliseconds() + retryBackoffMs);
                    metadata.requestUpdate();
                }
                // For each OffsetsForLeader response, check if the end-offset is lower than our current offset
                // for the partition. If so, it means we have experienced log truncation and need to reposition
                // that partition's offset.
                // 
                // In addition, check whether the returned offset and epoch are valid. If not, then we should reset
                // its offset if reset policy is configured, or throw out of range exception.
                offsetsResult.endOffsets().forEach((topicPartition, respEndOffset) -> {
                    FetchPosition requestPosition = fetchPositions.get(topicPartition);
                    Optional<SubscriptionState.LogTruncation> truncationOpt = subscriptions.maybeCompleteValidation(topicPartition, requestPosition, respEndOffset);
                    truncationOpt.ifPresent(truncations::add);
                });
                if (!truncations.isEmpty()) {
                    maybeSetOffsetForLeaderException(buildLogTruncationException(truncations));
                }
            }

            @Override
            public void onFailure(RuntimeException e) {
                subscriptions.requestFailed(fetchPositions.keySet(), time.milliseconds() + retryBackoffMs);
                metadata.requestUpdate();
                if (!(e instanceof RetriableException)) {
                    maybeSetOffsetForLeaderException(e);
                }
            }
        });
    });
}
Also used : FetchPosition(org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition) TopicPartition(org.apache.kafka.common.TopicPartition) Node(org.apache.kafka.common.Node) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Optional(java.util.Optional) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) OffsetForEpochResult(org.apache.kafka.clients.consumer.internals.OffsetsForLeaderEpochClient.OffsetForEpochResult) RetriableException(org.apache.kafka.common.errors.RetriableException)
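
Before any request goes out, the positions are regrouped by leader node so that a single OffsetsForLeaderEpoch request can cover all of a broker's partitions. A self-contained sketch of that regrouping, with string node ids and a hypothetical Position record standing in for Kafka's types; note that the real code requests a metadata update for partitions whose leader is unknown, whereas this sketch simply skips them.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class RegroupByLeaderSketch {

    // Hypothetical stand-in for FetchPosition: the offset plus the current leader's node id, if known.
    record Position(long offset, Optional<String> leaderId) { }

    // Mirrors regroupFetchPositionsByLeader: bucket partitions under their leader so
    // one validation request per node covers all of that node's partitions.
    static Map<String, Map<String, Position>> regroupByLeader(Map<String, Position> byPartition) {
        Map<String, Map<String, Position>> byNode = new HashMap<>();
        byPartition.forEach((tp, pos) -> pos.leaderId().ifPresent(node ->
                byNode.computeIfAbsent(node, n -> new HashMap<>()).put(tp, pos)));
        return byNode;
    }

    public static void main(String[] args) {
        Map<String, Position> positions = Map.of(
                "t-0", new Position(5L, Optional.of("broker-1")),
                "t-1", new Position(9L, Optional.of("broker-1")),
                "t-2", new Position(3L, Optional.of("broker-2")));
        // Prints two buckets: broker-1 with t-0 and t-1, broker-2 with t-2.
        System.out.println(regroupByLeader(positions));
    }
}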

Example 5 with FetchPosition

Use of org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition in project kafka by apache.

In the class Fetcher, method prepareFetchRequests:

/**
 * Create fetch requests for all nodes for which we have assigned partitions
 * that have no existing requests in flight.
 */
private Map<Node, FetchSessionHandler.FetchRequestData> prepareFetchRequests() {
    Map<Node, FetchSessionHandler.Builder> fetchable = new LinkedHashMap<>();
    validatePositionsOnMetadataChange();
    long currentTimeMs = time.milliseconds();
    Map<String, Uuid> topicIds = metadata.topicIds();
    for (TopicPartition partition : fetchablePartitions()) {
        FetchPosition position = this.subscriptions.position(partition);
        if (position == null) {
            throw new IllegalStateException("Missing position for fetchable partition " + partition);
        }
        Optional<Node> leaderOpt = position.currentLeader.leader;
        if (!leaderOpt.isPresent()) {
            log.debug("Requesting metadata update for partition {} since the position {} is missing the current leader node", partition, position);
            metadata.requestUpdate();
            continue;
        }
        // Use the preferred read replica if set, otherwise the position's leader
        Node node = selectReadReplica(partition, leaderOpt.get(), currentTimeMs);
        if (client.isUnavailable(node)) {
            client.maybeThrowAuthFailure(node);
            // If we try to send during the reconnect backoff window, then the request is just
            // going to be failed anyway before being sent, so skip the send for now
            log.trace("Skipping fetch for partition {} because node {} is awaiting reconnect backoff", partition, node);
        } else if (this.nodesWithPendingFetchRequests.contains(node.id())) {
            log.trace("Skipping fetch for partition {} because previous request to {} has not been processed", partition, node);
        } else {
            // if there is a leader and no in-flight requests, issue a new fetch
            FetchSessionHandler.Builder builder = fetchable.get(node);
            if (builder == null) {
                int id = node.id();
                FetchSessionHandler handler = sessionHandler(id);
                if (handler == null) {
                    handler = new FetchSessionHandler(logContext, id);
                    sessionHandlers.put(id, handler);
                }
                builder = handler.newBuilder();
                fetchable.put(node, builder);
            }
            builder.add(partition, new FetchRequest.PartitionData(topicIds.getOrDefault(partition.topic(), Uuid.ZERO_UUID), position.offset, FetchRequest.INVALID_LOG_START_OFFSET, this.fetchSize, position.currentLeader.epoch, Optional.empty()));
            log.debug("Added {} fetch request for partition {} at position {} to node {}", isolationLevel, partition, position, node);
        }
    }
    Map<Node, FetchSessionHandler.FetchRequestData> reqs = new LinkedHashMap<>();
    for (Map.Entry<Node, FetchSessionHandler.Builder> entry : fetchable.entrySet()) {
        reqs.put(entry.getKey(), entry.getValue().build());
    }
    return reqs;
}
Also used : Node(org.apache.kafka.common.Node) FetchPosition(org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition) LinkedHashMap(java.util.LinkedHashMap) Uuid(org.apache.kafka.common.Uuid) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap)
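
The shape of the result is one request body per node, each listing that node's partitions with fetch offsets taken from their current positions. A reduced sketch of the grouping, omitting the availability and in-flight checks (Position and PartitionData are hypothetical stand-ins for Kafka's types):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;

public class PrepareFetchSketch {

    // Hypothetical stand-ins for FetchPosition and FetchRequest.PartitionData.
    record Position(long offset, Optional<String> leaderId) { }
    record PartitionData(long fetchOffset) { }

    // Mirrors prepareFetchRequests in Example 5: one builder per leader node, each
    // entry carrying the fetch offset from the partition's current position. Node
    // availability and pending-request checks are omitted for brevity.
    static Map<String, Map<String, PartitionData>> prepare(Map<String, Position> positions) {
        Map<String, Map<String, PartitionData>> reqs = new LinkedHashMap<>();
        positions.forEach((tp, pos) -> pos.leaderId().ifPresent(node ->
                reqs.computeIfAbsent(node, n -> new LinkedHashMap<>())
                    .put(tp, new PartitionData(pos.offset()))));
        return reqs;
    }

    public static void main(String[] args) {
        Map<String, Position> positions = new LinkedHashMap<>();
        positions.put("t-0", new Position(17L, Optional.of("broker-1")));
        positions.put("t-1", new Position(4L, Optional.empty())); // no leader: real code requests a metadata update
        System.out.println(prepare(positions)); // {broker-1={t-0=PartitionData[fetchOffset=17]}}
    }
}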

Aggregations

FetchPosition (org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition) 5
HashMap (java.util.HashMap) 3
LinkedHashMap (java.util.LinkedHashMap) 3
Map (java.util.Map) 3
TopicPartition (org.apache.kafka.common.TopicPartition) 3
ArrayList (java.util.ArrayList) 2
Iterator (java.util.Iterator) 2
List (java.util.List) 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 2
FetchSessionHandler (org.apache.kafka.clients.FetchSessionHandler) 2
Node (org.apache.kafka.common.Node) 2
Uuid (org.apache.kafka.common.Uuid) 2
Closeable (java.io.Closeable) 1
ByteBuffer (java.nio.ByteBuffer) 1
ArrayDeque (java.util.ArrayDeque) 1
Collection (java.util.Collection) 1
Collections (java.util.Collections) 1
Comparator (java.util.Comparator) 1
HashSet (java.util.HashSet) 1
Optional (java.util.Optional) 1