
Example 1 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class Fetcher, method handleListOffsetResponse.

/**
 * Callback for the response of the list offset call above.
 *
 * @param timestampsToSearch The mapping from partitions to target timestamps.
 * @param listOffsetResponse The response from the server.
 * @param future The future to be completed by the response.
 */
@SuppressWarnings("deprecation")
private void handleListOffsetResponse(Map<TopicPartition, Long> timestampsToSearch, ListOffsetResponse listOffsetResponse, RequestFuture<Map<TopicPartition, OffsetData>> future) {
    Map<TopicPartition, OffsetData> timestampOffsetMap = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        ListOffsetResponse.PartitionData partitionData = listOffsetResponse.responseData().get(topicPartition);
        Errors error = partitionData.error;
        if (error == Errors.NONE) {
            if (partitionData.offsets != null) {
                // Handle v0 response
                long offset;
                if (partitionData.offsets.size() > 1) {
                    future.raise(new IllegalStateException("Unexpected partitionData response of length " + partitionData.offsets.size()));
                    return;
                } else if (partitionData.offsets.isEmpty()) {
                    offset = ListOffsetResponse.UNKNOWN_OFFSET;
                } else {
                    offset = partitionData.offsets.get(0);
                }
                log.debug("Handling v0 ListOffsetResponse response for {}. Fetched offset {}", topicPartition, offset);
                if (offset != ListOffsetResponse.UNKNOWN_OFFSET) {
                    OffsetData offsetData = new OffsetData(offset, null);
                    timestampOffsetMap.put(topicPartition, offsetData);
                }
            } else {
                // Handle v1 and later response
                log.debug("Handling ListOffsetResponse response for {}. Fetched offset {}, timestamp {}", topicPartition, partitionData.offset, partitionData.timestamp);
                if (partitionData.offset != ListOffsetResponse.UNKNOWN_OFFSET) {
                    OffsetData offsetData = new OffsetData(partitionData.offset, partitionData.timestamp);
                    timestampOffsetMap.put(topicPartition, offsetData);
                }
            }
        } else if (error == Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT) {
            // The message format on the broker side predates 0.10.0, so we simply put null in the response.
            log.debug("Cannot search by timestamp for partition {} because the message format version is before 0.10.0", topicPartition);
            timestampOffsetMap.put(topicPartition, null);
        } else if (error == Errors.NOT_LEADER_FOR_PARTITION) {
            log.debug("Attempt to fetch offsets for partition {} failed due to obsolete leadership information, retrying.", topicPartition);
            future.raise(error);
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in ListOffset request for partition {}. The topic/partition " + "may not exist or the user may not have Describe access to it", topicPartition);
            future.raise(error);
        } else {
            log.warn("Attempt to fetch offsets for partition {} failed due to: {}", topicPartition, error.message());
            future.raise(new StaleMetadataException());
        }
    }
    if (!future.isDone())
        future.complete(timestampOffsetMap);
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) ListOffsetResponse(org.apache.kafka.common.requests.ListOffsetResponse) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap)
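
The callback above leans on three Errors idioms: decoding the wire-level short with Errors.forCode, comparing against the enum singletons, and surfacing message() or the matching exception. A minimal, self-contained sketch of that round trip (the class name ErrorsRoundTrip is illustrative; it assumes a Kafka clients jar of the same vintage on the classpath):

import org.apache.kafka.common.protocol.Errors;

public class ErrorsRoundTrip {
    public static void main(String[] args) {
        // Responses carry a short error code on the wire; Errors.forCode maps it
        // back onto the enum singleton (0 is NONE by protocol definition).
        Errors error = Errors.forCode((short) 3); // 3 = UNKNOWN_TOPIC_OR_PARTITION
        if (error == Errors.NONE) {
            System.out.println("no error");
        } else {
            // Every non-NONE member carries a human-readable message and a matching
            // ApiException that callers can raise, as future.raise(error) does above.
            System.out.println(error.exceptionName() + ": " + error.message());
        }
    }
}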

Example 2 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class Fetcher, method parseCompletedFetch.

/**
 * The callback for fetch completion.
 */
private PartitionRecords<K, V> parseCompletedFetch(CompletedFetch completedFetch) {
    TopicPartition tp = completedFetch.partition;
    FetchResponse.PartitionData partition = completedFetch.partitionData;
    long fetchOffset = completedFetch.fetchedOffset;
    int bytes = 0;
    int recordsCount = 0;
    PartitionRecords<K, V> parsedRecords = null;
    Errors error = partition.error;
    try {
        if (!subscriptions.isFetchable(tp)) {
            // this can happen when a rebalance happens, or partition consumption is paused,
            // while a fetch is still in flight
            log.debug("Ignoring fetched records for partition {} since it is no longer fetchable", tp);
        } else if (error == Errors.NONE) {
            // we are interested in this fetch only if the beginning offset matches the
            // current consumed position
            Long position = subscriptions.position(tp);
            if (position == null || position != fetchOffset) {
                log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " + "the expected offset {}", tp, fetchOffset, position);
                return null;
            }
            List<ConsumerRecord<K, V>> parsed = new ArrayList<>();
            boolean skippedRecords = false;
            for (LogEntry logEntry : partition.records.deepEntries()) {
                // Skip the messages earlier than current position.
                if (logEntry.offset() >= position) {
                    parsed.add(parseRecord(tp, logEntry));
                    bytes += logEntry.sizeInBytes();
                } else
                    skippedRecords = true;
            }
            recordsCount = parsed.size();
            log.trace("Adding fetched record for partition {} with offset {} to buffered record list", tp, position);
            parsedRecords = new PartitionRecords<>(fetchOffset, tp, parsed);
            if (parsed.isEmpty() && !skippedRecords && (partition.records.sizeInBytes() > 0)) {
                if (completedFetch.responseVersion < 3) {
                    // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                    Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                    throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions + " whose size is larger than the fetch size " + this.fetchSize + " and hence cannot be returned. Please consider upgrading your broker to 0.10.1.0 or newer to avoid this issue. Alternatively, increase the fetch size on the client (using " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions);
                } else {
                    // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74)
                    throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset + ". Received a non-empty fetch response from the server, but no complete records were found.");
                }
            }
            if (partition.highWatermark >= 0) {
                log.trace("Received {} records in fetch response for partition {} with offset {}", parsed.size(), tp, position);
                subscriptions.updateHighWatermark(tp, partition.highWatermark);
            }
        } else if (error == Errors.NOT_LEADER_FOR_PARTITION) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}. The topic/partition " + "may not exist or the user may not have Describe access to it", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            if (fetchOffset != subscriptions.position(tp)) {
                log.debug("Discarding stale fetch response for partition {} since the fetched offset {}" + "does not match the current offset {}", tp, fetchOffset, subscriptions.position(tp));
            } else if (subscriptions.hasDefaultOffsetResetPolicy()) {
                log.info("Fetch offset {} is out of range for partition {}, resetting offset", fetchOffset, tp);
                subscriptions.needOffsetReset(tp);
            } else {
                throw new OffsetOutOfRangeException(Collections.singletonMap(tp, fetchOffset));
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            log.warn("Not authorized to read from topic {}.", tp.topic());
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN) {
            log.warn("Unknown error fetching data for topic-partition {}", tp);
        } else {
            throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching data");
        }
    } finally {
        completedFetch.metricAggregator.record(tp, bytes, recordsCount);
    }
    // Move the partition to the end if we received some bytes or there was an error; this way it's more
    // likely that partitions for the same topic can remain together (allowing for more efficient serialization).
    if (bytes > 0 || error != Errors.NONE)
        subscriptions.movePartitionToEnd(tp);
    return parsedRecords;
}
Also used : FetchResponse(org.apache.kafka.common.requests.FetchResponse) Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List) ArrayList(java.util.ArrayList) KafkaException(org.apache.kafka.common.KafkaException) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) LogEntry(org.apache.kafka.common.record.LogEntry) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException)
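
parseCompletedFetch separates metadata-staleness errors, which trigger a refresh and retry, from errors it surfaces to the caller. A hypothetical triage helper sketching that split; the method name and return strings are invented for illustration, while the Errors members and the RetriableException check come from the Kafka clients API:

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.protocol.Errors;

public class FetchErrorTriage {
    // Mirrors the branching above: leadership and topic-metadata errors lead to a
    // metadata refresh and retry; everything else is propagated to the caller.
    static String triage(Errors error) {
        if (error == Errors.NONE)
            return "ok";
        if (error == Errors.NOT_LEADER_FOR_PARTITION || error == Errors.UNKNOWN_TOPIC_OR_PARTITION)
            return "refresh metadata and retry";
        if (error.exception() instanceof RetriableException)
            return "retriable: " + error.message();
        return "fatal: " + error.exceptionName();
    }

    public static void main(String[] args) {
        System.out.println(triage(Errors.NOT_LEADER_FOR_PARTITION));
        // Prints the fully qualified exception name for a non-retriable error.
        System.out.println(triage(Errors.TOPIC_AUTHORIZATION_FAILED));
    }
}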

Example 3 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class RequestResponseTest, method createOffsetCommitResponse.

private OffsetCommitResponse createOffsetCommitResponse() {
    Map<TopicPartition, Errors> responseData = new HashMap<>();
    responseData.put(new TopicPartition("test", 0), Errors.NONE);
    return new OffsetCommitResponse(responseData);
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TopicPartition(org.apache.kafka.common.TopicPartition)
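
The test above only constructs the response; the consuming side typically walks the same Map<TopicPartition, Errors> and reacts to any non-NONE entry. A minimal sketch of that check (the class name and sample partitions are illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;

public class CommitErrorCheck {
    public static void main(String[] args) {
        // A per-partition error map shaped like the one passed to OffsetCommitResponse above.
        Map<TopicPartition, Errors> responseData = new HashMap<>();
        responseData.put(new TopicPartition("test", 0), Errors.NONE);
        responseData.put(new TopicPartition("test", 1), Errors.UNKNOWN_TOPIC_OR_PARTITION);

        // Typical consumer-side handling: report every partition whose commit failed.
        for (Map.Entry<TopicPartition, Errors> entry : responseData.entrySet())
            if (entry.getValue() != Errors.NONE)
                System.out.println(entry.getKey() + " failed: " + entry.getValue().message());
    }
}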

Example 4 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class DeleteTopicsResponse, method toStruct.

@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.DELETE_TOPICS.responseSchema(version));
    List<Struct> topicErrorCodeStructs = new ArrayList<>(errors.size());
    for (Map.Entry<String, Errors> topicError : errors.entrySet()) {
        Struct topicErrorCodeStruct = struct.instance(TOPIC_ERROR_CODES_KEY_NAME);
        topicErrorCodeStruct.set(TOPIC_KEY_NAME, topicError.getKey());
        topicErrorCodeStruct.set(ERROR_CODE_KEY_NAME, topicError.getValue().code());
        topicErrorCodeStructs.add(topicErrorCodeStruct);
    }
    struct.set(TOPIC_ERROR_CODES_KEY_NAME, topicErrorCodeStructs.toArray());
    return struct;
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap) Struct(org.apache.kafka.common.protocol.types.Struct)
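
toStruct writes each Errors member as its wire-level short via code(); the matching parse path maps the code back with Errors.forCode. A sketch of that inverse, modeled on the Struct-based response constructors in the same package; the key names are taken as parameters only to keep the snippet self-contained:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.types.Struct;

public class TopicErrorParser {
    // Inverse of the loop above: unpack the array of per-topic structs and map
    // each short error code back onto the Errors enum.
    static Map<String, Errors> parse(Struct struct, String arrayKey, String topicKey, String errorCodeKey) {
        Map<String, Errors> errors = new HashMap<>();
        for (Object entryObj : struct.getArray(arrayKey)) {
            Struct topicError = (Struct) entryObj;
            errors.put(topicError.getString(topicKey), Errors.forCode(topicError.getShort(errorCodeKey)));
        }
        return errors;
    }
}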

Example 5 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class LeaderAndIsrResponse, method toStruct.

@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.LEADER_AND_ISR.responseSchema(version));
    List<Struct> responseDatas = new ArrayList<>(responses.size());
    for (Map.Entry<TopicPartition, Errors> response : responses.entrySet()) {
        Struct partitionData = struct.instance(PARTITIONS_KEY_NAME);
        TopicPartition partition = response.getKey();
        partitionData.set(PARTITIONS_TOPIC_KEY_NAME, partition.topic());
        partitionData.set(PARTITIONS_PARTITION_KEY_NAME, partition.partition());
        partitionData.set(PARTITIONS_ERROR_CODE_KEY_NAME, response.getValue().code());
        responseDatas.add(partitionData);
    }
    struct.set(PARTITIONS_KEY_NAME, responseDatas.toArray());
    struct.set(ERROR_CODE_KEY_NAME, error.code());
    return struct;
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap) Struct(org.apache.kafka.common.protocol.types.Struct)
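
Unlike DeleteTopicsResponse, this response carries both a top-level error and a per-partition map. A hypothetical consumer-side check sketching the usual precedence, top-level error first and per-partition results second; the class and method names are invented for illustration:

import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;

public class LeaderAndIsrErrorCheck {
    // A non-NONE top-level error invalidates the whole response; otherwise the
    // per-partition entries are inspected individually.
    static void check(Errors topLevelError, Map<TopicPartition, Errors> responses) {
        if (topLevelError != Errors.NONE)
            throw topLevelError.exception(); // ApiException is unchecked
        for (Map.Entry<TopicPartition, Errors> entry : responses.entrySet())
            if (entry.getValue() != Errors.NONE)
                System.out.println(entry.getKey() + ": " + entry.getValue().message());
    }
}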

Aggregations

Errors (org.apache.kafka.common.protocol.Errors): 167
HashMap (java.util.HashMap): 115
TopicPartition (org.apache.kafka.common.TopicPartition): 87
Map (java.util.Map): 61
ArrayList (java.util.ArrayList): 46
LinkedHashMap (java.util.LinkedHashMap): 31
Test (org.junit.jupiter.api.Test): 31
List (java.util.List): 19
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 19
HashSet (java.util.HashSet): 18
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 18
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 17
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 17
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 17
KafkaException (org.apache.kafka.common.KafkaException): 16
Node (org.apache.kafka.common.Node): 16
Cluster (org.apache.kafka.common.Cluster): 15
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 14
ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder): 14
Collections (java.util.Collections): 13