
Example 61 with Errors

use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

In the class DescribeProducersHandler, the method handleResponse:

@Override
public ApiResult<TopicPartition, PartitionProducerState> handleResponse(Node broker, Set<TopicPartition> keys, AbstractResponse abstractResponse) {
    DescribeProducersResponse response = (DescribeProducersResponse) abstractResponse;
    Map<TopicPartition, PartitionProducerState> completed = new HashMap<>();
    Map<TopicPartition, Throwable> failed = new HashMap<>();
    List<TopicPartition> unmapped = new ArrayList<>();
    for (DescribeProducersResponseData.TopicResponse topicResponse : response.data().topics()) {
        for (DescribeProducersResponseData.PartitionResponse partitionResponse : topicResponse.partitions()) {
            TopicPartition topicPartition = new TopicPartition(topicResponse.name(), partitionResponse.partitionIndex());
            Errors error = Errors.forCode(partitionResponse.errorCode());
            if (error != Errors.NONE) {
                ApiError apiError = new ApiError(error, partitionResponse.errorMessage());
                handlePartitionError(topicPartition, apiError, failed, unmapped);
                continue;
            }
            List<ProducerState> activeProducers = partitionResponse.activeProducers().stream().map(activeProducer -> {
                OptionalLong currentTransactionFirstOffset = activeProducer.currentTxnStartOffset() < 0 ? OptionalLong.empty() : OptionalLong.of(activeProducer.currentTxnStartOffset());
                OptionalInt coordinatorEpoch = activeProducer.coordinatorEpoch() < 0 ? OptionalInt.empty() : OptionalInt.of(activeProducer.coordinatorEpoch());
                return new ProducerState(activeProducer.producerId(), activeProducer.producerEpoch(), activeProducer.lastSequence(), activeProducer.lastTimestamp(), coordinatorEpoch, currentTransactionFirstOffset);
            }).collect(Collectors.toList());
            completed.put(topicPartition, new PartitionProducerState(activeProducers));
        }
    }
    return new ApiResult<>(completed, failed, unmapped);
}
Also used : DescribeProducersOptions(org.apache.kafka.clients.admin.DescribeProducersOptions) ProducerState(org.apache.kafka.clients.admin.ProducerState) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) HashMap(java.util.HashMap) DescribeProducersRequest(org.apache.kafka.common.requests.DescribeProducersRequest) OptionalInt(java.util.OptionalInt) ApiError(org.apache.kafka.common.requests.ApiError) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) OptionalLong(java.util.OptionalLong) DescribeProducersResponse(org.apache.kafka.common.requests.DescribeProducersResponse) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionProducerState(org.apache.kafka.clients.admin.DescribeProducersResult.PartitionProducerState) Logger(org.slf4j.Logger) DescribeProducersRequestData(org.apache.kafka.common.message.DescribeProducersRequestData) Collection(java.util.Collection) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) Collectors(java.util.stream.Collectors) CollectionUtils(org.apache.kafka.common.utils.CollectionUtils) List(java.util.List) DescribeProducersResponseData(org.apache.kafka.common.message.DescribeProducersResponseData) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) Collections(java.util.Collections)
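
For context, DescribeProducersHandler backs the public Admin#describeProducers API. Below is a minimal caller-side sketch, assuming a local broker at localhost:9092 and a hypothetical topic "my-topic"; it is illustrative only and not part of the handler above.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeProducersResult;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.common.TopicPartition;

public class DescribeProducersExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address for illustration
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            TopicPartition partition = new TopicPartition("my-topic", 0); // hypothetical topic
            DescribeProducersResult result = admin.describeProducers(Collections.singleton(partition));
            // The PartitionProducerState returned here is the one assembled by handleResponse above
            for (ProducerState state : result.partitionResult(partition).get().activeProducers()) {
                System.out.printf("producerId=%d epoch=%d lastSequence=%d%n",
                        state.producerId(), state.producerEpoch(), state.lastSequence());
            }
        }
    }
}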

Example 62 with Errors

use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

In the class ListConsumerGroupOffsetsHandler, the method handleResponse:

@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> handleResponse(Node coordinator, Set<CoordinatorKey> groupIds, AbstractResponse abstractResponse) {
    validateKeys(groupIds);
    final OffsetFetchResponse response = (OffsetFetchResponse) abstractResponse;
    // the groupError will contain the group level error for v0-v8 OffsetFetchResponse
    Errors groupError = response.groupLevelError(groupId.idValue);
    if (groupError != Errors.NONE) {
        final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
        handleGroupError(groupId, groupError, failed, groupsToUnmap);
        return new ApiResult<>(Collections.emptyMap(), failed, new ArrayList<>(groupsToUnmap));
    } else {
        final Map<TopicPartition, OffsetAndMetadata> groupOffsetsListing = new HashMap<>();
        response.partitionDataMap(groupId.idValue).forEach((topicPartition, partitionData) -> {
            final Errors error = partitionData.error;
            if (error == Errors.NONE) {
                final long offset = partitionData.offset;
                final String metadata = partitionData.metadata;
                final Optional<Integer> leaderEpoch = partitionData.leaderEpoch;
                // Negative offset indicates that the group has no committed offset for this partition
                if (offset < 0) {
                    groupOffsetsListing.put(topicPartition, null);
                } else {
                    groupOffsetsListing.put(topicPartition, new OffsetAndMetadata(offset, leaderEpoch, metadata));
                }
            } else {
                log.warn("Skipping return offset for {} due to error {}.", topicPartition, error);
            }
        });
        return ApiResult.completed(groupId, groupOffsetsListing);
    }
}
Also used : OffsetFetchResponse(org.apache.kafka.common.requests.OffsetFetchResponse) HashMap(java.util.HashMap) Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) HashSet(java.util.HashSet)
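
ListConsumerGroupOffsetsHandler serves Admin#listConsumerGroupOffsets. A minimal caller-side sketch, assuming a local broker at localhost:9092 and a hypothetical group id "my-group":

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class ListGroupOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    admin.listConsumerGroupOffsets("my-group").partitionsToOffsetAndMetadata().get();
            // Partitions with no committed offset are mapped to null by the handler above
            offsets.forEach((tp, oam) ->
                    System.out.println(tp + " -> " + (oam == null ? "no committed offset" : oam.offset())));
        }
    }
}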

Example 63 with Errors

use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

In the class AlterConsumerGroupOffsetsHandler, the method handleResponse:

@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse(Node coordinator, Set<CoordinatorKey> groupIds, AbstractResponse abstractResponse) {
    validateKeys(groupIds);
    final OffsetCommitResponse response = (OffsetCommitResponse) abstractResponse;
    final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
    final Set<CoordinatorKey> groupsToRetry = new HashSet<>();
    final Map<TopicPartition, Errors> partitionResults = new HashMap<>();
    for (OffsetCommitResponseTopic topic : response.data().topics()) {
        for (OffsetCommitResponsePartition partition : topic.partitions()) {
            TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
            Errors error = Errors.forCode(partition.errorCode());
            if (error != Errors.NONE) {
                handleError(groupId, topicPartition, error, partitionResults, groupsToUnmap, groupsToRetry);
            } else {
                partitionResults.put(topicPartition, error);
            }
        }
    }
    if (groupsToUnmap.isEmpty() && groupsToRetry.isEmpty()) {
        return ApiResult.completed(groupId, partitionResults);
    } else {
        return ApiResult.unmapped(new ArrayList<>(groupsToUnmap));
    }
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) HashMap(java.util.HashMap) OffsetCommitResponse(org.apache.kafka.common.requests.OffsetCommitResponse) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetCommitResponseTopic(org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic) OffsetCommitResponsePartition(org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition) HashSet(java.util.HashSet)
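
AlterConsumerGroupOffsetsHandler serves Admin#alterConsumerGroupOffsets. A minimal caller-side sketch, assuming a local broker, a hypothetical group "my-group", and a hypothetical topic "my-topic"; the committed offset value 42 is arbitrary:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class AlterGroupOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            TopicPartition partition = new TopicPartition("my-topic", 0); // hypothetical topic
            // Commit offset 42 for the group; get() throws if the handler above mapped this partition to an error
            admin.alterConsumerGroupOffsets("my-group",
                    Collections.singletonMap(partition, new OffsetAndMetadata(42L)))
                 .partitionResult(partition)
                 .get();
        }
    }
}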

Example 64 with Errors

use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

In the class PartitionLeaderStrategy, the method handleResponse:

@Override
public LookupResult<TopicPartition> handleResponse(Set<TopicPartition> requestPartitions, AbstractResponse abstractResponse) {
    MetadataResponse response = (MetadataResponse) abstractResponse;
    Map<TopicPartition, Throwable> failed = new HashMap<>();
    Map<TopicPartition, Integer> mapped = new HashMap<>();
    for (MetadataResponseData.MetadataResponseTopic topicMetadata : response.data().topics()) {
        String topic = topicMetadata.name();
        Errors topicError = Errors.forCode(topicMetadata.errorCode());
        if (topicError != Errors.NONE) {
            handleTopicError(topic, topicError, requestPartitions, failed);
            continue;
        }
        for (MetadataResponseData.MetadataResponsePartition partitionMetadata : topicMetadata.partitions()) {
            TopicPartition topicPartition = new TopicPartition(topic, partitionMetadata.partitionIndex());
            Errors partitionError = Errors.forCode(partitionMetadata.errorCode());
            if (!requestPartitions.contains(topicPartition)) {
                // The metadata response always returns all partitions for the requested
                // topics, so we have to filter out any that we are not interested in.
                continue;
            }
            if (partitionError != Errors.NONE) {
                handlePartitionError(topicPartition, partitionError, failed);
                continue;
            }
            int leaderId = partitionMetadata.leaderId();
            if (leaderId >= 0) {
                mapped.put(topicPartition, leaderId);
            } else {
                log.debug("Metadata request for {} returned no error, but the leader is unknown. Will retry", topicPartition);
            }
        }
    }
    return new LookupResult<>(failed, mapped);
}
Also used : HashMap(java.util.HashMap) Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse)
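
The Errors.forCode lookup used throughout these handlers translates the wire-level error code carried in a response into the Errors enum, which in turn maps to an ApiException. A small standalone sketch (error code 3 corresponds to UNKNOWN_TOPIC_OR_PARTITION):

import org.apache.kafka.common.protocol.Errors;

public class ErrorsLookupExample {
    public static void main(String[] args) {
        short code = 3; // UNKNOWN_TOPIC_OR_PARTITION on the wire
        Errors error = Errors.forCode(code);
        if (error != Errors.NONE) {
            // message() is the static description; exception() returns the mapped ApiException
            System.out.println(error.name() + ": " + error.message());
            System.out.println("maps to " + error.exception().getClass().getSimpleName());
        }
    }
}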

Example 65 with Errors

use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

In the class KafkaConsumerTest, the method prepareOffsetCommitResponse:

private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator, final Map<TopicPartition, Long> partitionOffsets) {
    final AtomicBoolean commitReceived = new AtomicBoolean(true);
    Map<TopicPartition, Errors> response = new HashMap<>();
    for (TopicPartition partition : partitionOffsets.keySet()) response.put(partition, Errors.NONE);
    client.prepareResponseFrom(body -> {
        OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
        Map<TopicPartition, Long> commitErrors = commitRequest.offsets();
        for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
            // verify that the expected offset has been committed
            if (!commitErrors.get(partitionOffset.getKey()).equals(partitionOffset.getValue())) {
                commitReceived.set(false);
                return false;
            }
        }
        return true;
    }, offsetCommitResponse(response), coordinator);
    return commitReceived;
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Errors(org.apache.kafka.common.protocol.Errors) OffsetCommitRequest(org.apache.kafka.common.requests.OffsetCommitRequest) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OptionalLong(java.util.OptionalLong) Map(java.util.Map) Collections.singletonMap(java.util.Collections.singletonMap) AbstractMap(java.util.AbstractMap)
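
The offsetCommitResponse(...) helper referenced above is not shown in this excerpt. A plausible reconstruction is sketched below, assuming the OffsetCommitResponse constructor that takes a throttle time and a per-partition Errors map; the actual test helper may differ.

import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.OffsetCommitResponse;

// Hypothetical helper: wraps the per-partition Errors map (all Errors.NONE in the test above)
// into an OffsetCommitResponse with zero throttle time.
private OffsetCommitResponse offsetCommitResponse(Map<TopicPartition, Errors> responseData) {
    return new OffsetCommitResponse(0, responseData);
}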

Aggregations

Errors (org.apache.kafka.common.protocol.Errors): 167 usages
HashMap (java.util.HashMap): 115 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 87 usages
Map (java.util.Map): 61 usages
ArrayList (java.util.ArrayList): 46 usages
LinkedHashMap (java.util.LinkedHashMap): 31 usages
Test (org.junit.jupiter.api.Test): 31 usages
List (java.util.List): 19 usages
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 19 usages
HashSet (java.util.HashSet): 18 usages
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 18 usages
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 17 usages
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 17 usages
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 17 usages
KafkaException (org.apache.kafka.common.KafkaException): 16 usages
Node (org.apache.kafka.common.Node): 16 usages
Cluster (org.apache.kafka.common.Cluster): 15 usages
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 14 usages
ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder): 14 usages
Collections (java.util.Collections): 13 usages