Example 46 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class KafkaAdminClient, method handleDescribeTopicsByIds.

private Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsByIds(Collection<Uuid> topicIds, DescribeTopicsOptions options) {
    final Map<Uuid, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicIds.size());
    final List<Uuid> topicIdsList = new ArrayList<>();
    for (Uuid topicId : topicIds) {
        if (topicIdIsUnrepresentable(topicId)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic id '" + topicId + "' cannot be represented in a request."));
            topicFutures.put(topicId, future);
        } else if (!topicFutures.containsKey(topicId)) {
            topicFutures.put(topicId, new KafkaFutureImpl<>());
            topicIdsList.add(topicId);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopicsWithIds", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData().setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList)).setAllowAutoTopicCreation(false).setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<Uuid, Errors> errors = response.errorsByTopicId();
            for (Map.Entry<Uuid, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                Uuid topicId = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                String topicName = cluster.topicName(topicId);
                if (topicName == null) {
                    future.completeExceptionally(new InvalidTopicException("TopicId " + topicId + " not found."));
                    continue;
                }
                Errors topicError = errors.get(topicId);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicIdsList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
Also used: HashMap (java.util.HashMap), ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder), ArrayList (java.util.ArrayList), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), AbstractResponse (org.apache.kafka.common.requests.AbstractResponse), Cluster (org.apache.kafka.common.Cluster), KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl), MetadataRequestData (org.apache.kafka.common.message.MetadataRequestData), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Errors (org.apache.kafka.common.protocol.Errors), Uuid (org.apache.kafka.common.Uuid), MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException), Map (java.util.Map), TreeMap (java.util.TreeMap)
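
For context, a minimal caller-side sketch of the path above: describing topics by ID goes through Admin.describeTopics with a TopicCollection of topic IDs. The API names used here (TopicCollection.ofTopicIds, DescribeTopicsResult.allTopicIds) are as of Kafka 3.1; the bootstrap address is a placeholder, and the random Uuid stands in for an ID obtained from earlier metadata.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DescribeTopicsByIdExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address; point this at a real broker.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Stand-in ID: a real caller would take the Uuid from a previous
            // metadata lookup (a random ID will not match any actual topic).
            Uuid topicId = Uuid.randomUuid();
            Map<Uuid, TopicDescription> described = admin
                    .describeTopics(TopicCollection.ofTopicIds(Collections.singleton(topicId)))
                    .allTopicIds()
                    .get();
            System.out.println(described.get(topicId));
        }
    }
}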

Example 47 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class KafkaAdminClient, method handleDescribeTopicsByNames.

private Map<String, KafkaFuture<TopicDescription>> handleDescribeTopicsByNames(final Collection<String> topicNames, DescribeTopicsOptions options) {
    final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size());
    final ArrayList<String> topicNamesList = new ArrayList<>();
    for (String topicName : topicNames) {
        if (topicNameIsUnrepresentable(topicName)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" + topicName + "' cannot be represented in a request."));
            topicFutures.put(topicName, future);
        } else if (!topicFutures.containsKey(topicName)) {
            topicFutures.put(topicName, new KafkaFutureImpl<>());
            topicNamesList.add(topicName);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        private boolean supportsDisablingTopicCreation = true;

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            if (supportsDisablingTopicCreation)
                return new MetadataRequest.Builder(new MetadataRequestData().setTopics(convertToMetadataRequestTopic(topicNamesList)).setAllowAutoTopicCreation(false).setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
            else
                return MetadataRequest.Builder.allTopics();
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<String, Errors> errors = response.errors();
            for (Map.Entry<String, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                String topicName = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                Errors topicError = errors.get(topicName);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                if (!cluster.topics().contains(topicName)) {
                    future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + topicName + " not found."));
                    continue;
                }
                Uuid topicId = cluster.topicId(topicName);
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }

        @Override
        boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
            if (supportsDisablingTopicCreation) {
                supportsDisablingTopicCreation = false;
                return true;
            }
            return false;
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicNamesList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
Also used: HashMap (java.util.HashMap), UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException), ArrayList (java.util.ArrayList), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), AbstractResponse (org.apache.kafka.common.requests.AbstractResponse), Cluster (org.apache.kafka.common.Cluster), KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl), MetadataRequestData (org.apache.kafka.common.message.MetadataRequestData), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Errors (org.apache.kafka.common.protocol.Errors), MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), Uuid (org.apache.kafka.common.Uuid), InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException), Map (java.util.Map), TreeMap (java.util.TreeMap), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException)
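
The same call from the caller's side, by name. allTopicNames() is the accessor on 3.1+ clients (older releases expose the now-deprecated all()); the topic name and bootstrap address are placeholders.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeTopicsOptions;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribeTopicsByNameExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            Map<String, TopicDescription> described = admin
                    .describeTopics(
                            Collections.singleton("my-topic"), // placeholder topic name
                            new DescribeTopicsOptions().includeAuthorizedOperations(true))
                    .allTopicNames()
                    .get();
            described.forEach((name, description) ->
                    System.out.println(name + ": " + description.partitions().size() + " partitions"));
        }
    }
}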

Example 48 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class KafkaAdminClient, method describeLogDirs.

@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
        futures.put(brokerId, future);
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
                if (descriptions.size() > 0) {
                    future.complete(descriptions);
                } else {
                    // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
                    Errors error = response.data().errorCode() == Errors.NONE.code() ? Errors.CLUSTER_AUTHORIZATION_FAILED : Errors.forCode(response.data().errorCode());
                    future.completeExceptionally(error.exception());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        }, now);
    }
    return new DescribeLogDirsResult(new HashMap<>(futures));
}
Also used: HashMap (java.util.HashMap), AbstractResponse (org.apache.kafka.common.requests.AbstractResponse), DescribeLogDirsRequestData (org.apache.kafka.common.message.DescribeLogDirsRequestData), ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder), DescribeLogDirsResponse (org.apache.kafka.common.requests.DescribeLogDirsResponse), KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Errors (org.apache.kafka.common.protocol.Errors), DescribeLogDirsRequest (org.apache.kafka.common.requests.DescribeLogDirsRequest), Map (java.util.Map), TreeMap (java.util.TreeMap)
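
A usage sketch for the method above, assuming a 2.7+ client where DescribeLogDirsResult exposes allDescriptions() (earlier clients only have the deprecated all()/values() pair). The broker IDs and bootstrap address are placeholders.

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.LogDirDescription;

public class DescribeLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Placeholder broker IDs; use the IDs of brokers actually in the cluster.
            Map<Integer, Map<String, LogDirDescription>> byBroker =
                    admin.describeLogDirs(Arrays.asList(0, 1)).allDescriptions().get();
            byBroker.forEach((broker, dirs) ->
                    dirs.forEach((dir, description) ->
                            System.out.println("broker " + broker + ", dir " + dir
                                    + ": " + description.replicaInfos().size() + " replicas")));
        }
    }
}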

Example 49 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class KafkaAdminClient, method createAcls.

@Override
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
    final long now = time.milliseconds();
    final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final List<AclCreation> aclCreations = new ArrayList<>();
    final List<AclBinding> aclBindingsSent = new ArrayList<>();
    for (AclBinding acl : acls) {
        if (futures.get(acl) == null) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            futures.put(acl, future);
            String indefinite = acl.toFilter().findIndefiniteField();
            if (indefinite == null) {
                aclCreations.add(CreateAclsRequest.aclCreation(acl));
                aclBindingsSent.add(acl);
            } else {
                future.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " + indefinite));
            }
        }
    }
    final CreateAclsRequestData data = new CreateAclsRequestData().setCreations(aclCreations);
    runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        CreateAclsRequest.Builder createRequest(int timeoutMs) {
            return new CreateAclsRequest.Builder(data);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
            List<AclCreationResult> responses = response.results();
            Iterator<AclCreationResult> iter = responses.iterator();
            for (AclBinding aclBinding : aclBindingsSent) {
                KafkaFutureImpl<Void> future = futures.get(aclBinding);
                if (!iter.hasNext()) {
                    future.completeExceptionally(new UnknownServerException("The broker reported no creation result for the given ACL: " + aclBinding));
                } else {
                    AclCreationResult creation = iter.next();
                    Errors error = Errors.forCode(creation.errorCode());
                    ApiError apiError = new ApiError(error, creation.errorMessage());
                    if (apiError.isFailure())
                        future.completeExceptionally(apiError.exception());
                    else
                        future.complete(null);
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return new CreateAclsResult(new HashMap<>(futures));
}
Also used: HashMap (java.util.HashMap), ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder), ArrayList (java.util.ArrayList), CreateAclsResponse (org.apache.kafka.common.requests.CreateAclsResponse), CreateAclsRequest (org.apache.kafka.common.requests.CreateAclsRequest), Iterator (java.util.Iterator), InvalidRequestException (org.apache.kafka.common.errors.InvalidRequestException), LinkedList (java.util.LinkedList), List (java.util.List), AclBinding (org.apache.kafka.common.acl.AclBinding), AclCreation (org.apache.kafka.common.message.CreateAclsRequestData.AclCreation), AbstractResponse (org.apache.kafka.common.requests.AbstractResponse), CreateAclsRequestData (org.apache.kafka.common.message.CreateAclsRequestData), KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl), UnknownServerException (org.apache.kafka.common.errors.UnknownServerException), Errors (org.apache.kafka.common.protocol.Errors), ApiError (org.apache.kafka.common.requests.ApiError), AclCreationResult (org.apache.kafka.common.message.CreateAclsResponseData.AclCreationResult)
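
A usage sketch for createAcls. The principal User:alice, the topic name, and the bootstrap address are placeholders; as the loop above shows, a binding whose filter contains an indefinite field never reaches the broker and fails immediately with InvalidRequestException.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class CreateAclsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Allow the placeholder principal User:alice to read topic "my-topic" from any host.
            AclBinding binding = new AclBinding(
                    new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL),
                    new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
            // all() completes only once every binding has been created; a per-binding
            // failure surfaces here as the exception built from ApiError above.
            admin.createAcls(Collections.singleton(binding)).all().get();
        }
    }
}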

Example 50 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class Fetcher, method initializeCompletedFetch.

/**
 * Initialize a CompletedFetch object.
 */
private CompletedFetch initializeCompletedFetch(CompletedFetch nextCompletedFetch) {
    TopicPartition tp = nextCompletedFetch.partition;
    FetchResponseData.PartitionData partition = nextCompletedFetch.partitionData;
    long fetchOffset = nextCompletedFetch.nextFetchOffset;
    CompletedFetch completedFetch = null;
    Errors error = Errors.forCode(partition.errorCode());
    try {
        if (!subscriptions.hasValidPosition(tp)) {
            // this can happen when a rebalance happened while fetch is still in-flight
            log.debug("Ignoring fetched records for partition {} since it no longer has valid position", tp);
        } else if (error == Errors.NONE) {
            // we are interested in this fetch only if the beginning offset matches the
            // current consumed position
            FetchPosition position = subscriptions.position(tp);
            if (position == null || position.offset != fetchOffset) {
                log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " + "the expected offset {}", tp, fetchOffset, position);
                return null;
            }
            log.trace("Preparing to read {} bytes of data for partition {} with offset {}", FetchResponse.recordsSize(partition), tp, position);
            Iterator<? extends RecordBatch> batches = FetchResponse.recordsOrFail(partition).batches().iterator();
            completedFetch = nextCompletedFetch;
            if (!batches.hasNext() && FetchResponse.recordsSize(partition) > 0) {
                if (completedFetch.responseVersion < 3) {
                    // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                    Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                    throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions + " whose size is larger than the fetch size " + this.fetchSize + " and hence cannot be returned. Please consider upgrading your broker to 0.10.1.0 or " + "newer to avoid this issue. Alternatively, increase the fetch size on the client (using " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions);
                } else {
                    // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74)
                    throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset + ". Received a non-empty fetch response from the server, but no " + "complete records were found.");
                }
            }
            if (partition.highWatermark() >= 0) {
                log.trace("Updating high watermark for partition {} to {}", tp, partition.highWatermark());
                subscriptions.updateHighWatermark(tp, partition.highWatermark());
            }
            if (partition.logStartOffset() >= 0) {
                log.trace("Updating log start offset for partition {} to {}", tp, partition.logStartOffset());
                subscriptions.updateLogStartOffset(tp, partition.logStartOffset());
            }
            if (partition.lastStableOffset() >= 0) {
                log.trace("Updating last stable offset for partition {} to {}", tp, partition.lastStableOffset());
                subscriptions.updateLastStableOffset(tp, partition.lastStableOffset());
            }
            if (FetchResponse.isPreferredReplica(partition)) {
                subscriptions.updatePreferredReadReplica(completedFetch.partition, partition.preferredReadReplica(), () -> {
                    long expireTimeMs = time.milliseconds() + metadata.metadataExpireMs();
                    log.debug("Updating preferred read replica for partition {} to {}, set to expire at {}", tp, partition.preferredReadReplica(), expireTimeMs);
                    return expireTimeMs;
                });
            }
            nextCompletedFetch.initialized = true;
        } else if (error == Errors.NOT_LEADER_OR_FOLLOWER || error == Errors.REPLICA_NOT_AVAILABLE || error == Errors.KAFKA_STORAGE_ERROR || error == Errors.FENCED_LEADER_EPOCH || error == Errors.OFFSET_NOT_AVAILABLE) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_ID) {
            log.warn("Received unknown topic ID error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.INCONSISTENT_TOPIC_ID) {
            log.warn("Received inconsistent topic ID error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            Optional<Integer> clearedReplicaId = subscriptions.clearPreferredReadReplica(tp);
            if (!clearedReplicaId.isPresent()) {
                // If there's no preferred replica to clear, we're fetching from the leader so handle this error normally
                FetchPosition position = subscriptions.position(tp);
                if (position == null || fetchOffset != position.offset) {
                    log.debug("Discarding stale fetch response for partition {} since the fetched offset {} " + "does not match the current offset {}", tp, fetchOffset, position);
                } else {
                    handleOffsetOutOfRange(position, tp);
                }
            } else {
                log.debug("Unset the preferred read replica {} for partition {} since we got {} when fetching {}", clearedReplicaId.get(), tp, error, fetchOffset);
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            // we log the actual partition and not just the topic to help with ACL propagation issues in large clusters
            log.warn("Not authorized to read from partition {}.", tp);
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN_LEADER_EPOCH) {
            log.debug("Received unknown leader epoch error in fetch for partition {}", tp);
        } else if (error == Errors.UNKNOWN_SERVER_ERROR) {
            log.warn("Unknown server error while fetching offset {} for topic-partition {}", fetchOffset, tp);
        } else if (error == Errors.CORRUPT_MESSAGE) {
            throw new KafkaException("Encountered corrupt message when fetching offset " + fetchOffset + " for topic-partition " + tp);
        } else {
            throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching at offset " + fetchOffset + " from topic-partition " + tp);
        }
    } finally {
        if (completedFetch == null)
            nextCompletedFetch.metricAggregator.record(tp, 0, 0);
        if (error != Errors.NONE)
            // we move the partition to the end if there was an error. This way, it's more likely that partitions for
            // the same topic can remain together (allowing for more efficient serialization).
            subscriptions.movePartitionToEnd(tp);
    }
    return completedFetch;
}
Also used: RecordBatch (org.apache.kafka.common.record.RecordBatch), FetchPosition (org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Errors (org.apache.kafka.common.protocol.Errors), FetchResponseData (org.apache.kafka.common.message.FetchResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), CloseableIterator (org.apache.kafka.common.utils.CloseableIterator), Iterator (java.util.Iterator), KafkaException (org.apache.kafka.common.KafkaException), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException)
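
All five examples rely on the same Errors idiom: map the wire error code to an enum constant with Errors.forCode, compare against known constants, and call exception() to obtain the matching typed KafkaException when the error must be surfaced. A minimal standalone sketch of that idiom follows; the retry policy shown is illustrative, not Fetcher's actual handling.

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.protocol.Errors;

public class ErrorsIdiomSketch {

    // Translate a response error code into normal completion, a retry hint,
    // or a typed exception, mirroring the dispatch in initializeCompletedFetch.
    static void checkErrorCode(short code) {
        Errors error = Errors.forCode(code);
        if (error == Errors.NONE) {
            return; // success; note Errors.NONE.exception() is null, so check NONE first
        }
        if (error.exception() instanceof RetriableException) {
            // Callers typically refresh metadata and retry on retriable errors.
            System.out.println("retriable: " + error.exceptionName());
            return;
        }
        throw error.exception(); // non-retriable: surface the typed exception
    }

    public static void main(String[] args) {
        checkErrorCode(Errors.NONE.code());
        checkErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code()); // retriable
        try {
            checkErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code());
        } catch (KafkaException e) {
            System.out.println("fatal: " + e);
        }
    }
}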

Aggregations

Errors (org.apache.kafka.common.protocol.Errors): 167
HashMap (java.util.HashMap): 115
TopicPartition (org.apache.kafka.common.TopicPartition): 87
Map (java.util.Map): 61
ArrayList (java.util.ArrayList): 46
LinkedHashMap (java.util.LinkedHashMap): 31
Test (org.junit.jupiter.api.Test): 31
List (java.util.List): 19
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 19
HashSet (java.util.HashSet): 18
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 18
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 17
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 17
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 17
KafkaException (org.apache.kafka.common.KafkaException): 16
Node (org.apache.kafka.common.Node): 16
Cluster (org.apache.kafka.common.Cluster): 15
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 14
ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder): 14
Collections (java.util.Collections): 13