Example 1 with TopicAuthorizationException

Use of org.apache.kafka.common.errors.TopicAuthorizationException in the apache/kafka project.

From the class KafkaProducer, method waitOnMetadata:

/**
 * Wait for cluster metadata including partitions for the given topic to be available.
 *
 * @param topic The topic we want metadata for
 * @param partition A specific partition expected to exist in metadata, or null if there's no preference
 * @param maxWaitMs The maximum time in ms for waiting on the metadata
 * @return The cluster containing topic metadata and the amount of time we waited in ms
 */
private ClusterAndWaitTime waitOnMetadata(String topic, Integer partition, long maxWaitMs) throws InterruptedException {
    // add topic to metadata topic list if it is not there already and reset expiry
    metadata.add(topic);
    Cluster cluster = metadata.fetch();
    Integer partitionsCount = cluster.partitionCountForTopic(topic);
    // Return cached metadata if we have it, and if the record's partition is either undefined
    // or within the known partition range
    if (partitionsCount != null && (partition == null || partition < partitionsCount))
        return new ClusterAndWaitTime(cluster, 0);
    long begin = time.milliseconds();
    long remainingWaitMs = maxWaitMs;
    long elapsed;
    // Issue metadata requests until we have metadata for the topic and the requested partition,
    // or until maxWaitMs is exceeded. This is necessary in case the metadata
    // is stale and the number of partitions for this topic has increased in the meantime.
    do {
        log.trace("Requesting metadata update for topic {}.", topic);
        int version = metadata.requestUpdate();
        sender.wakeup();
        try {
            metadata.awaitUpdate(version, remainingWaitMs);
        } catch (TimeoutException ex) {
            // Rethrow with original maxWaitMs to prevent logging exception with remainingWaitMs
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        }
        cluster = metadata.fetch();
        elapsed = time.milliseconds() - begin;
        if (elapsed >= maxWaitMs)
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        if (cluster.unauthorizedTopics().contains(topic))
            throw new TopicAuthorizationException(topic);
        remainingWaitMs = maxWaitMs - elapsed;
        partitionsCount = cluster.partitionCountForTopic(topic);
    } while (partitionsCount == null);
    if (partition != null && partition >= partitionsCount) {
        throw new KafkaException(String.format("Invalid partition given with record: %d is not in the range [0...%d).", partition, partitionsCount));
    }
    return new ClusterAndWaitTime(cluster, elapsed);
}
Also used: AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Cluster (org.apache.kafka.common.Cluster), KafkaException (org.apache.kafka.common.KafkaException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), TimeoutException (org.apache.kafka.common.errors.TimeoutException)
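For context, here is a minimal caller sketch (the broker address and topic name are hypothetical): waitOnMetadata runs inside KafkaProducer.send(), so an unauthorized topic surfaces there as a synchronous TopicAuthorizationException, while exhausting maxWaitMs surfaces as a TimeoutException.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.TopicAuthorizationException;

public class ProducerAuthExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker address
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // send() calls waitOnMetadata() before appending the record
            producer.send(new ProducerRecord<>("restricted-topic", "key", "value"));
        } catch (TopicAuthorizationException e) {
            // thrown by waitOnMetadata() when the cluster marks the topic unauthorized
            System.err.println("Not authorized to write to: " + e.unauthorizedTopics());
        } catch (TimeoutException e) {
            // thrown when metadata could not be fetched within maxWaitMs
            System.err.println("Metadata wait timed out: " + e.getMessage());
        }
    }
}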

Example 2 with TopicAuthorizationException

Use of org.apache.kafka.common.errors.TopicAuthorizationException in the apache/kafka project.

From the class Sender, method completeBatch:

/**
 * Complete or retry the given batch of records.
 *
 * @param batch The record batch
 * @param response The produce response
 * @param correlationId The correlation id for the request
 * @param now The current POSIX timestamp in milliseconds
 */
private void completeBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, long correlationId, long now) {
    Errors error = response.error;
    if (error != Errors.NONE && canRetry(batch, error)) {
        // retry
        log.warn("Got error produce response with correlation id {} on topic-partition {}, retrying ({} attempts left). Error: {}", correlationId, batch.topicPartition, this.retries - batch.attempts() - 1, error);
        this.accumulator.reenqueue(batch, now);
        this.sensors.recordRetries(batch.topicPartition.topic(), batch.recordCount);
    } else {
        RuntimeException exception;
        if (error == Errors.TOPIC_AUTHORIZATION_FAILED)
            exception = new TopicAuthorizationException(batch.topicPartition.topic());
        else
            exception = error.exception();
        // tell the user the result of their request
        batch.done(response.baseOffset, response.logAppendTime, exception);
        this.accumulator.deallocate(batch);
        if (error != Errors.NONE)
            this.sensors.recordErrors(batch.topicPartition.topic(), batch.recordCount);
    }
    if (error.exception() instanceof InvalidMetadataException) {
        if (error.exception() instanceof UnknownTopicOrPartitionException)
            log.warn("Received unknown topic or partition error in produce request on partition {}. The " + "topic/partition may not exist or the user may not have Describe access to it", batch.topicPartition);
        metadata.requestUpdate();
    }
    // Unmute the completed partition.
    if (guaranteeMessageOrder)
        this.accumulator.unmutePartition(batch.topicPartition);
}
Also used: Errors (org.apache.kafka.common.protocol.Errors), InvalidMetadataException (org.apache.kafka.common.errors.InvalidMetadataException), UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException)
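Since completeBatch() reports the error through batch.done(), a producer typically observes the TopicAuthorizationException in its send Callback rather than as a thrown exception. A minimal sketch (the class name is hypothetical), registered per record via producer.send(record, new AuthAwareCallback()):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.TopicAuthorizationException;

public class AuthAwareCallback implements Callback {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception instanceof TopicAuthorizationException) {
            // authorization errors are not retriable, so the Sender fails the batch
            // instead of re-enqueueing it
            System.err.println("Produce rejected for unauthorized topic: " + exception.getMessage());
        } else if (exception != null) {
            System.err.println("Produce failed: " + exception);
        } else {
            System.out.println("Record appended at offset " + metadata.offset());
        }
    }
}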

Example 3 with TopicAuthorizationException

Use of org.apache.kafka.common.errors.TopicAuthorizationException in the apache/kafka project.

From the class Fetcher, method parseCompletedFetch:

/**
 * The callback for fetch completion
 */
private PartitionRecords<K, V> parseCompletedFetch(CompletedFetch completedFetch) {
    TopicPartition tp = completedFetch.partition;
    FetchResponse.PartitionData partition = completedFetch.partitionData;
    long fetchOffset = completedFetch.fetchedOffset;
    int bytes = 0;
    int recordsCount = 0;
    PartitionRecords<K, V> parsedRecords = null;
    Errors error = partition.error;
    try {
        if (!subscriptions.isFetchable(tp)) {
            // this can happen when a rebalance happened or partition consumption was paused
            // while the fetch was still in flight
            log.debug("Ignoring fetched records for partition {} since it is no longer fetchable", tp);
        } else if (error == Errors.NONE) {
            // we are interested in this fetch only if the beginning offset matches the
            // current consumed position
            Long position = subscriptions.position(tp);
            if (position == null || position != fetchOffset) {
                log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " + "the expected offset {}", tp, fetchOffset, position);
                return null;
            }
            List<ConsumerRecord<K, V>> parsed = new ArrayList<>();
            boolean skippedRecords = false;
            for (LogEntry logEntry : partition.records.deepEntries()) {
                // Skip the messages earlier than current position.
                if (logEntry.offset() >= position) {
                    parsed.add(parseRecord(tp, logEntry));
                    bytes += logEntry.sizeInBytes();
                } else
                    skippedRecords = true;
            }
            recordsCount = parsed.size();
            log.trace("Adding fetched record for partition {} with offset {} to buffered record list", tp, position);
            parsedRecords = new PartitionRecords<>(fetchOffset, tp, parsed);
            if (parsed.isEmpty() && !skippedRecords && (partition.records.sizeInBytes() > 0)) {
                if (completedFetch.responseVersion < 3) {
                    // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                    Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                    throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions + " whose size is larger than the fetch size " + this.fetchSize + " and hence cannot be returned. Please consider upgrading your broker to 0.10.1.0 or " + "newer to avoid this issue. Alternatively, increase the fetch size on the client (using " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions);
                } else {
                    // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74)
                    throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset + ". Received a non-empty fetch response from the server, but no " + "complete records were found.");
                }
            }
            if (partition.highWatermark >= 0) {
                log.trace("Received {} records in fetch response for partition {} with offset {}", parsed.size(), tp, position);
                subscriptions.updateHighWatermark(tp, partition.highWatermark);
            }
        } else if (error == Errors.NOT_LEADER_FOR_PARTITION) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}. The topic/partition " + "may not exist or the user may not have Describe access to it", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            if (fetchOffset != subscriptions.position(tp)) {
                log.debug("Discarding stale fetch response for partition {} since the fetched offset {} " + "does not match the current offset {}", tp, fetchOffset, subscriptions.position(tp));
            } else if (subscriptions.hasDefaultOffsetResetPolicy()) {
                log.info("Fetch offset {} is out of range for partition {}, resetting offset", fetchOffset, tp);
                subscriptions.needOffsetReset(tp);
            } else {
                throw new OffsetOutOfRangeException(Collections.singletonMap(tp, fetchOffset));
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            log.warn("Not authorized to read from topic {}.", tp.topic());
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN) {
            log.warn("Unknown error fetching data for topic-partition {}", tp);
        } else {
            throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching data");
        }
    } finally {
        completedFetch.metricAggregator.record(tp, bytes, recordsCount);
    }
    // We move the partition to the end if we received some bytes or if there was an error. This way, it's more
    // likely that partitions for the same topic can remain together (allowing for more efficient serialization).
    if (bytes > 0 || error != Errors.NONE)
        subscriptions.movePartitionToEnd(tp);
    return parsedRecords;
}
Also used: FetchResponse (org.apache.kafka.common.requests.FetchResponse), Errors (org.apache.kafka.common.protocol.Errors), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), ArrayList (java.util.ArrayList), KafkaException (org.apache.kafka.common.KafkaException), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), OffsetOutOfRangeException (org.apache.kafka.clients.consumer.OffsetOutOfRangeException), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), LogEntry (org.apache.kafka.common.record.LogEntry), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException)
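Because poll() surfaces exceptions raised while parsing completed fetches, a consumer sees this TopicAuthorizationException propagate out of KafkaConsumer.poll(). A minimal sketch (broker address, topic, and group id are hypothetical):

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.TopicAuthorizationException;

public class ConsumerAuthExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker address
        props.put("group.id", "example-group");           // hypothetical group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("restricted-topic"));
            // poll() rethrows the TopicAuthorizationException raised in parseCompletedFetch()
            ConsumerRecords<String, String> records = consumer.poll(1000);
            System.out.println("Fetched " + records.count() + " records");
        } catch (TopicAuthorizationException e) {
            System.err.println("Not authorized to read from: " + e.unauthorizedTopics());
        }
    }
}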

Example 4 with TopicAuthorizationException

Use of org.apache.kafka.common.errors.TopicAuthorizationException in the apache/kafka project.

From the class Fetcher, method getTopicMetadata:

/**
 * Get metadata for all topics present in the Kafka cluster
 *
 * @param request The MetadataRequest to send
 * @param timeout The maximum time in ms for which getting topic metadata is attempted
 * @return The map of topics with their partition information
 */
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) {
    // Save the round trip if no topics are requested.
    if (!request.isAllTopics() && request.topics().isEmpty())
        return Collections.emptyMap();
    long start = time.milliseconds();
    long remaining = timeout;
    do {
        RequestFuture<ClientResponse> future = sendMetadataRequest(request);
        client.poll(future, remaining);
        if (future.failed() && !future.isRetriable())
            throw future.exception();
        if (future.succeeded()) {
            MetadataResponse response = (MetadataResponse) future.value().responseBody();
            Cluster cluster = response.cluster();
            Set<String> unauthorizedTopics = cluster.unauthorizedTopics();
            if (!unauthorizedTopics.isEmpty())
                throw new TopicAuthorizationException(unauthorizedTopics);
            boolean shouldRetry = false;
            Map<String, Errors> errors = response.errors();
            if (!errors.isEmpty()) {
                // if there were errors, we need to check whether they were fatal or whether
                // we should just retry
                log.debug("Topic metadata fetch included errors: {}", errors);
                for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) {
                    String topic = errorEntry.getKey();
                    Errors error = errorEntry.getValue();
                    if (error == Errors.INVALID_TOPIC_EXCEPTION)
                        throw new InvalidTopicException("Topic '" + topic + "' is invalid");
                    else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION)
                        // if a requested topic is unknown, we just continue and let it be absent
                        // in the returned map
                        continue;
                    else if (error.exception() instanceof RetriableException)
                        shouldRetry = true;
                    else
                        throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception());
                }
            }
            if (!shouldRetry) {
                HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>();
                for (String topic : cluster.topics())
                    topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic));
                return topicsPartitionInfos;
            }
        }
        long elapsed = time.milliseconds() - start;
        remaining = timeout - elapsed;
        if (remaining > 0) {
            long backoff = Math.min(remaining, retryBackoffMs);
            time.sleep(backoff);
            remaining -= backoff;
        }
    } while (remaining > 0);
    throw new TimeoutException("Timeout expired while fetching topic metadata");
}
Also used: ClientResponse (org.apache.kafka.clients.ClientResponse), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Cluster (org.apache.kafka.common.Cluster), Errors (org.apache.kafka.common.protocol.Errors), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException), KafkaException (org.apache.kafka.common.KafkaException), List (java.util.List), ArrayList (java.util.ArrayList), Map (java.util.Map), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), RetriableException (org.apache.kafka.common.errors.RetriableException), TimeoutException (org.apache.kafka.common.errors.TimeoutException)
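At the consumer API level, partitionsFor() and listTopics() are backed by getTopicMetadata() in this client version, so a topic the principal cannot Describe surfaces to the caller as the same exception. A minimal sketch (broker address and topic name are hypothetical):

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.errors.TopicAuthorizationException;

public class MetadataAuthExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker address
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // partitionsFor() delegates to Fetcher.getTopicMetadata()
            List<PartitionInfo> partitions = consumer.partitionsFor("restricted-topic");
            System.out.println("Partitions: " + partitions);
        } catch (TopicAuthorizationException e) {
            // getTopicMetadata() throws as soon as the response lists any unauthorized topic
            System.err.println("Not authorized to describe: " + e.unauthorizedTopics());
        }
    }
}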

Aggregations

TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 4
KafkaException (org.apache.kafka.common.KafkaException): 3
Errors (org.apache.kafka.common.protocol.Errors): 3
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
LinkedHashMap (java.util.LinkedHashMap): 2
List (java.util.List): 2
Map (java.util.Map): 2
Cluster (org.apache.kafka.common.Cluster): 2
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1
ClientResponse (org.apache.kafka.clients.ClientResponse): 1
OffsetOutOfRangeException (org.apache.kafka.clients.consumer.OffsetOutOfRangeException): 1
TopicPartition (org.apache.kafka.common.TopicPartition): 1
InvalidMetadataException (org.apache.kafka.common.errors.InvalidMetadataException): 1
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 1
RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException): 1
RetriableException (org.apache.kafka.common.errors.RetriableException): 1
UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException): 1
LogEntry (org.apache.kafka.common.record.LogEntry): 1