Example 1 with KafkaException

use of org.apache.kafka.common.KafkaException in project kafka by apache.

the class Fetcher method parseCompletedFetch.

/**
     * The callback for fetch completion
     */
private PartitionRecords<K, V> parseCompletedFetch(CompletedFetch completedFetch) {
    TopicPartition tp = completedFetch.partition;
    FetchResponse.PartitionData partition = completedFetch.partitionData;
    long fetchOffset = completedFetch.fetchedOffset;
    int bytes = 0;
    int recordsCount = 0;
    PartitionRecords<K, V> parsedRecords = null;
    Errors error = partition.error;
    try {
        if (!subscriptions.isFetchable(tp)) {
            // this can happen when a rebalance happened or a partition consumption paused
            // while fetch is still in-flight
            log.debug("Ignoring fetched records for partition {} since it is no longer fetchable", tp);
        } else if (error == Errors.NONE) {
            // we are interested in this fetch only if the beginning offset matches the
            // current consumed position
            Long position = subscriptions.position(tp);
            if (position == null || position != fetchOffset) {
                log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " + "the expected offset {}", tp, fetchOffset, position);
                return null;
            }
            List<ConsumerRecord<K, V>> parsed = new ArrayList<>();
            boolean skippedRecords = false;
            for (LogEntry logEntry : partition.records.deepEntries()) {
                // Skip the messages earlier than current position.
                if (logEntry.offset() >= position) {
                    parsed.add(parseRecord(tp, logEntry));
                    bytes += logEntry.sizeInBytes();
                } else
                    skippedRecords = true;
            }
            recordsCount = parsed.size();
            log.trace("Adding fetched record for partition {} with offset {} to buffered record list", tp, position);
            parsedRecords = new PartitionRecords<>(fetchOffset, tp, parsed);
            if (parsed.isEmpty() && !skippedRecords && (partition.records.sizeInBytes() > 0)) {
                if (completedFetch.responseVersion < 3) {
                    // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                    Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                    throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions + " whose size is larger than the fetch size " + this.fetchSize + " and hence cannot be returned. Please consider upgrading your broker to 0.10.1.0 or " + "newer to avoid this issue. Alternately, increase the fetch size on the client (using " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions);
                } else {
                    // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74)
                    throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset + ". Received a non-empty fetch response from the server, but no " + "complete records were found.");
                }
            }
            if (partition.highWatermark >= 0) {
                log.trace("Received {} records in fetch response for partition {} with offset {}", parsed.size(), tp, position);
                subscriptions.updateHighWatermark(tp, partition.highWatermark);
            }
        } else if (error == Errors.NOT_LEADER_FOR_PARTITION) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}. The topic/partition " + "may not exist or the user may not have Describe access to it", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            if (fetchOffset != subscriptions.position(tp)) {
                log.debug("Discarding stale fetch response for partition {} since the fetched offset {}" + "does not match the current offset {}", tp, fetchOffset, subscriptions.position(tp));
            } else if (subscriptions.hasDefaultOffsetResetPolicy()) {
                log.info("Fetch offset {} is out of range for partition {}, resetting offset", fetchOffset, tp);
                subscriptions.needOffsetReset(tp);
            } else {
                throw new OffsetOutOfRangeException(Collections.singletonMap(tp, fetchOffset));
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            log.warn("Not authorized to read from topic {}.", tp.topic());
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN) {
            log.warn("Unknown error fetching data for topic-partition {}", tp);
        } else {
            throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching data");
        }
    } finally {
        completedFetch.metricAggregator.record(tp, bytes, recordsCount);
    }
    // Move the partition to the end if we received some bytes or there was an error, so that it's more
    // likely that partitions for the same topic can remain together (allowing for more efficient serialization).
    if (bytes > 0 || error != Errors.NONE)
        subscriptions.movePartitionToEnd(tp);
    return parsedRecords;
}
Also used : FetchResponse(org.apache.kafka.common.requests.FetchResponse) Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List) ArrayList(java.util.ArrayList) KafkaException(org.apache.kafka.common.KafkaException) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) LogEntry(org.apache.kafka.common.record.LogEntry) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException)
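
For callers, the failures raised here surface through KafkaConsumer.poll(). A minimal sketch of handling them; the broker address, group id, and topic name are hypothetical:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.RecordTooLargeException;

public class FetchErrorHandlingExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Hypothetical connection settings, for illustration only.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "example-group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
        } catch (OffsetOutOfRangeException e) {
            // Thrown above when the fetch offset is out of range and no reset policy is set.
            System.err.println("Out-of-range partitions: " + e.offsetOutOfRangePartitions());
        } catch (RecordTooLargeException e) {
            // Pre-KIP-74 brokers: a single message exceeded the partition fetch size.
            System.err.println("Oversized records at: " + e.recordTooLargePartitions());
        } catch (KafkaException e) {
            // Covers the generic "no complete records were found" failure, among others.
            System.err.println("Fetch failed: " + e.getMessage());
        }
    }
}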

Example 2 with KafkaException

use of org.apache.kafka.common.KafkaException in project kafka by apache.

the class PlaintextChannelBuilder method configure.

public void configure(Map<String, ?> configs) throws KafkaException {
    try {
        this.configs = configs;
        principalBuilder = ChannelBuilders.createPrincipalBuilder(configs);
    } catch (Exception e) {
        throw new KafkaException(e);
    }
}
Also used : KafkaException(org.apache.kafka.common.KafkaException)
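
Because KafkaException extends RuntimeException, the throws clause above is documentation only; callers are not forced to catch it. A minimal sketch of a call site that recovers the wrapped cause (the ChannelBuilder parameter stands in for the internal PlaintextChannelBuilder and is an assumption of this sketch):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.network.ChannelBuilder;

public class ChannelConfigureExample {
    static void configureSafely(ChannelBuilder builder) {
        Map<String, Object> configs = new HashMap<>();
        try {
            builder.configure(configs);
        } catch (KafkaException e) {
            // configure() above preserves the original checked exception as the cause.
            Throwable cause = e.getCause();
            System.err.println("Channel configuration failed: " + (cause != null ? cause : e));
        }
    }
}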

Example 3 with KafkaException

use of org.apache.kafka.common.KafkaException in project kafka by apache.

the class AbstractConfig method getConfiguredInstance.

/**
     * Get a configured instance of the given class specified by the given configuration key. If the object implements
     * Configurable, configure it using the configuration.
     *
     * @param key The configuration key for the class
     * @param t The interface the class should implement
     * @return A configured instance of the class
     */
public <T> T getConfiguredInstance(String key, Class<T> t) {
    Class<?> c = getClass(key);
    if (c == null)
        return null;
    Object o = Utils.newInstance(c);
    if (!t.isInstance(o))
        throw new KafkaException(c.getName() + " is not an instance of " + t.getName());
    if (o instanceof Configurable)
        ((Configurable) o).configure(originals());
    return t.cast(o);
}
Also used : KafkaException(org.apache.kafka.common.KafkaException) Configurable(org.apache.kafka.common.Configurable)
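
A short sketch of how this method is typically exercised, via a hypothetical AbstractConfig subclass whose single CLASS-typed key is resolved to a Runnable (all names below are illustrative; only AbstractConfig and ConfigDef are real Kafka APIs):

import java.util.Map;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;

public class ExampleConfig extends AbstractConfig {
    // Hypothetical key naming a class that must implement Runnable.
    public static final String CALLBACK_CLASS_CONFIG = "example.callback.class";

    private static final ConfigDef CONFIG = new ConfigDef()
            .define(CALLBACK_CLASS_CONFIG, Type.CLASS, null, Importance.MEDIUM,
                    "Class implementing Runnable to invoke after setup.");

    public ExampleConfig(Map<?, ?> originals) {
        super(CONFIG, originals);
    }

    public Runnable callback() {
        // Returns null if the key is unset; throws KafkaException if the
        // configured class is not a Runnable.
        return getConfiguredInstance(CALLBACK_CLASS_CONFIG, Runnable.class);
    }
}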

Example 4 with KafkaException

use of org.apache.kafka.common.KafkaException in project kafka by apache.

the class AbstractConfig method getConfiguredInstances.

/**
     * Get a list of configured instances of the given class specified by the given configuration key. The configuration
     * may specify either null or an empty string to indicate no configured instances; in both cases this method
     * returns an empty list.
     * @param key The configuration key for the class
     * @param t The interface the class should implement
     * @param configOverrides Configuration overrides to use.
     * @return The list of configured instances
     */
public <T> List<T> getConfiguredInstances(String key, Class<T> t, Map<String, Object> configOverrides) {
    List<String> klasses = getList(key);
    List<T> objects = new ArrayList<T>();
    if (klasses == null)
        return objects;
    Map<String, Object> configPairs = originals();
    configPairs.putAll(configOverrides);
    for (Object klass : klasses) {
        Object o;
        if (klass instanceof String) {
            try {
                o = Utils.newInstance((String) klass, t);
            } catch (ClassNotFoundException e) {
                throw new KafkaException(klass + " ClassNotFoundException exception occurred", e);
            }
        } else if (klass instanceof Class<?>) {
            o = Utils.newInstance((Class<?>) klass);
        } else
            throw new KafkaException("List contains element of type " + klass.getClass().getName() + ", expected String or Class");
        if (!t.isInstance(o))
            throw new KafkaException(klass + " is not an instance of " + t.getName());
        if (o instanceof Configurable)
            ((Configurable) o).configure(configPairs);
        objects.add(t.cast(o));
    }
    return objects;
}
Also used : ArrayList(java.util.ArrayList) KafkaException(org.apache.kafka.common.KafkaException) Configurable(org.apache.kafka.common.Configurable)
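
This overload is how the clients build their interceptor chains; the overrides map is merged over originals() before each instance's configure() call. A sketch of such a call site (loadInterceptors and its parameters are hypothetical; config is assumed to be an existing ConsumerConfig):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;

public class InterceptorLoadingExample {
    @SuppressWarnings("rawtypes")
    static List<ConsumerInterceptor> loadInterceptors(ConsumerConfig config, String clientId) {
        // Let the resolved client.id reach each interceptor's configure() call.
        Map<String, Object> overrides =
                Collections.<String, Object>singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        // Throws KafkaException if a listed class cannot be found or does not
        // implement ConsumerInterceptor; returns an empty list if none are configured.
        return config.getConfiguredInstances(
                ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptor.class, overrides);
    }
}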

Example 5 with KafkaException

use of org.apache.kafka.common.KafkaException in project kafka by apache.

the class StreamPartitionAssignor method configure.

/**
     * The PartitionAssignor and its StreamThread need to be mutually accessible,
     * since the former needs the latter's cached metadata while sending subscriptions,
     * and the latter needs the former's returned assignment when adding tasks.
     * @throws KafkaException if the stream thread is not specified
     */
@Override
public void configure(Map<String, ?> configs) {
    numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG);
    Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE);
    if (o == null) {
        KafkaException ex = new KafkaException("StreamThread is not specified");
        log.error(ex.getMessage(), ex);
        throw ex;
    }
    if (!(o instanceof StreamThread)) {
        KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName()));
        log.error(ex.getMessage(), ex);
        throw ex;
    }
    streamThread = (StreamThread) o;
    streamThread.partitionAssignor(this);
    String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG);
    if (userEndPoint != null && !userEndPoint.isEmpty()) {
        try {
            String host = getHost(userEndPoint);
            Integer port = getPort(userEndPoint);
            if (host == null || port == null)
                throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint));
        } catch (NumberFormatException nfe) {
            throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG));
        }
        this.userEndPoint = userEndPoint;
    }
    internalTopicManager = new InternalTopicManager(
            new StreamsKafkaClient(this.streamThread.config),
            configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG)
                    ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1,
            configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)
                    ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)
                    : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT);
    this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName());
}
Also used : KafkaException(org.apache.kafka.common.KafkaException) ConfigException(org.apache.kafka.common.config.ConfigException)
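
On the user side, the host:port validation above applies to the application.server setting. A minimal sketch of supplying it through StreamsConfig; the application id, bootstrap server, and endpoint values are hypothetical:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class StreamsEndpointConfigExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Must be a host:port pair, or configure() above throws a ConfigException.
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        StreamsConfig config = new StreamsConfig(props);
        System.out.println(config.getString(StreamsConfig.APPLICATION_SERVER_CONFIG));
    }
}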

Aggregations

KafkaException (org.apache.kafka.common.KafkaException): 262
Test (org.junit.Test): 69
TopicPartition (org.apache.kafka.common.TopicPartition): 56
Test (org.junit.jupiter.api.Test): 47
HashMap (java.util.HashMap): 40
IOException (java.io.IOException): 39
StreamsException (org.apache.kafka.streams.errors.StreamsException): 34
Map (java.util.Map): 32
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 28
ArrayList (java.util.ArrayList): 27
List (java.util.List): 21
ByteBuffer (java.nio.ByteBuffer): 19
ExecutionException (java.util.concurrent.ExecutionException): 19
ConfigException (org.apache.kafka.common.config.ConfigException): 16
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 14
HashSet (java.util.HashSet): 13
Properties (java.util.Properties): 13
Set (java.util.Set): 11
Collectors (java.util.stream.Collectors): 11
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 11