Example 1 with RecordTooLargeException

Use of org.apache.kafka.common.errors.RecordTooLargeException in project kafka by apache.

From the class Fetcher, the method parseCompletedFetch. It parses a completed fetch response into consumer records and maps broker error codes to client-side exceptions; against brokers older than 0.10.1 (pre-KIP-74), a single message larger than the configured fetch size can never be returned, so the method throws RecordTooLargeException.

/**
     * The callback for fetch completion
     */
private PartitionRecords<K, V> parseCompletedFetch(CompletedFetch completedFetch) {
    TopicPartition tp = completedFetch.partition;
    FetchResponse.PartitionData partition = completedFetch.partitionData;
    long fetchOffset = completedFetch.fetchedOffset;
    int bytes = 0;
    int recordsCount = 0;
    PartitionRecords<K, V> parsedRecords = null;
    Errors error = partition.error;
    try {
        if (!subscriptions.isFetchable(tp)) {
        // this can happen when a rebalance occurs or partition consumption is paused
        // while a fetch is still in flight
            log.debug("Ignoring fetched records for partition {} since it is no longer fetchable", tp);
        } else if (error == Errors.NONE) {
            // we are interested in this fetch only if the beginning offset matches the
            // current consumed position
            Long position = subscriptions.position(tp);
            if (position == null || position != fetchOffset) {
                log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " + "the expected offset {}", tp, fetchOffset, position);
                return null;
            }
            List<ConsumerRecord<K, V>> parsed = new ArrayList<>();
            boolean skippedRecords = false;
            for (LogEntry logEntry : partition.records.deepEntries()) {
            // Skip messages earlier than the current position.
                if (logEntry.offset() >= position) {
                    parsed.add(parseRecord(tp, logEntry));
                    bytes += logEntry.sizeInBytes();
                } else
                    skippedRecords = true;
            }
            recordsCount = parsed.size();
            log.trace("Adding fetched record for partition {} with offset {} to buffered record list", tp, position);
            parsedRecords = new PartitionRecords<>(fetchOffset, tp, parsed);
            if (parsed.isEmpty() && !skippedRecords && (partition.records.sizeInBytes() > 0)) {
                if (completedFetch.responseVersion < 3) {
                    // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                    Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions + " whose size is larger than the fetch size " + this.fetchSize + " and hence cannot be returned. Please consider upgrading your broker to 0.10.1.0 or " + "newer to avoid this issue. Alternatively, increase the fetch size on the client (using " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions);
                } else {
                    // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74)
                    throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset + ". Received a non-empty fetch response from the server, but no " + "complete records were found.");
                }
            }
            if (partition.highWatermark >= 0) {
                log.trace("Received {} records in fetch response for partition {} with offset {}", parsed.size(), tp, position);
                subscriptions.updateHighWatermark(tp, partition.highWatermark);
            }
        } else if (error == Errors.NOT_LEADER_FOR_PARTITION) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}. The topic/partition " + "may not exist or the user may not have Describe access to it", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            if (fetchOffset != subscriptions.position(tp)) {
                log.debug("Discarding stale fetch response for partition {} since the fetched offset {}" + "does not match the current offset {}", tp, fetchOffset, subscriptions.position(tp));
            } else if (subscriptions.hasDefaultOffsetResetPolicy()) {
                log.info("Fetch offset {} is out of range for partition {}, resetting offset", fetchOffset, tp);
                subscriptions.needOffsetReset(tp);
            } else {
                throw new OffsetOutOfRangeException(Collections.singletonMap(tp, fetchOffset));
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            log.warn("Not authorized to read from topic {}.", tp.topic());
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN) {
            log.warn("Unknown error fetching data for topic-partition {}", tp);
        } else {
            throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching data");
        }
    } finally {
        completedFetch.metricAggregator.record(tp, bytes, recordsCount);
    }
    // we move the partition to the end if we received some bytes or if there was an error; this way, it is more
    // likely that partitions for the same topic can remain together (allowing for more efficient serialization).
    if (bytes > 0 || error != Errors.NONE)
        subscriptions.movePartitionToEnd(tp);
    return parsedRecords;
}
Also used: FetchResponse (org.apache.kafka.common.requests.FetchResponse), Errors (org.apache.kafka.common.protocol.Errors), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), ArrayList (java.util.ArrayList), KafkaException (org.apache.kafka.common.KafkaException), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), OffsetOutOfRangeException (org.apache.kafka.clients.consumer.OffsetOutOfRangeException), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), LogEntry (org.apache.kafka.common.record.LogEntry), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException)
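
On the application side, the pre-KIP-74 branch above surfaces as an exception thrown out of KafkaConsumer.poll(). Below is a minimal sketch of how a caller might react, assuming an old (pre-0.10.1) broker; the bootstrap address, group id, and topic name are placeholders, and recordTooLargePartitions() is the accessor the old clients attached to this exception. This is an illustration, not the project's own code.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchSizeExample {

    public static void main(String[] args) {
        Properties props = new Properties();
        // "localhost:9092" and "test-topic" are placeholder values for this sketch.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "fetch-size-example");
        // A deliberately small per-partition fetch size: any record larger than this
        // triggers the pre-KIP-74 branch of parseCompletedFetch on old brokers.
        props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 512);

        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singletonList("test-topic"));
            try {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
                for (ConsumerRecord<byte[], byte[]> record : records)
                    System.out.printf("offset=%d size=%d%n", record.offset(), record.value().length);
            } catch (RecordTooLargeException e) {
                // The exception carries the offending [partition -> offset] map built above.
                // Remedies, per its message: raise max.partition.fetch.bytes on the client,
                // or upgrade the broker to 0.10.1.0+ so partial fetches are allowed (KIP-74).
                System.err.println("Oversized record(s) at: " + e.recordTooLargePartitions());
            }
        }
    }
}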

Example 2 with RecordTooLargeException

Use of org.apache.kafka.common.errors.RecordTooLargeException in project kafka by apache.

From the class ClientCompatibilityTest, the method testConsume. The test consumes with a deliberately small max.partition.fetch.bytes (512 bytes) and, depending on the broker version under test, expects the oversized second message either to surface as a RecordTooLargeException or to be delivered normally.

public void testConsume(final long prodTimeMs) throws Exception {
    Properties consumerProps = new Properties();
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, testConfig.bootstrapServer);
    consumerProps.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 512);
    ClientCompatibilityTestDeserializer deserializer = new ClientCompatibilityTestDeserializer(testConfig.expectClusterId);
    final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps, deserializer, deserializer);
    final List<PartitionInfo> partitionInfos = consumer.partitionsFor(testConfig.topic);
    if (partitionInfos.size() < 1)
        throw new RuntimeException("Expected at least one partition for topic " + testConfig.topic);
    final Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
    final LinkedList<TopicPartition> topicPartitions = new LinkedList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
        timestampsToSearch.put(topicPartition, prodTimeMs);
        topicPartitions.add(topicPartition);
    }
    final OffsetsForTime offsetsForTime = new OffsetsForTime();
    tryFeature("offsetsForTimes", testConfig.offsetsForTimesSupported, new Runnable() {

        @Override
        public void run() {
            offsetsForTime.result = consumer.offsetsForTimes(timestampsToSearch);
        }
    }, new Runnable() {

        @Override
        public void run() {
            log.info("offsetsForTime = {}", offsetsForTime.result);
        }
    });
    // Whether or not offsetsForTimes works, beginningOffsets and endOffsets
    // should work.
    consumer.beginningOffsets(timestampsToSearch.keySet());
    consumer.endOffsets(timestampsToSearch.keySet());
    consumer.assign(topicPartitions);
    consumer.seekToBeginning(topicPartitions);
    final Iterator<byte[]> iter = new Iterator<byte[]>() {

        private static final int TIMEOUT_MS = 10000;

        private Iterator<ConsumerRecord<byte[], byte[]>> recordIter = null;

        private byte[] next = null;

        private byte[] fetchNext() {
            while (true) {
                long curTime = Time.SYSTEM.milliseconds();
                if (curTime - prodTimeMs > TIMEOUT_MS)
                    throw new RuntimeException("Timed out after " + TIMEOUT_MS + " ms.");
                if (recordIter == null) {
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
                    recordIter = records.iterator();
                }
                if (recordIter.hasNext())
                    return recordIter.next().value();
                recordIter = null;
            }
        }

        @Override
        public boolean hasNext() {
            if (next != null)
                return true;
            next = fetchNext();
            return next != null;
        }

        @Override
        public byte[] next() {
            if (!hasNext())
                throw new NoSuchElementException();
            byte[] cur = next;
            next = null;
            return cur;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
    byte[] next = iter.next();
    try {
        compareArrays(message1, next);
        log.debug("Found first message...");
    } catch (RuntimeException e) {
        throw new RuntimeException("The first message in this topic was not ours. Please use a new topic when " + "running this program.");
    }
    try {
        next = iter.next();
        if (testConfig.expectRecordTooLargeException)
            throw new RuntimeException("Expected to get a RecordTooLargeException when reading a record " + "bigger than " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG);
        try {
            compareArrays(message2, next);
        } catch (RuntimeException e) {
            System.out.println("The second message in this topic was not ours. Please use a new " + "topic when running this program.");
            Exit.exit(1);
        }
    } catch (RecordTooLargeException e) {
        log.debug("Got RecordTooLargeException", e);
        if (!testConfig.expectRecordTooLargeException)
            throw new RuntimeException("Got an unexpected RecordTooLargeException when reading a record " + "bigger than " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG);
    }
    log.debug("Closing consumer.");
    consumer.close();
    log.info("Closed consumer.");
}
Also used: HashMap (java.util.HashMap), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), Properties (java.util.Properties), LinkedList (java.util.LinkedList), TopicPartition (org.apache.kafka.common.TopicPartition), Iterator (java.util.Iterator), PartitionInfo (org.apache.kafka.common.PartitionInfo), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), NoSuchElementException (java.util.NoSuchElementException)
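
For completeness, RecordTooLargeException is not consumer-only: the producer raises it when a record's serialized size exceeds max.request.size (and the broker can reject records against message.max.bytes). Below is a minimal sketch, again with a placeholder bootstrap address and topic name; depending on the client version the exception may be thrown synchronously from send() or, as handled here, arrive as the cause of the ExecutionException thrown by the returned future.

import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class OversizedRecordExample {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // Cap the request size so the record below is rejected on the client side.
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1024);

        try (KafkaProducer<byte[], byte[]> producer =
                new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
            byte[] oversized = new byte[1024 * 1024]; // far larger than max.request.size
            try {
                producer.send(new ProducerRecord<>("test-topic", oversized)).get();
            } catch (ExecutionException e) {
                if (e.getCause() instanceof RecordTooLargeException)
                    System.err.println("Producer rejected oversized record: " + e.getCause().getMessage());
                else
                    throw e;
            }
        }
    }
}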

Aggregations

HashMap (java.util.HashMap): 2
TopicPartition (org.apache.kafka.common.TopicPartition): 2
RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException): 2
ArrayList (java.util.ArrayList): 1
Iterator (java.util.Iterator): 1
LinkedHashMap (java.util.LinkedHashMap): 1
LinkedList (java.util.LinkedList): 1
List (java.util.List): 1
Map (java.util.Map): 1
NoSuchElementException (java.util.NoSuchElementException): 1
Properties (java.util.Properties): 1
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 1
OffsetOutOfRangeException (org.apache.kafka.clients.consumer.OffsetOutOfRangeException): 1
KafkaException (org.apache.kafka.common.KafkaException): 1
PartitionInfo (org.apache.kafka.common.PartitionInfo): 1
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 1
Errors (org.apache.kafka.common.protocol.Errors): 1
LogEntry (org.apache.kafka.common.record.LogEntry): 1
FetchResponse (org.apache.kafka.common.requests.FetchResponse): 1