Search in sources:

Example 1 with InvalidRecordException

Use of org.apache.kafka.common.InvalidRecordException in the apache/kafka project.

From class DefaultRecord, method readPartiallyFrom.

private static PartialDefaultRecord readPartiallyFrom(DataInput input, byte[] skipArray, int sizeInBytes, int sizeOfBodyInBytes, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException {
    ByteBuffer skipBuffer = ByteBuffer.wrap(skipArray);
    // set its limit to 0 to indicate no bytes readable yet
    skipBuffer.limit(0);
    try {
        // reading the attributes / timestamp / offset and key size does not require
        // any byte array allocation, so we can read them straightforwardly
        IntRef bytesRemaining = PrimitiveRef.ofInt(sizeOfBodyInBytes);
        byte attributes = readByte(skipBuffer, input, bytesRemaining);
        long timestampDelta = readVarLong(skipBuffer, input, bytesRemaining);
        long timestamp = baseTimestamp + timestampDelta;
        if (logAppendTime != null)
            timestamp = logAppendTime;
        int offsetDelta = readVarInt(skipBuffer, input, bytesRemaining);
        long offset = baseOffset + offsetDelta;
        int sequence = baseSequence >= 0 ? DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta) : RecordBatch.NO_SEQUENCE;
        // first skip over the key
        int keySize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
        // then skip over the value
        int valueSize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
        // finally read the header count and skip each header in the loop below
        int numHeaders = readVarInt(skipBuffer, input, bytesRemaining);
        if (numHeaders < 0)
            throw new InvalidRecordException("Found invalid number of record headers " + numHeaders);
        for (int i = 0; i < numHeaders; i++) {
            int headerKeySize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
            if (headerKeySize < 0)
                throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);
            // skip the header value; its size is not needed here
            skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
        }
        if (bytesRemaining.value > 0 || skipBuffer.remaining() > 0)
            throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes + " bytes in record payload, but there are still bytes remaining");
        return new PartialDefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, keySize, valueSize);
    } catch (BufferUnderflowException | IllegalArgumentException e) {
        throw new InvalidRecordException("Found invalid record structure", e);
    }
}
Also used: IntRef(org.apache.kafka.common.utils.PrimitiveRef.IntRef) ByteBuffer(java.nio.ByteBuffer) InvalidRecordException(org.apache.kafka.common.InvalidRecordException) BufferUnderflowException(java.nio.BufferUnderflowException)
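
For orientation, here is a minimal, self-contained sketch of the skipping idiom used above: each field carries a zig-zag varint length prefix, and "skipping" means decoding that length and advancing the buffer position without allocating a byte array. The single-ByteBuffer signature and the class name below are simplifications for illustration, not the actual Kafka internals, which read from the buffer-plus-DataInput pair.

import java.nio.ByteBuffer;

public class SkipFieldSketch {

    // Read a zig-zag varint length prefix, then skip that many bytes.
    // Returns the length, or -1 for a null field (encoded as length -1).
    static int skipLengthDelimitedField(ByteBuffer buffer) {
        int size = readVarint(buffer);
        if (size < 0)
            return -1;
        buffer.position(buffer.position() + size);
        return size;
    }

    // Protobuf-style zig-zag varint decoding, as used by the Kafka record format.
    // The real ByteUtils.readVarint additionally rejects malformed varints with
    // IllegalArgumentException, which readPartiallyFrom converts to InvalidRecordException.
    static int readVarint(ByteBuffer buffer) {
        int value = 0;
        int shift = 0;
        byte b;
        do {
            b = buffer.get();
            value |= (b & 0x7F) << shift;
            shift += 7;
        } while ((b & 0x80) != 0);
        return (value >>> 1) ^ -(value & 1); // undo zig-zag encoding
    }
}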

Example 2 with InvalidRecordException

Use of org.apache.kafka.common.InvalidRecordException in the apache/kafka project.

From class DefaultRecord, method readFrom.

private static DefaultRecord readFrom(ByteBuffer buffer, int sizeInBytes, int sizeOfBodyInBytes, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) {
    try {
        int recordStart = buffer.position();
        byte attributes = buffer.get();
        long timestampDelta = ByteUtils.readVarlong(buffer);
        long timestamp = baseTimestamp + timestampDelta;
        if (logAppendTime != null)
            timestamp = logAppendTime;
        int offsetDelta = ByteUtils.readVarint(buffer);
        long offset = baseOffset + offsetDelta;
        int sequence = baseSequence >= 0 ? DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta) : RecordBatch.NO_SEQUENCE;
        ByteBuffer key = null;
        int keySize = ByteUtils.readVarint(buffer);
        if (keySize >= 0) {
            key = buffer.slice();
            key.limit(keySize);
            buffer.position(buffer.position() + keySize);
        }
        ByteBuffer value = null;
        int valueSize = ByteUtils.readVarint(buffer);
        if (valueSize >= 0) {
            value = buffer.slice();
            value.limit(valueSize);
            buffer.position(buffer.position() + valueSize);
        }
        int numHeaders = ByteUtils.readVarint(buffer);
        if (numHeaders < 0)
            throw new InvalidRecordException("Found invalid number of record headers " + numHeaders);
        final Header[] headers;
        if (numHeaders == 0)
            headers = Record.EMPTY_HEADERS;
        else
            headers = readHeaders(buffer, numHeaders);
        // validate that we consumed exactly the declared number of bytes for this record
        if (buffer.position() - recordStart != sizeOfBodyInBytes)
            throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes + " bytes in record payload, but instead read " + (buffer.position() - recordStart));
        return new DefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, key, value, headers);
    } catch (BufferUnderflowException | IllegalArgumentException e) {
        throw new InvalidRecordException("Found invalid record structure", e);
    }
}
Also used: RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) ByteBuffer(java.nio.ByteBuffer) InvalidRecordException(org.apache.kafka.common.InvalidRecordException) BufferUnderflowException(java.nio.BufferUnderflowException)
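
Note how the key and value above become zero-copy views of the batch buffer: slice() shares the backing storage from the current position, and limit() restricts the view to the field's declared size. A stand-alone illustration of that idiom, with made-up bytes and offsets:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SliceViewDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.wrap("key:value".getBytes(StandardCharsets.UTF_8));
        // pretend a parser has already consumed the 3-byte key and 1-byte delimiter
        buffer.position(4);

        ByteBuffer value = buffer.slice(); // shares the backing array, no copy
        value.limit(5);                    // set the view's limit to the 5-byte value size
        buffer.position(buffer.position() + 5); // advance past the value, like readFrom does

        byte[] out = new byte[value.remaining()];
        value.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8)); // prints "value"
    }
}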

Example 3 with InvalidRecordException

Use of org.apache.kafka.common.InvalidRecordException in the apache/kafka project.

From class DefaultRecordBatch, method uncompressedIterator.

private CloseableIterator<Record> uncompressedIterator() {
    final ByteBuffer buffer = this.buffer.duplicate();
    buffer.position(RECORDS_OFFSET);
    return new RecordIterator() {

        @Override
        protected Record readNext(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) {
            try {
                return DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, logAppendTime);
            } catch (BufferUnderflowException e) {
                throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached");
            }
        }

        @Override
        protected boolean ensureNoneRemaining() {
            return !buffer.hasRemaining();
        }

        @Override
        public void close() {
        }
    };
}
Also used: OptionalLong(java.util.OptionalLong) ByteBuffer(java.nio.ByteBuffer) InvalidRecordException(org.apache.kafka.common.InvalidRecordException) BufferUnderflowException(java.nio.BufferUnderflowException)
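
A hedged sketch of how such an iterator might be consumed. In DefaultRecordBatch, streamingIterator() delegates to the uncompressedIterator() above when the batch is not compressed; countRecords is a hypothetical caller, and corruption surfaces as the InvalidRecordException thrown in readNext:

import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.CloseableIterator;

public class BatchScanSketch {

    // Hypothetical caller: count the records in a batch, letting any
    // InvalidRecordException from the iterator propagate to the caller.
    static int countRecords(RecordBatch batch) {
        int count = 0;
        try (CloseableIterator<Record> it = batch.streamingIterator(BufferSupplier.NO_CACHING)) {
            while (it.hasNext()) {
                it.next();
                count++;
            }
        }
        return count;
    }
}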

Example 4 with InvalidRecordException

Use of org.apache.kafka.common.InvalidRecordException in the apache/kafka project.

From class ProduceRequest, method validateRecords.

public static void validateRecords(short version, BaseRecords baseRecords) {
    if (version >= 3) {
        if (baseRecords instanceof Records) {
            Records records = (Records) baseRecords;
            Iterator<? extends RecordBatch> iterator = records.batches().iterator();
            if (!iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " must have at least " + "one record batch");
            RecordBatch entry = iterator.next();
            if (entry.magic() != RecordBatch.MAGIC_VALUE_V2)
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " + "contain record batches with magic version 2");
            if (version < 7 && entry.compressionType() == CompressionType.ZSTD) {
                throw new UnsupportedCompressionTypeException("Produce requests with version " + version + " are not allowed to " + "use ZStandard compression");
            }
            if (iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " + "contain exactly one record batch");
        }
    }
// Note that we do not do similar validation for older versions to ensure compatibility with
// clients which send the wrong magic version in the wrong version of the produce request. The broker
// did not do this validation before, so we maintain that behavior here.
}
Also used: UnsupportedCompressionTypeException(org.apache.kafka.common.errors.UnsupportedCompressionTypeException) RecordBatch(org.apache.kafka.common.record.RecordBatch) Records(org.apache.kafka.common.record.Records) BaseRecords(org.apache.kafka.common.record.BaseRecords) InvalidRecordException(org.apache.kafka.common.InvalidRecordException)
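
A small usage sketch against the method above. MemoryRecords.withRecords writes magic-v2 batches in current Kafka versions, so the first call should pass, while the empty record set trips the "at least one record batch" check:

import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.ProduceRequest;

public class ValidateRecordsSketch {
    public static void main(String[] args) {
        // a single magic-v2 batch with one record: expected to pass for v3+
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
                new SimpleRecord("key".getBytes(), "value".getBytes()));
        ProduceRequest.validateRecords((short) 7, records);

        // an empty record set fails the "at least one record batch" check
        try {
            ProduceRequest.validateRecords((short) 7, MemoryRecords.EMPTY);
        } catch (InvalidRecordException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}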

Example 5 with InvalidRecordException

Use of org.apache.kafka.common.InvalidRecordException in the apache/kafka project.

From class Sender, method failBatch.

private void failBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, boolean adjustSequenceNumbers) {
    final RuntimeException topLevelException;
    if (response.error == Errors.TOPIC_AUTHORIZATION_FAILED)
        topLevelException = new TopicAuthorizationException(Collections.singleton(batch.topicPartition.topic()));
    else if (response.error == Errors.CLUSTER_AUTHORIZATION_FAILED)
        topLevelException = new ClusterAuthorizationException("The producer is not authorized to do idempotent sends");
    else
        topLevelException = response.error.exception(response.errorMessage);
    if (response.recordErrors == null || response.recordErrors.isEmpty()) {
        failBatch(batch, topLevelException, adjustSequenceNumbers);
    } else {
        Map<Integer, RuntimeException> recordErrorMap = new HashMap<>(response.recordErrors.size());
        for (ProduceResponse.RecordError recordError : response.recordErrors) {
            // The API leaves us with some awkwardness interpreting the errors in the response.
            // We cannot distinguish different error cases (such as INVALID_TIMESTAMP) using
            // the single error code at the partition level, so instead we use INVALID_RECORD
            // for all failed records and rely on the message to distinguish the cases.
            final String errorMessage;
            if (recordError.message != null) {
                errorMessage = recordError.message;
            } else if (response.errorMessage != null) {
                errorMessage = response.errorMessage;
            } else {
                errorMessage = response.error.message();
            }
            // if there is only a single record error, we can unambiguously use the
            // exception type corresponding to the partition-level error code
            if (response.recordErrors.size() == 1) {
                recordErrorMap.put(recordError.batchIndex, response.error.exception(errorMessage));
            } else {
                recordErrorMap.put(recordError.batchIndex, new InvalidRecordException(errorMessage));
            }
        }
        Function<Integer, RuntimeException> recordExceptions = batchIndex -> {
            RuntimeException exception = recordErrorMap.get(batchIndex);
            if (exception != null) {
                return exception;
            } else {
                // the record was not listed among the failed records, so return a generic exception
                return new KafkaException("Failed to append record because it was part of a batch " + "which had one or more invalid records");
            }
        };
        failBatch(batch, topLevelException, recordExceptions, adjustSequenceNumbers);
    }
}
Also used: Max(org.apache.kafka.common.metrics.stats.Max) TransactionAbortedException(org.apache.kafka.common.errors.TransactionAbortedException) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) Metadata(org.apache.kafka.clients.Metadata) KafkaException(org.apache.kafka.common.KafkaException) HashMap(java.util.HashMap) RetriableException(org.apache.kafka.common.errors.RetriableException) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) Function(java.util.function.Function) ClientRequest(org.apache.kafka.clients.ClientRequest) InvalidRecordException(org.apache.kafka.common.InvalidRecordException) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) RequestHeader(org.apache.kafka.common.requests.RequestHeader) FindCoordinatorRequest(org.apache.kafka.common.requests.FindCoordinatorRequest) InvalidMetadataException(org.apache.kafka.common.errors.InvalidMetadataException) KafkaClient(org.apache.kafka.clients.KafkaClient) RecordBatch(org.apache.kafka.common.record.RecordBatch) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) MetricName(org.apache.kafka.common.MetricName) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) TopicPartition(org.apache.kafka.common.TopicPartition) Sensor(org.apache.kafka.common.metrics.Sensor) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) IOException(java.io.IOException) ApiVersions(org.apache.kafka.clients.ApiVersions) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) List(java.util.List) NetworkClientUtils(org.apache.kafka.clients.NetworkClientUtils) RequestCompletionHandler(org.apache.kafka.clients.RequestCompletionHandler) Avg(org.apache.kafka.common.metrics.stats.Avg) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) Meter(org.apache.kafka.common.metrics.stats.Meter) Collections(java.util.Collections) ClientResponse(org.apache.kafka.clients.ClientResponse) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException)
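
From an application's point of view, these per-record exceptions surface through the producer send callback (or the returned future). A hedged sketch that distinguishes the per-record InvalidRecordException from other send failures; the bootstrap address and topic name are placeholders:

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerErrorSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"), (metadata, exception) -> {
                if (exception instanceof InvalidRecordException) {
                    // this specific record failed broker-side validation
                    // (e.g. the checks in DefaultRecord.readFrom above)
                    System.err.println("record rejected: " + exception.getMessage());
                } else if (exception != null) {
                    System.err.println("send failed: " + exception);
                }
            });
        }
    }
}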

Aggregations

InvalidRecordException (org.apache.kafka.common.InvalidRecordException) 9
ByteBuffer (java.nio.ByteBuffer) 4
BufferUnderflowException (java.nio.BufferUnderflowException) 3
HashMap (java.util.HashMap) 2
Map (java.util.Map) 2
KafkaException (org.apache.kafka.common.KafkaException) 2
Header (org.apache.kafka.common.header.Header) 2
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader) 2
RecordBatch (org.apache.kafka.common.record.RecordBatch) 2
IOException (java.io.IOException) 1
ArrayList (java.util.ArrayList) 1
Collections (java.util.Collections) 1
IdentityHashMap (java.util.IdentityHashMap) 1
Iterator (java.util.Iterator) 1
LinkedHashMap (java.util.LinkedHashMap) 1
List (java.util.List) 1
Objects (java.util.Objects) 1
OptionalLong (java.util.OptionalLong) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
Function (java.util.function.Function) 1