Use of org.apache.kafka.common.InvalidRecordException in project kafka by apache.
Class DefaultRecord, method readPartiallyFrom.
private static PartialDefaultRecord readPartiallyFrom(DataInput input,
                                                       byte[] skipArray,
                                                       int sizeInBytes,
                                                       int sizeOfBodyInBytes,
                                                       long baseOffset,
                                                       long baseTimestamp,
                                                       int baseSequence,
                                                       Long logAppendTime) throws IOException {
    ByteBuffer skipBuffer = ByteBuffer.wrap(skipArray);
    // set its limit to 0 to indicate no bytes readable yet
    skipBuffer.limit(0);

    try {
        // reading the attributes / timestamp / offset and key-size does not require
        // any byte array allocation and therefore we can just read them straight-forwardly
        IntRef bytesRemaining = PrimitiveRef.ofInt(sizeOfBodyInBytes);

        byte attributes = readByte(skipBuffer, input, bytesRemaining);
        long timestampDelta = readVarLong(skipBuffer, input, bytesRemaining);
        long timestamp = baseTimestamp + timestampDelta;
        if (logAppendTime != null)
            timestamp = logAppendTime;

        int offsetDelta = readVarInt(skipBuffer, input, bytesRemaining);
        long offset = baseOffset + offsetDelta;
        int sequence = baseSequence >= 0 ?
            DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta) :
            RecordBatch.NO_SEQUENCE;

        // first skip key
        int keySize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);

        // then skip value
        int valueSize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);

        // then skip header
        int numHeaders = readVarInt(skipBuffer, input, bytesRemaining);
        if (numHeaders < 0)
            throw new InvalidRecordException("Found invalid number of record headers " + numHeaders);
        for (int i = 0; i < numHeaders; i++) {
            int headerKeySize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
            if (headerKeySize < 0)
                throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);

            // headerValueSize
            skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
        }

        if (bytesRemaining.value > 0 || skipBuffer.remaining() > 0)
            throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes +
                " bytes in record payload, but there are still bytes remaining");

        return new PartialDefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, keySize, valueSize);
    } catch (BufferUnderflowException | IllegalArgumentException e) {
        throw new InvalidRecordException("Found invalid record structure", e);
    }
}
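The skip-style reading above never materializes the key, value, or header payloads; it only needs each field's zig-zag varint length so it can advance past the bytes. The following standalone sketch (illustrative only, not the private Kafka helpers such as skipLengthDelimitedField) shows how a length-delimited field can be skipped over a plain ByteBuffer under that assumption:

import java.nio.ByteBuffer;

public class VarintSkipSketch {
    // Hypothetical helper: decode a zig-zag varint length, then advance past the
    // payload without copying it. Returns -1 for a null field, as in the record format.
    static int skipLengthDelimited(ByteBuffer buf) {
        int raw = 0;
        int shift = 0;
        byte b;
        do {
            b = buf.get();                         // BufferUnderflowException on truncated input
            raw |= (b & 0x7F) << shift;
            shift += 7;
        } while ((b & 0x80) != 0);
        int length = (raw >>> 1) ^ -(raw & 1);     // zig-zag decode
        if (length > 0)
            buf.position(buf.position() + length); // skip the field body
        return length;
    }

    public static void main(String[] args) {
        // 0x06 is the zig-zag varint for length 3, followed by the three field bytes.
        ByteBuffer buf = ByteBuffer.wrap(new byte[] {0x06, 'k', 'e', 'y'});
        System.out.println(skipLengthDelimited(buf)); // 3
        System.out.println(buf.remaining());          // 0
    }
}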
Use of org.apache.kafka.common.InvalidRecordException in project kafka by apache.
Class DefaultRecord, method readFrom.
private static DefaultRecord readFrom(ByteBuffer buffer,
                                      int sizeInBytes,
                                      int sizeOfBodyInBytes,
                                      long baseOffset,
                                      long baseTimestamp,
                                      int baseSequence,
                                      Long logAppendTime) {
    try {
        int recordStart = buffer.position();
        byte attributes = buffer.get();
        long timestampDelta = ByteUtils.readVarlong(buffer);
        long timestamp = baseTimestamp + timestampDelta;
        if (logAppendTime != null)
            timestamp = logAppendTime;

        int offsetDelta = ByteUtils.readVarint(buffer);
        long offset = baseOffset + offsetDelta;
        int sequence = baseSequence >= 0 ?
            DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta) :
            RecordBatch.NO_SEQUENCE;

        ByteBuffer key = null;
        int keySize = ByteUtils.readVarint(buffer);
        if (keySize >= 0) {
            key = buffer.slice();
            key.limit(keySize);
            buffer.position(buffer.position() + keySize);
        }

        ByteBuffer value = null;
        int valueSize = ByteUtils.readVarint(buffer);
        if (valueSize >= 0) {
            value = buffer.slice();
            value.limit(valueSize);
            buffer.position(buffer.position() + valueSize);
        }

        int numHeaders = ByteUtils.readVarint(buffer);
        if (numHeaders < 0)
            throw new InvalidRecordException("Found invalid number of record headers " + numHeaders);

        final Header[] headers;
        if (numHeaders == 0)
            headers = Record.EMPTY_HEADERS;
        else
            headers = readHeaders(buffer, numHeaders);

        // validate whether we have read all header bytes in the current record
        if (buffer.position() - recordStart != sizeOfBodyInBytes)
            throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes +
                " bytes in record payload, but instead read " + (buffer.position() - recordStart));

        return new DefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, key, value, headers);
    } catch (BufferUnderflowException | IllegalArgumentException e) {
        throw new InvalidRecordException("Found invalid record structure", e);
    }
}
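readFrom hands back the key and value as slices of the batch buffer rather than copies, which is why it only adjusts positions and limits. A minimal, self-contained sketch of that slicing pattern (plain java.nio, not Kafka code):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SliceSketch {
    public static void main(String[] args) {
        // slice() shares the backing bytes, so "reading" a field is just narrowing a view.
        ByteBuffer body = ByteBuffer.wrap("keyvalue".getBytes(StandardCharsets.UTF_8));

        ByteBuffer key = body.slice();
        key.limit(3);                           // the first 3 bytes form the key
        body.position(body.position() + 3);     // advance past the key, as readFrom does

        ByteBuffer value = body.slice();        // the remaining bytes form the value

        System.out.println(StandardCharsets.UTF_8.decode(key));   // key
        System.out.println(StandardCharsets.UTF_8.decode(value)); // value
    }
}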
Use of org.apache.kafka.common.InvalidRecordException in project kafka by apache.
Class DefaultRecordBatch, method uncompressedIterator.
private CloseableIterator<Record> uncompressedIterator() {
    final ByteBuffer buffer = this.buffer.duplicate();
    buffer.position(RECORDS_OFFSET);
    return new RecordIterator() {
        @Override
        protected Record readNext(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) {
            try {
                return DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, logAppendTime);
            } catch (BufferUnderflowException e) {
                throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached");
            }
        }

        @Override
        protected boolean ensureNoneRemaining() {
            return !buffer.hasRemaining();
        }

        @Override
        public void close() {}
    };
}
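The iterator reads from this.buffer.duplicate(), so walking the records never disturbs the batch's own position or limit. A small sketch of that duplicate() behaviour (plain java.nio, no Kafka types; the 8-byte "header" is just an illustrative stand-in for RECORDS_OFFSET):

import java.nio.ByteBuffer;

public class DuplicateSketch {
    public static void main(String[] args) {
        ByteBuffer batch = ByteBuffer.allocate(16);

        // duplicate() shares the bytes but keeps an independent position, limit and mark.
        ByteBuffer view = batch.duplicate();
        view.position(8);                       // e.g. skip a fixed-size batch header

        System.out.println(batch.position());  // 0 -- the original buffer is untouched
        System.out.println(view.remaining());  // 8 -- the view sees only the record bytes
    }
}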
Use of org.apache.kafka.common.InvalidRecordException in project kafka by apache.
Class ProduceRequest, method validateRecords.
public static void validateRecords(short version, BaseRecords baseRecords) {
    if (version >= 3) {
        if (baseRecords instanceof Records) {
            Records records = (Records) baseRecords;
            Iterator<? extends RecordBatch> iterator = records.batches().iterator();
            if (!iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " must have at least " +
                    "one record batch");

            RecordBatch entry = iterator.next();
            if (entry.magic() != RecordBatch.MAGIC_VALUE_V2)
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " +
                    "contain record batches with magic version 2");
            if (version < 7 && entry.compressionType() == CompressionType.ZSTD) {
                throw new UnsupportedCompressionTypeException("Produce requests with version " + version + " are not allowed to " +
                    "use ZStandard compression");
            }

            if (iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " +
                    "contain exactly one record batch");
        }
    }

    // Note that we do not do similar validation for older versions to ensure compatibility with
    // clients which send the wrong magic version in the wrong version of the produce request. The broker
    // did not do this validation before, so we maintain that behavior here.
}
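A sketch of how a caller might exercise this check with a single magic-v2 batch. MemoryRecords, SimpleRecord and CompressionType are real client classes, but the exact withRecords overload and the request version used here are assumptions that vary between Kafka releases:

import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.ProduceRequest;

public class ValidateSketch {
    public static void main(String[] args) {
        // One uncompressed magic-v2 batch containing a single record (assumed overload).
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
                new SimpleRecord("key".getBytes(), "value".getBytes()));
        try {
            ProduceRequest.validateRecords((short) 7, records); // passes: exactly one v2 batch
        } catch (InvalidRecordException e) {
            System.err.println("rejected: " + e.getMessage());
        }
    }
}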
Use of org.apache.kafka.common.InvalidRecordException in project kafka by apache.
Class Sender, method failBatch.
private void failBatch(ProducerBatch batch,
                       ProduceResponse.PartitionResponse response,
                       boolean adjustSequenceNumbers) {
    final RuntimeException topLevelException;
    if (response.error == Errors.TOPIC_AUTHORIZATION_FAILED)
        topLevelException = new TopicAuthorizationException(Collections.singleton(batch.topicPartition.topic()));
    else if (response.error == Errors.CLUSTER_AUTHORIZATION_FAILED)
        topLevelException = new ClusterAuthorizationException("The producer is not authorized to do idempotent sends");
    else
        topLevelException = response.error.exception(response.errorMessage);

    if (response.recordErrors == null || response.recordErrors.isEmpty()) {
        failBatch(batch, topLevelException, adjustSequenceNumbers);
    } else {
        Map<Integer, RuntimeException> recordErrorMap = new HashMap<>(response.recordErrors.size());
        for (ProduceResponse.RecordError recordError : response.recordErrors) {
            // The API leaves us with some awkwardness interpreting the errors in the response.
            // We cannot differentiate between different error cases (such as INVALID_TIMESTAMP)
            // from the single error code at the partition level, so instead we use INVALID_RECORD
            // for all failed records and rely on the message to distinguish the cases.
            final String errorMessage;
            if (recordError.message != null) {
                errorMessage = recordError.message;
            } else if (response.errorMessage != null) {
                errorMessage = response.errorMessage;
            } else {
                errorMessage = response.error.message();
            }

            // use the exception type corresponding to the partition-level error code.
            if (response.recordErrors.size() == 1) {
                recordErrorMap.put(recordError.batchIndex, response.error.exception(errorMessage));
            } else {
                recordErrorMap.put(recordError.batchIndex, new InvalidRecordException(errorMessage));
            }
        }

        Function<Integer, RuntimeException> recordExceptions = batchIndex -> {
            RuntimeException exception = recordErrorMap.get(batchIndex);
            if (exception != null) {
                return exception;
            } else {
                // return a generic exception.
                return new KafkaException("Failed to append record because it was part of a batch " +
                    "which had one or more invalid records");
            }
        };

        failBatch(batch, topLevelException, recordExceptions, adjustSequenceNumbers);
    }
}
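On the application side, the per-record exceptions chosen above eventually complete each record's send callback (or future). A minimal, hypothetical usage sketch; the broker address, topic name and serializer settings are placeholders:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.InvalidRecordException;

public class RecordErrorCallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "k", "v"), (metadata, exception) -> {
                // If the broker flagged this specific record, the Sender surfaces it here as an
                // InvalidRecordException (or as the partition-level error for single-record failures).
                if (exception instanceof InvalidRecordException)
                    System.err.println("record rejected: " + exception.getMessage());
                else if (exception != null)
                    System.err.println("send failed: " + exception.getMessage());
            });
        }
    }
}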