Usage example of org.apache.kafka.common.utils.PrimitiveRef.IntRef from the Apache Kafka project.
Taken from the readPartiallyFrom method of the DefaultRecord class.
/**
 * Partially reads a record from {@code input}, skipping over the key, value and header
 * payloads instead of materializing them, and returning only their sizes together with
 * the record's metadata (attributes, offset, timestamp, sequence).
 *
 * @param input             source to read the record body from
 * @param skipArray         scratch array used while buffering/skipping length-delimited fields
 * @param sizeInBytes       total size of the record in bytes
 * @param sizeOfBodyInBytes number of body bytes expected to be consumed from {@code input}
 * @param baseOffset        batch base offset; the record's offset delta is added to it
 * @param baseTimestamp     batch base timestamp; the record's timestamp delta is added to it
 * @param baseSequence      batch base sequence, or negative if the batch has none
 * @param logAppendTime     if non-null, overrides the record's computed timestamp
 * @return a {@link PartialDefaultRecord} carrying metadata plus key/value sizes
 * @throws IOException            if reading from {@code input} fails
 * @throws InvalidRecordException if the record structure is malformed
 */
private static PartialDefaultRecord readPartiallyFrom(DataInput input,
                                                      byte[] skipArray,
                                                      int sizeInBytes,
                                                      int sizeOfBodyInBytes,
                                                      long baseOffset,
                                                      long baseTimestamp,
                                                      int baseSequence,
                                                      Long logAppendTime) throws IOException {
    ByteBuffer scratch = ByteBuffer.wrap(skipArray);
    // a zero limit marks the scratch buffer as "nothing buffered yet"
    scratch.limit(0);

    try {
        IntRef remaining = PrimitiveRef.ofInt(sizeOfBodyInBytes);

        // Attributes, timestamp delta and offset delta need no array allocation, so they
        // are read directly. NOTE: the call order below mirrors the on-the-wire record
        // layout and must not be rearranged.
        byte attributes = readByte(scratch, input, remaining);
        long timestampDelta = readVarLong(scratch, input, remaining);
        long timestamp = logAppendTime == null ? baseTimestamp + timestampDelta : logAppendTime;

        int offsetDelta = readVarInt(scratch, input, remaining);
        long offset = baseOffset + offsetDelta;
        int sequence = baseSequence >= 0
            ? DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta)
            : RecordBatch.NO_SEQUENCE;

        // Skip the key and value payloads, retaining only their sizes.
        int keySize = skipLengthDelimitedField(scratch, input, remaining);
        int valueSize = skipLengthDelimitedField(scratch, input, remaining);

        // Skip every header's key and value.
        int numHeaders = readVarInt(scratch, input, remaining);
        if (numHeaders < 0)
            throw new InvalidRecordException("Found invalid number of record headers " + numHeaders);

        for (int i = 0; i < numHeaders; i++) {
            int headerKeySize = skipLengthDelimitedField(scratch, input, remaining);
            if (headerKeySize < 0)
                throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);

            // header value; its size is not needed
            skipLengthDelimitedField(scratch, input, remaining);
        }

        // Every body byte must have been consumed, and no buffered bytes may be left over.
        if (remaining.value > 0 || scratch.remaining() > 0)
            throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes + " bytes in record payload, but there are still bytes remaining");

        return new PartialDefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, keySize, valueSize);
    } catch (BufferUnderflowException | IllegalArgumentException e) {
        // Malformed varints or truncated input surface here; rewrap with record context.
        throw new InvalidRecordException("Found invalid record structure", e);
    }
}
Aggregations