Use of org.apache.kafka.common.record.MutableRecordBatch in project apache-kafka-on-k8s by banzaicloud:
the class ProduceRequest, method validateRecords.
private void validateRecords(short version, MemoryRecords records) {
    if (version >= 3) {
        Iterator<MutableRecordBatch> iterator = records.batches().iterator();
        if (!iterator.hasNext())
            throw new InvalidRecordException("Produce requests with version " + version + " must have at least " +
                    "one record batch");

        MutableRecordBatch entry = iterator.next();
        if (entry.magic() != RecordBatch.MAGIC_VALUE_V2)
            throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " +
                    "contain record batches with magic version 2");

        if (iterator.hasNext())
            throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " +
                    "contain exactly one record batch");

        idempotent = entry.hasProducerId();
        transactional = entry.isTransactional();
    }

    // Note that we do not do similar validation for older versions to ensure compatibility with
    // clients which send the wrong magic version in the wrong version of the produce request. The broker
    // did not do this validation before, so we maintain that behavior here.
}
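For context, here is a minimal standalone sketch of building a MemoryRecords payload with the single v2 batch shape this validation expects. The Kafka class names (MemoryRecords, SimpleRecord, MutableRecordBatch) are from the clients library; the ValidateSketch class and the literal keys and values are illustrative assumptions.

import java.util.Iterator;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class ValidateSketch {
    public static void main(String[] args) {
        // Everything appended here lands in one record batch written with the
        // current (v2) magic, which is the only shape validateRecords accepts
        // for produce requests with version >= 3.
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
                new SimpleRecord("k1".getBytes(), "v1".getBytes()),
                new SimpleRecord("k2".getBytes(), "v2".getBytes()));

        Iterator<MutableRecordBatch> iterator = records.batches().iterator();
        MutableRecordBatch entry = iterator.next();
        System.out.println("magic v2: " + (entry.magic() == RecordBatch.MAGIC_VALUE_V2));
        System.out.println("single batch: " + (!iterator.hasNext()));
        System.out.println("idempotent: " + entry.hasProducerId());
        System.out.println("transactional: " + entry.isTransactional());
    }
}

A payload written with an older magic value, or one concatenated from more than one batch, would trip the corresponding checks in validateRecords.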
Use of org.apache.kafka.common.record.MutableRecordBatch in project apache-kafka-on-k8s by banzaicloud:
the class ProducerBatch, method split.
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();

    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");

    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " +
                "with version v0 and v1");

    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");

    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch.
    // And we also retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);

        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }

    // Close the last batch and add it to the batch list after split.
    if (batch != null)
        batches.add(batch);

    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, new RecordBatchTooLargeException());
    produceFuture.done();

    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
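The repacking pattern above (walk the Records of the one oversized batch and start a new target whenever the current one is full) can be sketched standalone with the public MemoryRecords and MemoryRecordsBuilder API. The SplitSketch class, the 16 KiB initial buffer, and the split-by-record-count policy are illustrative assumptions only; the real split() sizes by bytes and also carries the callback thunks, the produce future, and producer state.

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class SplitSketch {

    // Repack the records of an oversized MemoryRecords payload into smaller
    // chunks of at most maxRecordsPerChunk records each. Only the iteration
    // and "start a new target when the current one is full" shape mirrors
    // ProducerBatch.split(); accumulator bookkeeping is deliberately omitted.
    static Deque<MemoryRecords> splitByCount(MemoryRecords large, int maxRecordsPerChunk) {
        Deque<MemoryRecords> chunks = new ArrayDeque<>();
        MemoryRecordsBuilder builder = newBuilder();
        int count = 0;
        for (MutableRecordBatch batch : large.batches()) {
            for (Record record : batch) {
                if (count == maxRecordsPerChunk) {
                    chunks.add(builder.build());
                    builder = newBuilder();
                    count = 0;
                }
                builder.append(record.timestamp(), record.key(), record.value(), record.headers());
                count++;
            }
        }
        if (count > 0)
            chunks.add(builder.build());
        return chunks;
    }

    // Assumed 16 KiB initial buffer (it grows if needed); each build() call
    // produces one v2 record batch.
    private static MemoryRecordsBuilder newBuilder() {
        return MemoryRecords.builder(ByteBuffer.allocate(16 * 1024), CompressionType.NONE,
                TimestampType.CREATE_TIME, 0L);
    }
}

This also clarifies why split() completes the original produceFuture with a RecordBatchTooLargeException: the oversized batch is abandoned and the smaller batches produced here take its place.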