Use of org.apache.kafka.common.errors.RecordBatchTooLargeException in project kafka by apache.
The class ProducerBatch, method split. This method splits an oversized producer batch into smaller batches sized around splitBatchSize; it is invoked (via RecordAccumulator.splitAndReenqueue()) after the broker rejects a batch as too large. The original batch's future is failed with RecordBatchTooLargeException, while the per-record thunks are carried over to the new batches.
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();

    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");

    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages with version v0 and v1");

    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");

    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch,
    // and we retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);

        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch.closeForRecordAppends();
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }

    // Close the last batch and add it to the batch list after split.
    if (batch != null) {
        batches.add(batch);
        batch.closeForRecordAppends();
    }

    // Fail the original batch's future with RecordBatchTooLargeException; the split
    // batches carry the per-record thunks, so user callbacks are not failed here.
    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, index -> new RecordBatchTooLargeException());
    produceFuture.done();

    // For idempotent/transactional producers, carry the producer id and epoch over to the
    // split batches and assign each one a contiguous slice of the original sequence range.
    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
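The control flow above is easier to see without the Kafka internals. The following is a minimal, self-contained sketch of the same split-and-roll-over pattern on plain byte[] records; SimpleBatch, tryAppend, and the sizes used here are hypothetical stand-ins, not Kafka types.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public final class SplitSketch {

    // Hypothetical stand-in for ProducerBatch: a size-bounded list of records.
    static final class SimpleBatch {
        final List<byte[]> records = new ArrayList<>();
        final int maxBytes;
        int sizeInBytes = 0;

        SimpleBatch(int maxBytes) {
            this.maxBytes = maxBytes;
        }

        // Mirrors tryAppendForSplit: an empty batch always accepts its first record;
        // after that, an append is rejected once the size budget would be exceeded.
        boolean tryAppend(byte[] record) {
            if (!records.isEmpty() && sizeInBytes + record.length > maxBytes)
                return false;
            records.add(record);
            sizeInBytes += record.length;
            return true;
        }
    }

    // Same shape as ProducerBatch.split: open a batch lazily, roll over to a new one
    // whenever an append is rejected, and flush the last open batch at the end.
    static Deque<SimpleBatch> split(List<byte[]> records, int splitBatchSize) {
        Deque<SimpleBatch> batches = new ArrayDeque<>();
        SimpleBatch batch = null;
        for (byte[] record : records) {
            if (batch == null)
                batch = new SimpleBatch(splitBatchSize);
            if (!batch.tryAppend(record)) {
                batches.add(batch);
                batch = new SimpleBatch(splitBatchSize);
                batch.tryAppend(record); // always succeeds on a fresh batch
            }
        }
        if (batch != null)
            batches.add(batch);
        return batches;
    }

    public static void main(String[] args) {
        // Three 40-byte records with a 100-byte budget split into two batches (80 + 40).
        List<byte[]> records = List.of(new byte[40], new byte[40], new byte[40]);
        System.out.println(split(records, 100).size()); // prints 2
    }
}

The sequence-number loop at the end of the real method then gives each split batch a contiguous slice of the original range: with baseSequence 100 and split batches of 2 and 1 records, the new batches get base sequences 100 and 102.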
Use of org.apache.kafka.common.errors.RecordBatchTooLargeException in project apache-kafka-on-k8s by banzaicloud.
The class ProducerBatch, method split. This is an older variant of the same method: it does not call closeForRecordAppends() on the split batches, and it completes the produce future with a single shared RecordBatchTooLargeException rather than a per-record exception supplier.
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();

    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");

    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages with version v0 and v1");

    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");

    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch,
    // and we retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);

        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }

    // Close the last batch and add it to the batch list after split.
    if (batch != null)
        batches.add(batch);

    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, new RecordBatchTooLargeException());
    produceFuture.done();

    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
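For context on where splitBatchSize comes from: in both versions, RecordAccumulator.splitAndReenqueue() calls split() with the producer's configured batch.size when the broker rejects a batch with MESSAGE_TOO_LARGE. Below is a hedged sketch of the relevant client-side configuration using the public producer API; the bootstrap address and topic name are placeholders, not values from the source above.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public final class BatchSizeDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        // batch.size is the split target: when a batch comes back too large,
        // the producer splits it into batches of roughly this size and retries.
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);

        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", new byte[1024])); // placeholder topic
        }
    }
}

Since split batches target batch.size, it should stay comfortably below the broker-side message.max.bytes so that the retried batches are accepted.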