Use of org.apache.kafka.common.record.MutableRecordBatch in project apache-kafka-on-k8s by banzaicloud.
The class ProducerBatch, method split():
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();

    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");

    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " +
                "with version v0 and v1");

    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");

    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch.
    // And we also retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);

        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }

    // Close the last batch and add it to the batch list after split.
    if (batch != null)
        batches.add(batch);

    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, new RecordBatchTooLargeException());
    produceFuture.done();

    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
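For context, the producer splits a batch like this after a broker rejects it as too large, then re-queues the pieces. A minimal sketch of such a caller, assuming a hypothetical reenqueue() helper (only split() itself is from the snippet above):

// Hedged sketch of a caller: split an oversized batch and re-queue the pieces.
// 'reenqueue' is a hypothetical stand-in for the accumulator's internal
// re-queueing logic, not an API shown above.
void handleBatchTooLarge(ProducerBatch bigBatch, int splitBatchSize) {
    Deque<ProducerBatch> smallerBatches = bigBatch.split(splitBatchSize);
    for (ProducerBatch smaller : smallerBatches)
        reenqueue(smaller); // put each piece back at the front of the partition's deque
}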
Use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.
The class RecordsIterator, method nextBatch():
private Optional<Batch<T>> nextBatch() {
    if (!nextBatches.hasNext()) {
        nextBatches = nextBatches();
    }

    if (nextBatches.hasNext()) {
        MutableRecordBatch nextBatch = nextBatches.next();

        // Update the buffer position to reflect the read batch
        allocatedBuffer.ifPresent(buffer -> buffer.position(buffer.position() + nextBatch.sizeInBytes()));

        if (!(nextBatch instanceof DefaultRecordBatch)) {
            throw new IllegalStateException(
                String.format("DefaultRecordBatch expected, but record type was %s", nextBatch.getClass()));
        }

        return Optional.of(readBatch((DefaultRecordBatch) nextBatch));
    }

    return Optional.empty();
}
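nextBatch() is the workhorse behind the class's Iterator contract. A sketch of how hasNext()/next() can be layered on top of it, assuming a cached Optional field (the field name and exact bodies are assumptions, not the verbatim class internals):

// Hedged sketch: cache the batch produced by nextBatch() so that repeated
// hasNext() calls do not consume batches.
private Optional<Batch<T>> nextBatch = Optional.empty();

@Override
public boolean hasNext() {
    if (!nextBatch.isPresent())
        nextBatch = nextBatch();
    return nextBatch.isPresent();
}

@Override
public Batch<T> next() {
    if (!hasNext())
        throw new NoSuchElementException("Batch iterator has no more elements");
    Batch<T> batch = nextBatch.get();
    nextBatch = Optional.empty();
    return batch;
}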
Use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.
The class RecordAccumulatorTest, method testAppendLargeOldMessageFormat():
private void testAppendLargeOldMessageFormat(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize];

    ApiVersions apiVersions = new ApiVersions();
    apiVersions.update(node1.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));

    RecordAccumulator accum = createTestRecordAccumulator(
        batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
    assertEquals(Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes,
        "Our partition's leader should be ready");

    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
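The helper is private and parameterized by compression type; a plausible way the public tests drive it (the @Test method names here are assumptions):

@Test
public void testAppendLargeOldMessageFormatNonCompressed() throws Exception {
    testAppendLargeOldMessageFormat(CompressionType.NONE);
}

@Test
public void testAppendLargeOldMessageFormatCompressed() throws Exception {
    testAppendLargeOldMessageFormat(CompressionType.GZIP);
}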
Use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.
The class RecordAccumulatorTest, method testAppendLarge():
private void testAppendLarge(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize];

    RecordAccumulator accum = createTestRecordAccumulator(
        batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
    assertEquals(Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes,
        "Our partition's leader should be ready");

    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
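Why exactly one batch holding exactly one record? The arithmetic from the snippet makes it clear; a brief restatement with comments (constants are the ones used above):

int batchSize = 512;                     // accumulator's target batch size
byte[] value = new byte[2 * batchSize];  // 1024-byte payload
// The value alone already exceeds batchSize, so the accumulator cannot
// append it to a shared batch; it allocates a dedicated batch sized
// batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD for this single
// record, which is why the test asserts one batch and one record.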
Use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.
The class SenderTest, method sendIdempotentProducerResponse():
void sendIdempotentProducerResponse(int expectedEpoch, int expectedSequence, TopicPartition tp,
                                    Errors responseError, long responseOffset, long logStartOffset) {
    client.respond(body -> {
        ProduceRequest produceRequest = (ProduceRequest) body;
        assertTrue(RequestTestUtils.hasIdempotentRecords(produceRequest));

        MemoryRecords records = partitionRecords(produceRequest).get(tp);
        Iterator<MutableRecordBatch> batchIterator = records.batches().iterator();
        RecordBatch firstBatch = batchIterator.next();
        assertFalse(batchIterator.hasNext());
        if (expectedEpoch > -1)
            assertEquals((short) expectedEpoch, firstBatch.producerEpoch());
        assertEquals(expectedSequence, firstBatch.baseSequence());
        return true;
    }, produceResponse(tp, responseOffset, responseError, 0, logStartOffset, null));
}
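A typical call site in the sender tests, shown as a hedged sketch (tp0 and the surrounding flow are assumptions; sendIdempotentProducerResponse itself is the method above):

// After the Sender transmits the first batch (epoch 0, sequence 0) for tp0,
// enqueue a successful broker response at offset 0, then let the Sender
// process it on its next pass.
sendIdempotentProducerResponse(0, 0, tp0, Errors.NONE, 0L, -1L);
sender.runOnce();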