Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class ProducerBatch, method split:
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();

    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");

    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages with version v0 and v1");

    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");

    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch.
    // We also retain the create time of the original batch.
    ProducerBatch batch = null;

    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);

        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch.closeForRecordAppends();
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }

    // Close the last batch and add it to the batch list after split.
    if (batch != null) {
        batches.add(batch);
        batch.closeForRecordAppends();
    }

    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, index -> new RecordBatchTooLargeException());
    produceFuture.done();

    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
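The split path above exercises only a small slice of the MemoryRecords API: build the records, take the single batch from batches(), check magic() and isCompressed(), then walk the Record entries. Below is a minimal, self-contained sketch of that same surface; the class name and sample keys are illustrative, not from the Kafka source.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.SimpleRecord;

public class SplitPrecondition {
    public static void main(String[] args) {
        // withRecords writes all records into one underlying batch, which is
        // exactly the shape split() insists on.
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.GZIP,
                new SimpleRecord("k1".getBytes(), "v1".getBytes()),
                new SimpleRecord("k2".getBytes(), "v2".getBytes()));
        for (MutableRecordBatch batch : records.batches()) {
            // A compressed batch (or any magic v2 batch) passes the guard at
            // the top of split().
            System.out.printf("magic=%d compressed=%b%n", batch.magic(), batch.isCompressed());
            for (Record record : batch)
                System.out.printf("  offset=%d keySize=%d%n", record.offset(), record.keySize());
        }
    }
}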
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class SenderTest, method testSequenceNumberIncrement:
@Test
public void testSequenceNumberIncrement() throws InterruptedException {
    final long producerId = 343434L;
    TransactionManager transactionManager = createTransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    int maxRetries = 10;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL,
            maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions);

    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    client.prepareResponse(body -> {
        if (body instanceof ProduceRequest) {
            ProduceRequest request = (ProduceRequest) body;
            MemoryRecords records = partitionRecords(request).get(tp0);
            Iterator<MutableRecordBatch> batchIterator = records.batches().iterator();
            assertTrue(batchIterator.hasNext());
            RecordBatch batch = batchIterator.next();
            assertFalse(batchIterator.hasNext());
            assertEquals(0, batch.baseSequence());
            assertEquals(producerId, batch.producerId());
            assertEquals(0, batch.producerEpoch());
            return true;
        }
        return false;
    }, produceResponse(tp0, 0, Errors.NONE, 0));

    sender.runOnce(); // connect
    sender.runOnce(); // send
    sender.runOnce(); // receive response
    assertTrue(responseFuture.isDone());
    assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
    assertEquals(1L, (long) transactionManager.sequenceNumber(tp0));
}
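The callback's assertions read the idempotent producer state straight off the serialized batch. Here is a minimal sketch of the same read-back without the Sender machinery, assuming the MemoryRecords.withIdempotentRecords factory to stamp the state (the class name and output format are ours):

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class SequenceReadBack {
    public static void main(String[] args) {
        long producerId = 343434L;
        short epoch = 0;
        int baseSequence = 0;
        MemoryRecords records = MemoryRecords.withIdempotentRecords(
                CompressionType.NONE, producerId, epoch, baseSequence,
                new SimpleRecord("key".getBytes(), "value".getBytes()));
        for (RecordBatch batch : records.batches()) {
            // After a successful ack the next batch must start at
            // lastSequence() + 1, which is what the test asserts via
            // transactionManager.sequenceNumber(tp0).
            System.out.printf("producerId=%d epoch=%d baseSeq=%d nextSeq=%d%n",
                    batch.producerId(), batch.producerEpoch(),
                    batch.baseSequence(), batch.lastSequence() + 1);
        }
    }
}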
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class SenderTest, method testMessageFormatDownConversion:
@Test
public void testMessageFormatDownConversion() throws Exception {
    // This test case verifies the behavior when the version of the produce
    // request supported by the broker changes after the record set is created.
    long offset = 0;

    // Start off supporting produce request v3.
    apiVersions.update("0", NodeApiVersions.create());
    Future<RecordMetadata> future = appendToAccumulator(tp0, 0L, "key", "value");

    // Now the partition leader supports only v2.
    apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));

    client.prepareResponse(body -> {
        ProduceRequest request = (ProduceRequest) body;
        if (request.version() != 2)
            return false;
        MemoryRecords records = partitionRecords(request).get(tp0);
        return records != null && records.sizeInBytes() > 0 && records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1);
    }, produceResponse(tp0, offset, Errors.NONE, 0));

    sender.runOnce(); // connect
    sender.runOnce(); // send produce request
    assertTrue(future.isDone(), "Request should be completed");
    assertEquals(offset, future.get().offset());
}
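The matcher's key call is hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1), which reports whether every batch in the set was written with the given magic byte. A short sketch of that check in isolation, building the records directly in the v1 format through the magic-taking withRecords overload (the class name is illustrative):

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class MagicCheck {
    public static void main(String[] args) {
        // Write the record set in the pre-idempotence v1 format, the shape a
        // v2 produce request is expected to carry after down-conversion.
        MemoryRecords v1Records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1,
                CompressionType.NONE, new SimpleRecord("key".getBytes(), "value".getBytes()));
        System.out.println(v1Records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1)); // true
        System.out.println(v1Records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V2)); // false
    }
}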
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class ProduceRequestTest, method testMixedTransactionalData:
@Test
public void testMixedTransactionalData() {
    final long producerId = 15L;
    final short producerEpoch = 5;
    final int sequence = 10;

    final MemoryRecords nonTxnRecords = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("foo".getBytes()));
    final MemoryRecords txnRecords = MemoryRecords.withTransactionalRecords(CompressionType.NONE,
            producerId, producerEpoch, sequence, new SimpleRecord("bar".getBytes()));

    ProduceRequest.Builder builder = ProduceRequest.forMagic(RecordBatch.CURRENT_MAGIC_VALUE,
            new ProduceRequestData()
                    .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList(
                            new ProduceRequestData.TopicProduceData()
                                    .setName("foo")
                                    .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
                                            .setIndex(0)
                                            .setRecords(txnRecords))),
                            new ProduceRequestData.TopicProduceData()
                                    .setName("foo")
                                    .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
                                            .setIndex(1)
                                            .setRecords(nonTxnRecords)))).iterator()))
                    .setAcks((short) -1)
                    .setTimeoutMs(5000));

    final ProduceRequest request = builder.build();
    assertTrue(RequestUtils.hasTransactionalRecords(request));
    assertTrue(RequestTestUtils.hasIdempotentRecords(request));
}
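The implementations of RequestUtils.hasTransactionalRecords and RequestTestUtils.hasIdempotentRecords are not shown here, but any such check ultimately scans the batches of each partition's MemoryRecords and reads the batch-level flags. A sketch of that idea over the two record sets built above; the helper names are ours, not Kafka's:

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class BatchFlags {
    // Hypothetical helpers: scan every batch and inspect its header flags.
    static boolean anyTransactional(MemoryRecords records) {
        for (RecordBatch batch : records.batches())
            if (batch.isTransactional())
                return true;
        return false;
    }

    static boolean anyIdempotent(MemoryRecords records) {
        for (RecordBatch batch : records.batches())
            if (batch.hasProducerId())
                return true;
        return false;
    }

    public static void main(String[] args) {
        MemoryRecords txn = MemoryRecords.withTransactionalRecords(CompressionType.NONE,
                15L, (short) 5, 10, new SimpleRecord("bar".getBytes()));
        MemoryRecords nonTxn = MemoryRecords.withRecords(CompressionType.NONE,
                new SimpleRecord("foo".getBytes()));
        System.out.println(anyTransactional(txn));    // true
        System.out.println(anyTransactional(nonTxn)); // false
        System.out.println(anyIdempotent(txn));       // true: transactional batches carry a producer id
    }
}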
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class ProduceRequestTest, method shouldBeFlaggedAsTransactionalWhenTransactionalRecords:
@Test
public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() {
    final MemoryRecords memoryRecords = MemoryRecords.withTransactionalRecords(0, CompressionType.NONE,
            1L, (short) 1, 1, 1, simpleRecord);
    final ProduceRequest request = ProduceRequest.forCurrentMagic(new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(
                    new ProduceRequestData.TopicProduceData()
                            .setName("topic")
                            .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
                                    .setIndex(1)
                                    .setRecords(memoryRecords)))).iterator()))
            .setAcks((short) -1)
            .setTimeoutMs(10)).build();
    assertTrue(RequestUtils.hasTransactionalRecords(request));
}
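In this withTransactionalRecords overload the positional arguments are, in order: initial offset, compression type, producer id, producer epoch, base sequence, and partition leader epoch. A brief sketch reading those values back off the batch, mirroring the literals in the test above (the class name is illustrative):

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class TransactionalReadBack {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withTransactionalRecords(0L, CompressionType.NONE,
                1L, (short) 1, 1, 1, new SimpleRecord("value".getBytes()));
        for (RecordBatch batch : records.batches())
            // The transactional bit lives in the batch header, so it survives
            // serialization into the produce request.
            System.out.printf("baseOffset=%d transactional=%b baseSequence=%d%n",
                    batch.baseOffset(), batch.isTransactional(), batch.baseSequence());
    }
}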