Search in sources:

Example 26 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.

Source: class ProducerBatch, method split.

/**
 * Splits this producer batch into multiple smaller batches, allocating each new batch
 * with {@code splitBatchSize}. The original batch's future is failed with
 * {@link RecordBatchTooLargeException}, and per-record callbacks (thunks) are re-attached
 * to the new batches in append order.
 *
 * @param splitBatchSize the allocation size used for each newly created batch
 * @return the new batches, in the original record order
 * @throws IllegalStateException if this batch contains no records
 * @throws IllegalArgumentException if the batch is uncompressed v0/v1, or wraps more
 *         than one record batch
 */
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    // Materialize what has been appended so far so it can be re-read record by record.
    MemoryRecords memoryRecords = recordsBuilder.build();
    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");
    RecordBatch recordBatch = recordBatchIter.next();
    // Splitting v0/v1 batches is only supported when they are compressed — presumably an
    // uncompressed old-format record that is too large cannot shrink by splitting.
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " + "with version v0 and v1");
    // A ProducerBatch is expected to wrap exactly one record batch.
    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");
    // Thunks (per-record callbacks/futures) are consumed in the same order the records
    // were appended, so each record is re-paired with its original thunk below.
    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch.
    // And we also retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext(); // one thunk per record, by construction
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            // Current batch is full: seal it and start a fresh one for this record.
            batches.add(batch);
            batch.closeForRecordAppends();
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }
    // Close the last batch and add it to the batch list after split.
    if (batch != null) {
        batches.add(batch);
        batch.closeForRecordAppends();
    }
    // Complete the original batch's future exceptionally so waiters learn it was split
    // (too large) rather than delivered.
    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, index -> new RecordBatchTooLargeException());
    produceFuture.done();
    // If a producer sequence was already assigned, spread contiguous sequence numbers
    // across the split batches, preserving idempotent/transactional ordering.
    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
Also used : RecordBatch(org.apache.kafka.common.record.RecordBatch) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) ProducerIdAndEpoch(org.apache.kafka.common.utils.ProducerIdAndEpoch) ArrayDeque(java.util.ArrayDeque) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) Record(org.apache.kafka.common.record.Record) RecordBatchTooLargeException(org.apache.kafka.common.errors.RecordBatchTooLargeException) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Example 27 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.

Source: class SenderTest, method testSequenceNumberIncrement.

/**
 * Verifies that a successful produce advances the per-partition sequence number:
 * the sent batch carries base sequence 0, and after the ack the transaction manager
 * reports last-acked sequence 0 and next sequence 1.
 */
@Test
public void testSequenceNumberIncrement() throws InterruptedException {
    final long producerId = 343434L;
    TransactionManager txnManager = createTransactionManager();
    setupWithTransactionState(txnManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(txnManager.hasProducerId());
    int retryLimit = 10;
    Metrics metrics = new Metrics();
    SenderMetricsRegistry metricsRegistry = new SenderMetricsRegistry(metrics);
    Sender testSender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, retryLimit, metricsRegistry, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions);
    Future<RecordMetadata> sendFuture = appendToAccumulator(tp0);
    client.prepareResponse(body -> {
        // Only match produce requests; anything else does not consume this response.
        if (!(body instanceof ProduceRequest))
            return false;
        ProduceRequest produceRequest = (ProduceRequest) body;
        MemoryRecords sentRecords = partitionRecords(produceRequest).get(tp0);
        Iterator<MutableRecordBatch> batches = sentRecords.batches().iterator();
        assertTrue(batches.hasNext());
        RecordBatch firstBatch = batches.next();
        assertFalse(batches.hasNext());
        // Exactly one batch, stamped with the initial sequence and producer id/epoch.
        assertEquals(0, firstBatch.baseSequence());
        assertEquals(producerId, firstBatch.producerId());
        assertEquals(0, firstBatch.producerEpoch());
        return true;
    }, produceResponse(tp0, 0, Errors.NONE, 0));
    // First poll establishes the connection.
    testSender.runOnce();
    // Second poll sends the produce request.
    testSender.runOnce();
    // Third poll receives the response.
    testSender.runOnce();
    assertTrue(sendFuture.isDone());
    assertEquals(OptionalInt.of(0), txnManager.lastAckedSequence(tp0));
    assertEquals(1L, (long) txnManager.sequenceNumber(tp0));
}
Also used : ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) RecordBatch(org.apache.kafka.common.record.RecordBatch) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) Metrics(org.apache.kafka.common.metrics.Metrics) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)

Example 28 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.

Source: class SenderTest, method testMessageFormatDownConversion.

/**
 * Verifies that when the partition leader's supported produce version drops after the
 * record set is created, the sender down-converts the records (here to magic v1) and
 * sends them with the older request version.
 */
@Test
public void testMessageFormatDownConversion() throws Exception {
    long expectedOffset = 0;
    // Initially the node advertises support for the latest produce version.
    apiVersions.update("0", NodeApiVersions.create());
    Future<RecordMetadata> future = appendToAccumulator(tp0, 0L, "key", "value");
    // The leader now only supports produce versions 0..2.
    apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));
    client.prepareResponse(body -> {
        ProduceRequest produceRequest = (ProduceRequest) body;
        if (produceRequest.version() != 2)
            return false;
        MemoryRecords sent = partitionRecords(produceRequest).get(tp0);
        if (sent == null || sent.sizeInBytes() <= 0)
            return false;
        // Down-converted records must carry the v1 magic value.
        return sent.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1);
    }, produceResponse(tp0, expectedOffset, Errors.NONE, 0));
    // First poll connects.
    sender.runOnce();
    // Second poll sends the produce request.
    sender.runOnce();
    assertTrue(future.isDone(), "Request should be completed");
    assertEquals(expectedOffset, future.get().offset());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)

Example 29 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.

Source: class ProduceRequestTest, method testMixedTransactionalData.

/**
 * A request mixing one transactional and one non-transactional record set must be
 * flagged as containing both transactional and idempotent records.
 */
@Test
public void testMixedTransactionalData() {
    final long producerId = 15L;
    final short producerEpoch = 5;
    final int sequence = 10;
    final MemoryRecords nonTxnRecords = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("foo".getBytes()));
    final MemoryRecords txnRecords = MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, producerEpoch, sequence, new SimpleRecord("bar".getBytes()));
    // Partition 0 carries the transactional records, partition 1 the plain ones.
    ProduceRequestData.TopicProduceData txnTopic = new ProduceRequestData.TopicProduceData()
        .setName("foo")
        .setPartitionData(Collections.singletonList(
            new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(txnRecords)));
    ProduceRequestData.TopicProduceData nonTxnTopic = new ProduceRequestData.TopicProduceData()
        .setName("foo")
        .setPartitionData(Collections.singletonList(
            new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonTxnRecords)));
    ProduceRequestData requestData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
            Arrays.asList(txnTopic, nonTxnTopic).iterator()))
        .setAcks((short) -1)
        .setTimeoutMs(5000);
    ProduceRequest.Builder builder = ProduceRequest.forMagic(RecordBatch.CURRENT_MAGIC_VALUE, requestData);
    final ProduceRequest request = builder.build();
    assertTrue(RequestUtils.hasTransactionalRecords(request));
    assertTrue(RequestTestUtils.hasIdempotentRecords(request));
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)

Example 30 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in the Apache Kafka project.

Source: class ProduceRequestTest, method shouldBeFlaggedAsTransactionalWhenTransactionalRecords.

/**
 * A request whose only record set is transactional must be reported as carrying
 * transactional records.
 */
@Test
public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() {
    final MemoryRecords txnRecords = MemoryRecords.withTransactionalRecords(0, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord);
    ProduceRequestData.PartitionProduceData partition = new ProduceRequestData.PartitionProduceData()
        .setIndex(1)
        .setRecords(txnRecords);
    ProduceRequestData.TopicProduceData topic = new ProduceRequestData.TopicProduceData()
        .setName("topic")
        .setPartitionData(Collections.singletonList(partition));
    ProduceRequestData requestData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(topic).iterator()))
        .setAcks((short) -1)
        .setTimeoutMs(10);
    final ProduceRequest request = ProduceRequest.forCurrentMagic(requestData).build();
    assertTrue(RequestUtils.hasTransactionalRecords(request));
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)

Aggregations

MemoryRecords (org.apache.kafka.common.record.MemoryRecords)108 TopicPartition (org.apache.kafka.common.TopicPartition)59 Test (org.junit.jupiter.api.Test)43 SimpleRecord (org.apache.kafka.common.record.SimpleRecord)40 ByteBuffer (java.nio.ByteBuffer)34 ArrayList (java.util.ArrayList)28 List (java.util.List)27 Test (org.junit.Test)27 HashMap (java.util.HashMap)26 LinkedHashMap (java.util.LinkedHashMap)23 MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)23 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)18 FetchResponseData (org.apache.kafka.common.message.FetchResponseData)16 Collections.singletonList (java.util.Collections.singletonList)15 Record (org.apache.kafka.common.record.Record)15 Arrays.asList (java.util.Arrays.asList)14 Collections.emptyList (java.util.Collections.emptyList)14 ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer)14 Metrics (org.apache.kafka.common.metrics.Metrics)12 MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch)11