Example 11 with MutableRecordBatch

use of org.apache.kafka.common.record.MutableRecordBatch in project apache-kafka-on-k8s by banzaicloud.

the class ProducerBatch method split.

public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();
    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");
    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " + "with version v0 and v1");
    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");
    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch.
    // And we also retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }
    // Close the last batch and add it to the batch list after split.
    if (batch != null)
        batches.add(batch);
    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, new RecordBatchTooLargeException());
    produceFuture.done();
    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
Also used: RecordBatch(org.apache.kafka.common.record.RecordBatch) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) ArrayDeque(java.util.ArrayDeque) Record(org.apache.kafka.common.record.Record) RecordBatchTooLargeException(org.apache.kafka.common.errors.RecordBatchTooLargeException) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
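
The split above relies on MemoryRecords.batches() exposing each batch as a MutableRecordBatch. A minimal standalone sketch of that iteration follows; it is not taken from the examples above, and it assumes a clients version where MemoryRecords.withRecords accepts a CompressionType (newer versions take a Compression object instead). The class name is illustrative.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class MutableRecordBatchSketch {
    public static void main(String[] args) {
        // Build a single-record MemoryRecords instance to iterate over.
        MemoryRecords records = MemoryRecords.withRecords(
                CompressionType.NONE,
                new SimpleRecord(System.currentTimeMillis(), "k".getBytes(), "v".getBytes()));
        for (MutableRecordBatch batch : records.batches()) {
            // MutableRecordBatch adds in-place setters (setLastOffset, setMaxTimestamp,
            // setPartitionLeaderEpoch) on top of the read-only RecordBatch interface,
            // e.g. relocating the batch to a new offset range as the log layer does:
            batch.setLastOffset(batch.lastOffset() + 100);
            System.out.println("baseOffset=" + batch.baseOffset()
                    + " sizeInBytes=" + batch.sizeInBytes());
        }
    }
}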

Example 12 with MutableRecordBatch

use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.

the class RecordsIterator method nextBatch.

private Optional<Batch<T>> nextBatch() {
    if (!nextBatches.hasNext()) {
        nextBatches = nextBatches();
    }
    if (nextBatches.hasNext()) {
        MutableRecordBatch nextBatch = nextBatches.next();
        // Update the buffer position to reflect the read batch
        allocatedBuffer.ifPresent(buffer -> buffer.position(buffer.position() + nextBatch.sizeInBytes()));
        if (!(nextBatch instanceof DefaultRecordBatch)) {
            throw new IllegalStateException(String.format("DefaultRecordBatch expected by record type was %s", nextBatch.getClass()));
        }
        return Optional.of(readBatch((DefaultRecordBatch) nextBatch));
    }
    return Optional.empty();
}
Also used: DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch)
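
The instanceof guard in nextBatch() works because batches read back from MemoryRecords are DefaultRecordBatch instances at the current magic value. Below is a minimal sketch of the same downcast pattern using only the public record API; it is a sketch under that assumption, not the raft iterator itself, and the class name is illustrative.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.DefaultRecordBatch;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class DowncastSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(
                CompressionType.NONE, new SimpleRecord("v".getBytes()));
        for (MutableRecordBatch batch : records.batches()) {
            if (!(batch instanceof DefaultRecordBatch)) {
                throw new IllegalStateException("unexpected batch type: " + batch.getClass());
            }
            // sizeInBytes() is the same value nextBatch() uses above to advance
            // the underlying buffer position past the batch it just consumed.
            DefaultRecordBatch defaultBatch = (DefaultRecordBatch) batch;
            System.out.println("batch occupies " + defaultBatch.sizeInBytes() + " bytes");
        }
    }
}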

Example 13 with MutableRecordBatch

use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.

the class RecordAccumulatorTest method testAppendLargeOldMessageFormat.

private void testAppendLargeOldMessageFormat(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize];
    ApiVersions apiVersions = new ApiVersions();
    apiVersions.update(node1.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));
    RecordAccumulator accum = createTestRecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
    assertEquals(Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes, "Our partition's leader should be ready");
    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
Also used: MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) DefaultRecord(org.apache.kafka.common.record.DefaultRecord) Record(org.apache.kafka.common.record.Record)

Example 14 with MutableRecordBatch

use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.

the class RecordAccumulatorTest method testAppendLarge.

private void testAppendLarge(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize];
    RecordAccumulator accum = createTestRecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
    assertEquals(Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes, "Our partition's leader should be ready");
    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
Also used: MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) DefaultRecord(org.apache.kafka.common.record.DefaultRecord) Record(org.apache.kafka.common.record.Record)
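
Examples 13 and 14 share the same verification pattern: exactly one batch, exactly one record, and a key/value round-trip. That pattern can be reproduced without the accumulator or TestUtils. A minimal sketch, assuming only the public MemoryRecords API (class name illustrative; run with -ea to enable the assertions):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.SimpleRecord;

public class BatchRoundTripSketch {
    public static void main(String[] args) {
        byte[] key = "key".getBytes();
        byte[] value = "value".getBytes();
        MemoryRecords memoryRecords = MemoryRecords.withRecords(
                CompressionType.NONE, new SimpleRecord(0L, key, value));
        // Collect the batches; a single small append yields exactly one batch.
        List<MutableRecordBatch> batches = new ArrayList<>();
        memoryRecords.batches().forEach(batches::add);
        MutableRecordBatch batch = batches.get(0);
        // Each batch is an Iterable<Record>; here it holds exactly one record.
        List<Record> records = new ArrayList<>();
        batch.forEach(records::add);
        Record record = records.get(0);
        // Key and value round-trip through the batch unchanged.
        assert record.key().equals(ByteBuffer.wrap(key));
        assert record.value().equals(ByteBuffer.wrap(value));
    }
}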

Example 15 with MutableRecordBatch

use of org.apache.kafka.common.record.MutableRecordBatch in project kafka by apache.

the class SenderTest method sendIdempotentProducerResponse.

void sendIdempotentProducerResponse(int expectedEpoch, int expectedSequence, TopicPartition tp, Errors responseError, long responseOffset, long logStartOffset) {
    client.respond(body -> {
        ProduceRequest produceRequest = (ProduceRequest) body;
        assertTrue(RequestTestUtils.hasIdempotentRecords(produceRequest));
        MemoryRecords records = partitionRecords(produceRequest).get(tp);
        Iterator<MutableRecordBatch> batchIterator = records.batches().iterator();
        RecordBatch firstBatch = batchIterator.next();
        assertFalse(batchIterator.hasNext());
        if (expectedEpoch > -1)
            assertEquals((short) expectedEpoch, firstBatch.producerEpoch());
        assertEquals(expectedSequence, firstBatch.baseSequence());
        return true;
    }, produceResponse(tp, responseOffset, responseError, 0, logStartOffset, null));
}
Also used: MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) RecordBatch(org.apache.kafka.common.record.RecordBatch) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
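
The assertions in sendIdempotentProducerResponse read producer state straight off the batch. Below is a minimal sketch of building records with idempotent producer state and reading those fields back; it assumes a clients version where MemoryRecords.withIdempotentRecords takes a CompressionType, and the class name and values are illustrative.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class IdempotentBatchSketch {
    public static void main(String[] args) {
        long producerId = 1000L;      // illustrative producer state
        short producerEpoch = 1;
        int baseSequence = 10;
        MemoryRecords records = MemoryRecords.withIdempotentRecords(
                CompressionType.NONE, producerId, producerEpoch, baseSequence,
                new SimpleRecord("key".getBytes(), "value".getBytes()));
        for (MutableRecordBatch batch : records.batches()) {
            // These are the same batch-level accessors asserted in the test above.
            System.out.println("producerId=" + batch.producerId()
                    + " epoch=" + batch.producerEpoch()
                    + " baseSequence=" + batch.baseSequence());
        }
    }
}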

Aggregations

MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch) - 15 usages
MemoryRecords (org.apache.kafka.common.record.MemoryRecords) - 9 usages
Record (org.apache.kafka.common.record.Record) - 7 usages
RecordBatch (org.apache.kafka.common.record.RecordBatch) - 5 usages
ProduceRequest (org.apache.kafka.common.requests.ProduceRequest) - 5 usages
DefaultRecord (org.apache.kafka.common.record.DefaultRecord) - 4 usages
AbstractRequest (org.apache.kafka.common.requests.AbstractRequest) - 3 usages
ArrayDeque (java.util.ArrayDeque) - 2 usages
ApiVersions (org.apache.kafka.clients.ApiVersions) - 2 usages
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions) - 2 usages
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata) - 2 usages
RecordBatchTooLargeException (org.apache.kafka.common.errors.RecordBatchTooLargeException) - 2 usages
Metrics (org.apache.kafka.common.metrics.Metrics) - 2 usages
Test (org.junit.jupiter.api.Test) - 2 usages
ByteBuffer (java.nio.ByteBuffer) - 1 usage
ArrayList (java.util.ArrayList) - 1 usage
Arrays (java.util.Arrays) - 1 usage
Iterator (java.util.Iterator) - 1 usage
List (java.util.List) - 1 usage
Collectors (java.util.stream.Collectors) - 1 usage