Example 31 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.

the class RecordAccumulator method append.

/**
 * Add a record to the accumulator, return the append result
 * <p>
 * The append result will contain the future metadata, and flags indicating whether the appended batch is full and whether a new batch was created
 * <p>
 *
 * @param tp The topic/partition to which this record is being sent
 * @param timestamp The timestamp of the record
 * @param key The key for the record
 * @param value The value for the record
 * @param headers the Headers for the record
 * @param callback The user-supplied callback to execute when the request is complete
 * @param maxTimeToBlock The maximum time in milliseconds to block for buffer memory to be available
 * @param abortOnNewBatch If true, return before creating a new batch, so the caller can run the
 *                        partitioner's onNewBatch method and then try the append again
 * @param nowMs The current time, in milliseconds
 */
public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock, boolean abortOnNewBatch, long nowMs) throws InterruptedException {
    // We keep track of the number of appending threads to make sure we do not miss batches in
    // abortIncompleteBatches().
    appendsInProgress.incrementAndGet();
    ByteBuffer buffer = null;
    if (headers == null)
        headers = Record.EMPTY_HEADERS;
    try {
        // check if we have an in-progress batch
        Deque<ProducerBatch> dq = getOrCreateDeque(tp);
        synchronized (dq) {
            if (closed)
                throw new KafkaException("Producer closed while send in progress");
            RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq, nowMs);
            if (appendResult != null)
                return appendResult;
        }
        // we don't have an in-progress record batch; try to allocate a new batch
        if (abortOnNewBatch) {
            // Return a result that will cause another call to append.
            return new RecordAppendResult(null, false, false, true);
        }
        byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
        int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers));
        log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, tp.topic(), tp.partition(), maxTimeToBlock);
        buffer = free.allocate(size, maxTimeToBlock);
        // Update the current time in case the buffer allocation blocked above.
        nowMs = time.milliseconds();
        synchronized (dq) {
            // Need to check if producer is closed again after grabbing the deque lock.
            if (closed)
                throw new KafkaException("Producer closed while send in progress");
            RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq, nowMs);
            if (appendResult != null) {
                // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often...
                return appendResult;
            }
            MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic);
            ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, nowMs);
            FutureRecordMetadata future = Objects.requireNonNull(batch.tryAppend(timestamp, key, value, headers, callback, nowMs));
            dq.addLast(batch);
            incomplete.add(batch);
            // Don't deallocate this buffer in the finally block as it's being used in the record batch
            buffer = null;
            return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true, false);
        }
    } finally {
        if (buffer != null)
            free.deallocate(buffer);
        appendsInProgress.decrementAndGet();
    }
}
Also used: KafkaException (org.apache.kafka.common.KafkaException), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ByteBuffer (java.nio.ByteBuffer)
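
For orientation, a hedged sketch (not the accumulator's actual private helper) of what the recordsBuilder(buffer, maxUsableMagic) call above boils down to: constructing a MemoryRecordsBuilder over the freshly allocated buffer. CompressionType.NONE and the zero base offset are illustrative stand-ins for the accumulator's configured fields.

// Sketch only: approximates the recordsBuilder(buffer, maxUsableMagic) call above.
ByteBuffer buffer = ByteBuffer.allocate(16384);  // free.allocate(...) in the real code
byte magic = RecordBatch.CURRENT_MAGIC_VALUE;    // stand-in for maxUsableMagic
MemoryRecordsBuilder recordsBuilder = MemoryRecords.builder(
        buffer, magic, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);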

Example 32 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.

the class ProducerBatchTest method testAppendedChecksumMagicV0AndV1.

@Test
public void testAppendedChecksumMagicV0AndV1() {
    for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1)) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128), magic, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
        byte[] key = "hi".getBytes();
        byte[] value = "there".getBytes();
        FutureRecordMetadata future = batch.tryAppend(now, key, value, Record.EMPTY_HEADERS, null, now);
        assertNotNull(future);
        byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
        long expectedChecksum = LegacyRecord.computeChecksum(magic, attributes, now, key, value);
        assertEquals(expectedChecksum, future.checksumOrNull().longValue());
    }
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Test (org.junit.Test)
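
For contrast, a hedged sketch (not part of the original test) of the same append under MAGIC_VALUE_V2: the v2 format drops the per-record CRC, so the returned future is expected to report no checksum.

// Sketch: magic v2 has no record-level checksum, so checksumOrNull() should be null.
MemoryRecordsBuilder v2Builder = MemoryRecords.builder(ByteBuffer.allocate(128),
        RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
ProducerBatch v2Batch = new ProducerBatch(new TopicPartition("topic", 1), v2Builder, now);
FutureRecordMetadata v2Future = v2Batch.tryAppend(now, "hi".getBytes(), "there".getBytes(),
        Record.EMPTY_HEADERS, null, now);
assertNull(v2Future.checksumOrNull());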

Example 33 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.

the class RecordBatchIterationBenchmark method createBatch.

private ByteBuffer createBatch(int batchSize) {
    byte[] value = new byte[messageSize];
    final ByteBuffer buf = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytesUpperBound(messageVersion, compressionType, new byte[0], value, Record.EMPTY_HEADERS) * batchSize);
    final MemoryRecordsBuilder builder = MemoryRecords.builder(buf, messageVersion, compressionType, TimestampType.CREATE_TIME, startingOffset);
    for (int i = 0; i < batchSize; ++i) {
        switch (bytes) {
            case ONES:
                Arrays.fill(value, (byte) 1);
                break;
            case RANDOM:
                random.nextBytes(value);
                break;
        }
        builder.append(0, null, value);
    }
    return builder.build().buffer();
}
Also used: MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ByteBuffer (java.nio.ByteBuffer)
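
Since the benchmark's subject is batch iteration, here is a hedged sketch of how a buffer returned by createBatch is typically consumed; the loop body is illustrative.

// Sketch: wrap the buffer and walk every record, the hot path the benchmark times.
MemoryRecords records = MemoryRecords.readableRecords(createBatch(1000));
for (RecordBatch recordBatch : records.batches()) {
    for (Record record : recordBatch) {
        record.value(); // touch the payload so the read is not optimized away
    }
}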

Example 34 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.

the class KafkaConsumerTest method fetchResponse.

private FetchResponse fetchResponse(Map<TopicPartition, FetchInfo> fetches) {
    LinkedHashMap<TopicPartition, PartitionData> tpResponses = new LinkedHashMap<>();
    for (Map.Entry<TopicPartition, FetchInfo> fetchEntry : fetches.entrySet()) {
        TopicPartition partition = fetchEntry.getKey();
        long fetchOffset = fetchEntry.getValue().offset;
        int fetchCount = fetchEntry.getValue().count;
        final MemoryRecords records;
        if (fetchCount == 0) {
            records = MemoryRecords.EMPTY;
        } else {
            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, fetchOffset);
            for (int i = 0; i < fetchCount; i++)
                builder.append(0L, ("key-" + i).getBytes(), ("value-" + i).getBytes());
            records = builder.build();
        }
        tpResponses.put(partition, new FetchResponse.PartitionData(Errors.NONE, 0, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }
    return new FetchResponse(Errors.NONE, tpResponses, 0, INVALID_SESSION_ID);
}
Also used: PartitionData (org.apache.kafka.common.requests.FetchResponse.PartitionData), FetchResponse (org.apache.kafka.common.requests.FetchResponse), OffsetFetchResponse (org.apache.kafka.common.requests.OffsetFetchResponse), LinkedHashMap (java.util.LinkedHashMap), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Map (java.util.Map), HashMap (java.util.HashMap), Collections.singletonMap (java.util.Collections.singletonMap), MemoryRecords (org.apache.kafka.common.record.MemoryRecords)
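
A hedged usage sketch, assuming FetchInfo is the test's own offset/count holder, as the field accesses above imply:

// Sketch: build a response carrying five records for one partition, starting at offset 0.
TopicPartition tp = new TopicPartition("topic", 0);
FetchResponse response = fetchResponse(Collections.singletonMap(tp, new FetchInfo(0L, 5)));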

Example 35 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.

the class ProduceRequestTest method testV3AndAboveShouldContainOnlyOneRecordBatch.

@Test
public void testV3AndAboveShouldContainOnlyOneRecordBatch() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());
    builder.close();
    builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(11L, "1".getBytes(), "b".getBytes());
    builder.append(12L, null, "c".getBytes());
    builder.close();
    buffer.flip();
    Map<TopicPartition, MemoryRecords> produceData = new HashMap<>();
    produceData.put(new TopicPartition("test", 0), MemoryRecords.readableRecords(buffer));
    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, produceData);
    assertThrowsInvalidRecordExceptionForAllVersions(requestBuilder);
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ByteBuffer (java.nio.ByteBuffer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
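
For contrast, a hedged sketch (not in the original test) of the well-formed case being guarded: partition data containing exactly one record batch, which V3-and-above requests accept.

// Sketch: a single batch per partition is valid for produce request V3 and above.
ByteBuffer single = ByteBuffer.allocate(256);
MemoryRecordsBuilder b = MemoryRecords.builder(single, CompressionType.NONE,
        TimestampType.CREATE_TIME, 0L);
b.append(10L, null, "a".getBytes());
Map<TopicPartition, MemoryRecords> data = new HashMap<>();
data.put(new TopicPartition("test", 0), b.build()); // build() closes the builder; one batch only
ProduceRequest.Builder valid = ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, data);
// Building this request at any version should not raise InvalidRecordException.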

Aggregations

MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder) 97
ByteBuffer (java.nio.ByteBuffer) 50
MemoryRecords (org.apache.kafka.common.record.MemoryRecords) 36
TopicPartition (org.apache.kafka.common.TopicPartition) 30
HashMap (java.util.HashMap) 26
SimpleRecord (org.apache.kafka.common.record.SimpleRecord) 25
Test (org.junit.jupiter.api.Test) 25
Test (org.junit.Test) 20
ArrayList (java.util.ArrayList) 17
List (java.util.List) 17
Record (org.apache.kafka.common.record.Record) 17
LinkedHashMap (java.util.LinkedHashMap) 16
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 14
MetricName (org.apache.kafka.common.MetricName) 14
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric) 14
Arrays.asList (java.util.Arrays.asList) 10
CompressionType (org.apache.kafka.common.record.CompressionType) 10
RecordBatch (org.apache.kafka.common.record.RecordBatch) 10
Header (org.apache.kafka.common.header.Header) 9
EndTransactionMarker (org.apache.kafka.common.record.EndTransactionMarker) 9