Search in sources :

Example 61 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by datastax.

In the class DirectBufferOutputStreamTest, the method testBuildMemoryRecords:

@Test(dataProvider = "initialCapacityAndNumRecords")
public void testBuildMemoryRecords(int initialCapacity, int numRecords) {
    // Build the same record batch twice: once through Kafka's heap-backed
    // ByteBufferOutputStream and once through our DirectBufferOutputStream,
    // then verify the serialized bytes are identical.
    final MemoryRecordsBuilder heapBuilder =
            newMemoryRecordsBuilder(new ByteBufferOutputStream(initialCapacity));
    // Keep a handle on the stream itself so the internal ByteBuf can be released at the end
    final DirectBufferOutputStream directStream = new DirectBufferOutputStream(initialCapacity);
    final MemoryRecordsBuilder directBuilder = newMemoryRecordsBuilder(directStream);

    final ByteBuffer value = ByteBuffer.allocate(1024);
    for (int offset = 0; offset < numRecords; offset++) {
        final long timestamp = LOG_APPEND_TIME + offset;
        heapBuilder.appendWithOffset(offset, timestamp, null, value.duplicate());
        directBuilder.appendWithOffset(offset, timestamp, null, value.duplicate());
    }

    final ByteBuffer heapBuffer = heapBuilder.build().buffer();
    final ByteBuffer directBuffer = directBuilder.build().buffer();
    System.out.println("heapBuffer size: " + heapBuffer.limit()
            + ", directBuffer size: " + directBuffer.limit());

    // The two encodings must match byte-for-byte, and the direct buffer must still
    // be exclusively owned (refCnt == 1) so this test can safely release it.
    Assert.assertEquals(heapBuffer, directBuffer);
    Assert.assertEquals(directStream.getByteBuf().refCnt(), 1);
    directStream.getByteBuf().release();
}
Also used : ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.testng.annotations.Test)

Example 62 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by datastax.

In the class EncodePerformanceTest, the method prepareFixedRecords:

/**
 * Builds a {@link MemoryRecords} batch of {@code NUM_MESSAGES} records, each with the
 * key {@code "key"} and a fixed {@code MESSAGE_SIZE}-byte value filled with {@code 'a'}.
 *
 * @return the fully built, fixed-content record batch
 */
private static MemoryRecords prepareFixedRecords() {
    final MemoryRecordsBuilder builder = newMemoryRecordsBuilder();
    // The value payload is identical for every record, so allocate and fill it once
    // instead of per iteration; append() serializes the bytes into the builder's
    // buffer, so reusing the same array across records is safe.
    final byte[] value = new byte[MESSAGE_SIZE];
    Arrays.fill(value, (byte) 'a');
    for (int i = 0; i < NUM_MESSAGES; i++) {
        builder.append(new SimpleRecord(System.currentTimeMillis(), "key".getBytes(), value));
    }
    return builder.build();
}
Also used : SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder)

Example 63 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by datastax.

In the class KafkaMixedEntryFormatterTest, the method newMemoryRecordsBuilder:

/**
 * Builds a {@link MemoryRecords} batch of 10 records with the given compression type
 * and magic value; each record has key {@code "key"} and a 10-byte value of {@code 'a'}s.
 *
 * NOTE(review): despite the name, this helper returns the *built* MemoryRecords,
 * not the builder itself; the name is kept to avoid breaking existing callers.
 *
 * @param type  the compression type applied to the batch
 * @param magic the record-batch magic (format version) byte
 * @return the fully built record batch
 */
private static MemoryRecords newMemoryRecordsBuilder(final CompressionType type, byte magic) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(
            ByteBuffer.allocate(1024 * 1024 * 5), magic, type, TimestampType.CREATE_TIME, 0L);
    // Every record carries the same 10-byte payload; build it once and reuse it
    // (append() copies the bytes into the builder's buffer, so reuse is safe).
    final byte[] value = new byte[10];
    Arrays.fill(value, (byte) 'a');
    for (int i = 0; i < 10; i++) {
        builder.append(new SimpleRecord(System.currentTimeMillis(), "key".getBytes(), value));
    }
    return builder.build();
}
Also used : SimpleRecord(org.apache.kafka.common.record.SimpleRecord) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder)

Example 64 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by datastax.

In the class ByteBufUtils, the method decodePulsarEntryToKafkaRecords:

/**
 * Converts a Pulsar entry (message metadata + payload) into Kafka {@link MemoryRecords}.
 *
 * <p>Transaction-marker entries are mapped directly to a Kafka end-transaction control
 * batch. Otherwise the payload is decompressed and each (possibly batched) Pulsar
 * message is re-appended as a Kafka record at {@code baseOffset + i}.
 *
 * @param metadata   the Pulsar message metadata for the entry
 * @param payload    the (possibly compressed) entry payload
 * @param baseOffset the Kafka base offset to assign to the first record
 * @param magic      the Kafka record-batch magic; headers are only supported for magic >= V2
 * @return the decode result holding the built records and conversion stats
 * @throws IOException if deserializing a batched single message fails
 */
public static DecodeResult decodePulsarEntryToKafkaRecords(final MessageMetadata metadata, final ByteBuf payload, final long baseOffset, final byte magic) throws IOException {
    if (metadata.hasMarkerType()) {
        // Transaction markers carry no user payload; translate the marker type to the
        // equivalent Kafka control record.
        ControlRecordType controlRecordType;
        switch (metadata.getMarkerType()) {
            case MarkerType.TXN_COMMIT_VALUE:
                controlRecordType = ControlRecordType.COMMIT;
                break;
            case MarkerType.TXN_ABORT_VALUE:
                controlRecordType = ControlRecordType.ABORT;
                break;
            default:
                controlRecordType = ControlRecordType.UNKNOWN;
                break;
        }
        return DecodeResult.get(MemoryRecords.withEndTransactionMarker(baseOffset, metadata.getPublishTime(), 0, metadata.getTxnidMostBits(), (short) metadata.getTxnidLeastBits(), new EndTransactionMarker(controlRecordType, 0)));
    }
    long startConversionNanos = MathUtils.nowInNano();
    final int uncompressedSize = metadata.getUncompressedSize();
    final CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(metadata.getCompression());
    final ByteBuf uncompressedPayload = codec.decode(payload, uncompressedSize);
    try {
        // NOTE(review): if the builder throws below, the ByteBuf inside
        // directBufferOutputStream is not released here — confirm whether
        // DirectBufferOutputStream exposes a safe cleanup for that path.
        final DirectBufferOutputStream directBufferOutputStream = new DirectBufferOutputStream(DEFAULT_BUFFER_SIZE);
        final MemoryRecordsBuilder builder = new MemoryRecordsBuilder(directBufferOutputStream, magic, CompressionType.NONE, TimestampType.CREATE_TIME, baseOffset, metadata.getPublishTime(), RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, metadata.hasTxnidMostBits() && metadata.hasTxnidLeastBits(), false, RecordBatch.NO_PARTITION_LEADER_EPOCH, MAX_RECORDS_BUFFER_SIZE);
        if (metadata.hasTxnidMostBits()) {
            builder.setProducerState(metadata.getTxnidMostBits(), (short) metadata.getTxnidLeastBits(), 0, true);
        }
        int conversionCount = 0;
        if (metadata.hasNumMessagesInBatch()) {
            // Batched entry: unwrap each single message and append it individually.
            final int numMessages = metadata.getNumMessagesInBatch();
            conversionCount += numMessages;
            for (int i = 0; i < numMessages; i++) {
                final SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata();
                final ByteBuf singleMessagePayload = Commands.deSerializeSingleMessageInBatch(uncompressedPayload, singleMessageMetadata, i, numMessages);
                try {
                    final long timestamp = (metadata.getEventTime() > 0) ? metadata.getEventTime() : metadata.getPublishTime();
                    final ByteBuffer value = singleMessageMetadata.isNullValue() ? null : getNioBuffer(singleMessagePayload);
                    if (magic >= RecordBatch.MAGIC_VALUE_V2) {
                        final Header[] headers = getHeadersFromMetadata(singleMessageMetadata.getPropertiesList());
                        builder.appendWithOffset(baseOffset + i, timestamp, getKeyByteBuffer(singleMessageMetadata), value, headers);
                    } else {
                        // record less than magic=2, no header attribute
                        builder.appendWithOffset(baseOffset + i, timestamp, getKeyByteBuffer(singleMessageMetadata), value);
                    }
                } finally {
                    // Release even when appendWithOffset throws, to avoid leaking the
                    // refcounted slice returned by deSerializeSingleMessageInBatch.
                    singleMessagePayload.release();
                }
            }
        } else {
            // Non-batched entry: the whole uncompressed payload is one record.
            conversionCount += 1;
            final long timestamp = (metadata.getEventTime() > 0) ? metadata.getEventTime() : metadata.getPublishTime();
            if (magic >= RecordBatch.MAGIC_VALUE_V2) {
                final Header[] headers = getHeadersFromMetadata(metadata.getPropertiesList());
                builder.appendWithOffset(baseOffset, timestamp, getKeyByteBuffer(metadata), getNioBuffer(uncompressedPayload), headers);
            } else {
                builder.appendWithOffset(baseOffset, timestamp, getKeyByteBuffer(metadata), getNioBuffer(uncompressedPayload));
            }
        }
        final MemoryRecords records = builder.build();
        return DecodeResult.get(records, directBufferOutputStream.getByteBuf(), conversionCount, MathUtils.elapsedNanos(startConversionNanos));
    } finally {
        // Always release the decompressed buffer — the original only released it on
        // the success path, leaking the Netty refcounted buffer on any exception.
        uncompressedPayload.release();
    }
}
Also used : ByteBuf(io.netty.buffer.ByteBuf) ByteBuffer(java.nio.ByteBuffer) DirectBufferOutputStream(io.streamnative.pulsar.handlers.kop.format.DirectBufferOutputStream) EndTransactionMarker(org.apache.kafka.common.record.EndTransactionMarker) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) SingleMessageMetadata(org.apache.pulsar.common.api.proto.SingleMessageMetadata) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) CompressionCodec(org.apache.pulsar.common.compression.CompressionCodec) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Example 65 with MemoryRecordsBuilder

use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by datastax.

In the class KopLogValidator, the method buildRecordsAndAssignOffsets:

/**
 * Rebuilds the validated records into a single batch, assigning each record a fresh,
 * contiguous offset from {@code offsetCounter}, while preserving the producer and
 * transaction state of the first source batch.
 */
private static ValidationAndOffsetAssignResult buildRecordsAndAssignOffsets(byte magic, LongRef offsetCounter, TimestampType timestampType, CompressionType compressionType, long logAppendTime, ArrayList<Record> validatedRecords, MutableRecordBatch first) {
    final long conversionStartNanos = MathUtils.nowInNano();

    // Carry the producer/transaction identity of the first batch over to the rebuilt batch.
    final long producerId = first.producerId();
    final short producerEpoch = first.producerEpoch();
    final int baseSequence = first.baseSequence();
    final boolean isTransactional = first.isTransactional();

    final int estimatedSize = AbstractRecords.estimateSizeInBytes(magic, offsetCounter.value(), compressionType, validatedRecords);
    final ByteBuffer buffer = ByteBuffer.allocate(estimatedSize);
    final MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, compressionType, timestampType, offsetCounter.value(), logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, RecordBatch.NO_PARTITION_LEADER_EPOCH);

    // Re-append every validated record under its newly assigned offset.
    for (final Record record : validatedRecords) {
        builder.appendWithOffset(offsetCounter.getAndIncrement(), record);
    }

    final MemoryRecords memoryRecords = builder.build();
    final int conversionCount = builder.numRecords();
    return ValidationAndOffsetAssignResult.get(memoryRecords, conversionCount, MathUtils.elapsedNanos(conversionStartNanos));
}
Also used : MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Aggregations

MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)97 ByteBuffer (java.nio.ByteBuffer)50 MemoryRecords (org.apache.kafka.common.record.MemoryRecords)36 TopicPartition (org.apache.kafka.common.TopicPartition)30 HashMap (java.util.HashMap)26 SimpleRecord (org.apache.kafka.common.record.SimpleRecord)25 Test (org.junit.jupiter.api.Test)25 Test (org.junit.Test)20 ArrayList (java.util.ArrayList)17 List (java.util.List)17 Record (org.apache.kafka.common.record.Record)17 LinkedHashMap (java.util.LinkedHashMap)16 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)14 MetricName (org.apache.kafka.common.MetricName)14 KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric)14 Arrays.asList (java.util.Arrays.asList)10 CompressionType (org.apache.kafka.common.record.CompressionType)10 RecordBatch (org.apache.kafka.common.record.RecordBatch)10 Header (org.apache.kafka.common.header.Header)9 EndTransactionMarker (org.apache.kafka.common.record.EndTransactionMarker)9