Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by DataStax: class DirectBufferOutputStreamTest, method testBuildMemoryRecords.
@Test(dataProvider = "initialCapacityAndNumRecords")
public void testBuildMemoryRecords(int initialCapacity, int numRecords) {
    final MemoryRecordsBuilder heapMemoryRecordsBuilder =
            newMemoryRecordsBuilder(new ByteBufferOutputStream(initialCapacity));
    // We must expose the DirectBufferOutputStream because we need to release the internal ByteBuf later
    final DirectBufferOutputStream directBufferOutputStream = new DirectBufferOutputStream(initialCapacity);
    final MemoryRecordsBuilder directMemoryRecordsBuilder = newMemoryRecordsBuilder(directBufferOutputStream);

    final ByteBuffer valueBuffer = ByteBuffer.allocate(1024);
    for (int i = 0; i < numRecords; i++) {
        heapMemoryRecordsBuilder.appendWithOffset(i, LOG_APPEND_TIME + i, null, valueBuffer.duplicate());
        directMemoryRecordsBuilder.appendWithOffset(i, LOG_APPEND_TIME + i, null, valueBuffer.duplicate());
    }

    final ByteBuffer heapBuffer = heapMemoryRecordsBuilder.build().buffer();
    final ByteBuffer directBuffer = directMemoryRecordsBuilder.build().buffer();
    System.out.println("heapBuffer size: " + heapBuffer.limit() + ", directBuffer size: " + directBuffer.limit());

    // Both builders must produce byte-identical batches, and the direct ByteBuf must still hold exactly one reference
    Assert.assertEquals(heapBuffer, directBuffer);
    Assert.assertEquals(directBufferOutputStream.getByteBuf().refCnt(), 1);
    directBufferOutputStream.getByteBuf().release();
}
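The helper newMemoryRecordsBuilder(ByteBufferOutputStream) is referenced but not shown here. A minimal sketch of what it might look like, assuming DirectBufferOutputStream extends ByteBufferOutputStream and the test writes uncompressed magic-v2 batches based at LOG_APPEND_TIME (the magic value, timestamp type, and write limit below are illustrative assumptions, not the project's actual choices):

private static MemoryRecordsBuilder newMemoryRecordsBuilder(final ByteBufferOutputStream bufferStream) {
    return new MemoryRecordsBuilder(bufferStream,
            RecordBatch.MAGIC_VALUE_V2,           // assumed magic value
            CompressionType.NONE,
            TimestampType.LOG_APPEND_TIME,
            0L,                                   // base offset
            LOG_APPEND_TIME,                      // base timestamp, shared with the test's appends
            RecordBatch.NO_PRODUCER_ID,
            RecordBatch.NO_PRODUCER_EPOCH,
            RecordBatch.NO_SEQUENCE,
            false,                                // isTransactional
            false,                                // isControlBatch
            RecordBatch.NO_PARTITION_LEADER_EPOCH,
            1024 * 1024);                         // write limit, assumed
}

Because both builders receive identical configuration, the heap-backed and direct-buffer-backed outputs should be byte-identical, which is exactly what the assertion above verifies.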
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by DataStax: class EncodePerformanceTest, method prepareFixedRecords.
private static MemoryRecords prepareFixedRecords() {
    final MemoryRecordsBuilder builder = newMemoryRecordsBuilder();
    for (int i = 0; i < NUM_MESSAGES; i++) {
        // Each record carries a fixed-size value filled with 'a' so every run encodes the same payload
        final byte[] value = new byte[MESSAGE_SIZE];
        Arrays.fill(value, (byte) 'a');
        builder.append(new SimpleRecord(System.currentTimeMillis(), "key".getBytes(), value));
    }
    return builder.build();
}
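NUM_MESSAGES, MESSAGE_SIZE, and the no-argument newMemoryRecordsBuilder() overload are defined elsewhere in EncodePerformanceTest. A hypothetical set of definitions consistent with this snippet (the concrete values and the buffer sizing are assumptions for illustration):

// Hypothetical constants; the real benchmark may use different values
private static final int NUM_MESSAGES = 1024;
private static final int MESSAGE_SIZE = 1024;

private static MemoryRecordsBuilder newMemoryRecordsBuilder() {
    // Allocate enough headroom for every value plus per-record overhead
    return MemoryRecords.builder(
            ByteBuffer.allocate(NUM_MESSAGES * (MESSAGE_SIZE + 64)),
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
}

Filling the payload with a constant byte keeps the encoding work deterministic across benchmark runs.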
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by DataStax: class KafkaMixedEntryFormatterTest, method newMemoryRecordsBuilder.
private static MemoryRecords newMemoryRecordsBuilder(final CompressionType type, final byte magic) {
    final MemoryRecordsBuilder builder = MemoryRecords.builder(
            ByteBuffer.allocate(1024 * 1024 * 5), magic, type, TimestampType.CREATE_TIME, 0L);
    for (int i = 0; i < 10; i++) {
        final byte[] value = new byte[10];
        Arrays.fill(value, (byte) 'a');
        builder.append(new SimpleRecord(System.currentTimeMillis(), "key".getBytes(), value));
    }
    return builder.build();
}
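A caller in such a test would typically sweep compression types and magic values; a hypothetical invocation, using constants that do exist in the Kafka client API:

final MemoryRecords v2Lz4 = newMemoryRecordsBuilder(CompressionType.LZ4, RecordBatch.MAGIC_VALUE_V2);
final MemoryRecords v1Gzip = newMemoryRecordsBuilder(CompressionType.GZIP, RecordBatch.MAGIC_VALUE_V1);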
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by DataStax: class ByteBufUtils, method decodePulsarEntryToKafkaRecords.
public static DecodeResult decodePulsarEntryToKafkaRecords(final MessageMetadata metadata,
                                                           final ByteBuf payload,
                                                           final long baseOffset,
                                                           final byte magic) throws IOException {
    // Transaction markers are not ordinary messages; convert them to Kafka end-transaction control batches
    if (metadata.hasMarkerType()) {
        final ControlRecordType controlRecordType;
        switch (metadata.getMarkerType()) {
            case MarkerType.TXN_COMMIT_VALUE:
                controlRecordType = ControlRecordType.COMMIT;
                break;
            case MarkerType.TXN_ABORT_VALUE:
                controlRecordType = ControlRecordType.ABORT;
                break;
            default:
                controlRecordType = ControlRecordType.UNKNOWN;
                break;
        }
        return DecodeResult.get(MemoryRecords.withEndTransactionMarker(
                baseOffset,
                metadata.getPublishTime(),
                0,
                metadata.getTxnidMostBits(),
                (short) metadata.getTxnidLeastBits(),
                new EndTransactionMarker(controlRecordType, 0)));
    }

    final long startConversionNanos = MathUtils.nowInNano();
    final int uncompressedSize = metadata.getUncompressedSize();
    final CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(metadata.getCompression());
    final ByteBuf uncompressedPayload = codec.decode(payload, uncompressedSize);

    final DirectBufferOutputStream directBufferOutputStream = new DirectBufferOutputStream(DEFAULT_BUFFER_SIZE);
    final MemoryRecordsBuilder builder = new MemoryRecordsBuilder(directBufferOutputStream,
            magic,
            CompressionType.NONE,
            TimestampType.CREATE_TIME,
            baseOffset,
            metadata.getPublishTime(),
            RecordBatch.NO_PRODUCER_ID,
            RecordBatch.NO_PRODUCER_EPOCH,
            RecordBatch.NO_SEQUENCE,
            metadata.hasTxnidMostBits() && metadata.hasTxnidLeastBits(),
            false,
            RecordBatch.NO_PARTITION_LEADER_EPOCH,
            MAX_RECORDS_BUFFER_SIZE);
    if (metadata.hasTxnidMostBits()) {
        builder.setProducerState(metadata.getTxnidMostBits(), (short) metadata.getTxnidLeastBits(), 0, true);
    }

    int conversionCount = 0;
    if (metadata.hasNumMessagesInBatch()) {
        // Batched Pulsar entry: deserialize each single message and append it as one Kafka record
        final int numMessages = metadata.getNumMessagesInBatch();
        conversionCount += numMessages;
        for (int i = 0; i < numMessages; i++) {
            final SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata();
            final ByteBuf singleMessagePayload = Commands.deSerializeSingleMessageInBatch(
                    uncompressedPayload, singleMessageMetadata, i, numMessages);
            final long timestamp = (metadata.getEventTime() > 0)
                    ? metadata.getEventTime() : metadata.getPublishTime();
            final ByteBuffer value = singleMessageMetadata.isNullValue()
                    ? null : getNioBuffer(singleMessagePayload);
            if (magic >= RecordBatch.MAGIC_VALUE_V2) {
                final Header[] headers = getHeadersFromMetadata(singleMessageMetadata.getPropertiesList());
                builder.appendWithOffset(baseOffset + i, timestamp,
                        getKeyByteBuffer(singleMessageMetadata), value, headers);
            } else {
                // Records below magic v2 have no header attribute
                builder.appendWithOffset(baseOffset + i, timestamp,
                        getKeyByteBuffer(singleMessageMetadata), value);
            }
            singleMessagePayload.release();
        }
    } else {
        // Non-batched entry: the whole uncompressed payload becomes a single Kafka record
        conversionCount += 1;
        final long timestamp = (metadata.getEventTime() > 0)
                ? metadata.getEventTime() : metadata.getPublishTime();
        if (magic >= RecordBatch.MAGIC_VALUE_V2) {
            final Header[] headers = getHeadersFromMetadata(metadata.getPropertiesList());
            builder.appendWithOffset(baseOffset, timestamp, getKeyByteBuffer(metadata),
                    getNioBuffer(uncompressedPayload), headers);
        } else {
            builder.appendWithOffset(baseOffset, timestamp, getKeyByteBuffer(metadata),
                    getNioBuffer(uncompressedPayload));
        }
    }

    final MemoryRecords records = builder.build();
    uncompressedPayload.release();
    return DecodeResult.get(records, directBufferOutputStream.getByteBuf(),
            conversionCount, MathUtils.elapsedNanos(startConversionNanos));
}
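getHeadersFromMetadata is one of the helpers this method relies on but does not show. A minimal sketch, assuming Pulsar message properties (org.apache.pulsar.common.api.proto.KeyValue pairs) map one-to-one onto Kafka record headers:

private static Header[] getHeadersFromMetadata(final List<KeyValue> properties) {
    // Each Pulsar property becomes a Kafka header with a UTF-8 encoded value
    return properties.stream()
            .map(kv -> new RecordHeader(kv.getKey(), kv.getValue().getBytes(StandardCharsets.UTF_8)))
            .toArray(Header[]::new);
}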
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by DataStax: class KopLogValidator, method buildRecordsAndAssignOffsets.
private static ValidationAndOffsetAssignResult buildRecordsAndAssignOffsets(
        byte magic, LongRef offsetCounter, TimestampType timestampType, CompressionType compressionType,
        long logAppendTime, ArrayList<Record> validatedRecords, MutableRecordBatch first) {
    final long startConversionNanos = MathUtils.nowInNano();
    // Carry the producer state of the first batch over to the rebuilt records
    final long producerId = first.producerId();
    final short producerEpoch = first.producerEpoch();
    final int baseSequence = first.baseSequence();
    final boolean isTransactional = first.isTransactional();

    final int estimatedSize = AbstractRecords.estimateSizeInBytes(
            magic, offsetCounter.value(), compressionType, validatedRecords);
    final ByteBuffer buffer = ByteBuffer.allocate(estimatedSize);
    final MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, compressionType,
            timestampType, offsetCounter.value(), logAppendTime, producerId, producerEpoch,
            baseSequence, isTransactional, RecordBatch.NO_PARTITION_LEADER_EPOCH);

    // Re-append every validated record with a freshly assigned offset
    validatedRecords.forEach(record -> builder.appendWithOffset(offsetCounter.getAndIncrement(), record));

    final MemoryRecords memoryRecords = builder.build();
    final int conversionCount = builder.numRecords();
    return ValidationAndOffsetAssignResult.get(memoryRecords, conversionCount,
            MathUtils.elapsedNanos(startConversionNanos));
}
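LongRef here is a simple mutable offset counter (analogous to the Scala LongRef that Kafka's own log validator uses). A minimal sketch of the contract the method above assumes:

public final class LongRef {
    private long value;

    public LongRef(final long value) { this.value = value; }

    public long value() { return value; }

    // Returns the current offset, then advances the counter
    public long getAndIncrement() { return value++; }
}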