Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.
The class RecordAccumulator, method append().
/**
 * Add a record to the accumulator and return the append result.
 * <p>
 * The append result will contain the future metadata and a flag indicating whether the
 * appended batch is full or a new batch was created.
 *
 * @param tp The topic/partition to which this record is being sent
 * @param timestamp The timestamp of the record
 * @param key The key for the record
 * @param value The value for the record
 * @param callback The user-supplied callback to execute when the request is complete
 * @param maxTimeToBlock The maximum time in milliseconds to block for buffer memory to be available
 */
public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Callback callback, long maxTimeToBlock) throws InterruptedException {
    // We keep track of the number of appending threads to make sure we do not miss batches in
    // abortIncompleteBatches().
    appendsInProgress.incrementAndGet();
    ByteBuffer buffer = null;
    try {
        // Check if we have an in-progress batch.
        Deque<ProducerBatch> dq = getOrCreateDeque(tp);
        synchronized (dq) {
            if (closed)
                throw new IllegalStateException("Cannot send after the producer is closed.");
            RecordAppendResult appendResult = tryAppend(timestamp, key, value, callback, dq);
            if (appendResult != null)
                return appendResult;
        }

        // We don't have an in-progress record batch; try to allocate a new batch.
        int size = Math.max(this.batchSize, Records.LOG_OVERHEAD + Record.recordSize(key, value));
        log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition());
        buffer = free.allocate(size, maxTimeToBlock);
        synchronized (dq) {
            // Need to check if the producer is closed again after grabbing the deque lock.
            if (closed)
                throw new IllegalStateException("Cannot send after the producer is closed.");

            RecordAppendResult appendResult = tryAppend(timestamp, key, value, callback, dq);
            if (appendResult != null) {
                // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often...
                return appendResult;
            }

            MemoryRecordsBuilder recordsBuilder = MemoryRecords.builder(buffer, compression, TimestampType.CREATE_TIME, this.batchSize);
            ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds());
            FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, callback, time.milliseconds()));

            dq.addLast(batch);
            incomplete.add(batch);

            // Don't deallocate this buffer in the finally block as it's being used in the record batch.
            buffer = null;
            return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true);
        }
    } finally {
        if (buffer != null)
            free.deallocate(buffer);
        appendsInProgress.decrementAndGet();
    }
}
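For context, here is a minimal standalone sketch of the MemoryRecordsBuilder flow that append() wraps, without the accumulator's locking and buffer-pool machinery. The class name, buffer size, and payloads are illustrative assumptions, not part of RecordAccumulator.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.TimestampType;

public class MemoryRecordsBuilderSketch {
    public static void main(String[] args) {
        // Stand-in for the buffer that append() obtains from free.allocate(...).
        ByteBuffer buffer = ByteBuffer.allocate(16384);
        MemoryRecordsBuilder builder = MemoryRecords.builder(
                buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        // Each append carries an explicit create-time timestamp, as the producer does.
        builder.append(System.currentTimeMillis(), "key".getBytes(), "value".getBytes());
        // build() closes the builder and returns the batch written so far.
        MemoryRecords records = builder.build();
        System.out.println("built " + records.sizeInBytes() + " bytes");
    }
}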
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
The class ProducerBatchTest, method testAppendedChecksumMagicV0AndV1().
@Test
public void testAppendedChecksumMagicV0AndV1() {
    for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1)) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128), magic, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);

        byte[] key = "hi".getBytes();
        byte[] value = "there".getBytes();

        FutureRecordMetadata future = batch.tryAppend(now, key, value, Record.EMPTY_HEADERS, null, now);
        assertNotNull(future);

        byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
        long expectedChecksum = LegacyRecord.computeChecksum(magic, attributes, now, key, value);
        assertEquals(expectedChecksum, future.checksumOrNull().longValue());
    }
}
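The same checksum can also be read back without going through ProducerBatch, by iterating the built records; in this era of the codebase, Record.checksumOrNull() exposes the per-record CRC for magic v0 and v1 (it is null for magic v2, where checksums moved to the batch). A hedged sketch, assuming the same test imports and that checksumOrNull() is available on Record:

// Sketch: read the appended record's checksum directly from the builder output.
long now = System.currentTimeMillis();
for (byte magic : Arrays.asList(RecordBatch.MAGIC_VALUE_V0, RecordBatch.MAGIC_VALUE_V1)) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128), magic,
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(now, "hi".getBytes(), "there".getBytes());
    for (Record record : builder.build().records())
        System.out.println("magic " + magic + " checksum: " + record.checksumOrNull());
}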
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
The class RecordBatchIterationBenchmark, method createBatch().
private ByteBuffer createBatch(int batchSize) {
    byte[] value = new byte[messageSize];
    final ByteBuffer buf = ByteBuffer.allocate(
        AbstractRecords.estimateSizeInBytesUpperBound(messageVersion, compressionType, new byte[0], value, Record.EMPTY_HEADERS) * batchSize);
    final MemoryRecordsBuilder builder = MemoryRecords.builder(buf, messageVersion, compressionType, TimestampType.CREATE_TIME, startingOffset);
    for (int i = 0; i < batchSize; ++i) {
        switch (bytes) {
            case ONES:
                Arrays.fill(value, (byte) 1);
                break;
            case RANDOM:
                random.nextBytes(value);
                break;
        }
        builder.append(0, null, value);
    }
    return builder.build().buffer();
}
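A quick way to sanity-check what createBatch() produced is to wrap the returned buffer with MemoryRecords.readableRecords and walk the records. A sketch, assuming access to the benchmark's createBatch(); the batch size of 1000 is an arbitrary example:

// Sketch: round-trip the benchmark buffer and count the records it contains.
ByteBuffer buf = createBatch(1000);
MemoryRecords records = MemoryRecords.readableRecords(buf);
int count = 0;
for (Record record : records.records())
    count++;
System.out.println("batch holds " + count + " records, " + records.sizeInBytes() + " bytes");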
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
The class KafkaConsumerTest, method fetchResponse().
private FetchResponse fetchResponse(Map<TopicPartition, FetchInfo> fetches) {
    LinkedHashMap<TopicPartition, PartitionData> tpResponses = new LinkedHashMap<>();
    for (Map.Entry<TopicPartition, FetchInfo> fetchEntry : fetches.entrySet()) {
        TopicPartition partition = fetchEntry.getKey();
        long fetchOffset = fetchEntry.getValue().offset;
        int fetchCount = fetchEntry.getValue().count;
        final MemoryRecords records;
        if (fetchCount == 0) {
            records = MemoryRecords.EMPTY;
        } else {
            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, fetchOffset);
            for (int i = 0; i < fetchCount; i++)
                builder.append(0L, ("key-" + i).getBytes(), ("value-" + i).getBytes());
            records = builder.build();
        }
        tpResponses.put(partition, new FetchResponse.PartitionData(Errors.NONE, 0, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }
    return new FetchResponse(Errors.NONE, tpResponses, 0, INVALID_SESSION_ID);
}
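For fixtures this simple, the MemoryRecords.withRecords factory offers a shorter path than managing a builder and buffer by hand. A hedged alternative sketch for the non-empty branch, assuming the same method context (fetchOffset in scope) and an org.apache.kafka.common.record.SimpleRecord import:

// Sketch: build the same kind of test batch with the withRecords convenience factory.
MemoryRecords records = MemoryRecords.withRecords(fetchOffset, CompressionType.NONE,
        new SimpleRecord(0L, "key-0".getBytes(), "value-0".getBytes()),
        new SimpleRecord(0L, "key-1".getBytes(), "value-1".getBytes()));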
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
The class ProduceRequestTest, method testV3AndAboveShouldContainOnlyOneRecordBatch().
@Test
public void testV3AndAboveShouldContainOnlyOneRecordBatch() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());
    builder.close();

    builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(11L, "1".getBytes(), "b".getBytes());
    builder.append(12L, null, "c".getBytes());
    builder.close();

    buffer.flip();

    Map<TopicPartition, MemoryRecords> produceData = new HashMap<>();
    produceData.put(new TopicPartition("test", 0), MemoryRecords.readableRecords(buffer));
    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, produceData);
    assertThrowsInvalidRecordExceptionForAllVersions(requestBuilder);
}
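For contrast, the case that v3 and above accepts is a single closed batch per partition. A minimal sketch of that valid counterpart, with the acks/timeout values mirroring the test above:

// Sketch: one batch per partition, which passes v3+ ProduceRequest validation.
ByteBuffer buffer = ByteBuffer.allocate(256);
MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE,
        TimestampType.CREATE_TIME, 0L);
builder.append(10L, null, "a".getBytes());
Map<TopicPartition, MemoryRecords> produceData = new HashMap<>();
produceData.put(new TopicPartition("test", 0), builder.build());
ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, produceData);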