Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
From class FetcherTest, method testFetcherLeadMetric.
@Test
public void testFetcherLeadMetric() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    MetricName minLeadMetric = metrics.metricInstance(metricsRegistry.recordsLeadMin);
    Map<String, String> tags = new HashMap<>(2);
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLeadMetric = metrics.metricName("records-lead", metricGroup, "", tags);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLeadMin = allMetrics.get(minLeadMetric);
    // recordsFetchLeadMin should be initialized to MAX_VALUE
    assertEquals(Double.MAX_VALUE, recordsFetchLeadMin.value(), EPSILON);
    // lead should be position - logStartOffset after receiving an empty FetchResponse
    fetchRecords(tp0, MemoryRecords.EMPTY, Errors.NONE, 100L, -1L, 0L, 0);
    assertEquals(0L, recordsFetchLeadMin.value(), EPSILON);
    KafkaMetric partitionLead = allMetrics.get(partitionLeadMetric);
    assertEquals(0L, partitionLead.value(), EPSILON);
    // after a non-empty FetchResponse the per-partition lead advances to
    // position - logStartOffset = 3 - 0 = 3, while the min lead stays at 0
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++) {
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    }
    fetchRecords(tp0, builder.build(), Errors.NONE, 200L, -1L, 0L, 0);
    assertEquals(0L, recordsFetchLeadMin.value(), EPSILON);
    assertEquals(3L, partitionLead.value(), EPSILON);
    // verify de-registration of the partition lead metric on unsubscribe
    subscriptions.unsubscribe();
    assertFalse(allMetrics.containsKey(partitionLeadMetric));
}
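The snippets on this page share the same construction pattern for test payloads. A minimal, self-contained sketch of that pattern (key/value contents are placeholders):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class BuilderSketch {
    // Builds a single uncompressed batch holding three records at offsets 0..2.
    static MemoryRecords threeRecords() {
        MemoryRecordsBuilder builder = MemoryRecords.builder(
                ByteBuffer.allocate(1024),   // backing buffer for the batch
                CompressionType.NONE,
                TimestampType.CREATE_TIME,
                0L);                         // base offset of the batch
        for (int v = 0; v < 3; v++) {
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP,
                    "key".getBytes(), ("value-" + v).getBytes());
        }
        return builder.build();              // closes the builder and returns the records
    }
}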
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
From class FetcherTest, method testReadCommittedLagMetric.
@Test
public void testReadCommittedLagMetric() {
    Metrics metrics = new Metrics();
    fetcher = createFetcher(subscriptions, metrics, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);
    Map<String, String> tags = new HashMap<>();
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);
    MetricName partitionLagMetricDeprecated = metrics.metricName(tp0 + ".records-lag", metricGroup);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);
    // recordsFetchLagMax should be initialized to negative infinity
    assertEquals(Double.NEGATIVE_INFINITY, recordsFetchLagMax.value(), EPSILON);
    // lag should be lso - fetchOffset after receiving an empty FetchResponse
    fetchRecords(tp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0);
    assertEquals(50, recordsFetchLagMax.value(), EPSILON);
    KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
    assertEquals(50, partitionLag.value(), EPSILON);
    KafkaMetric partitionLagDeprecated = allMetrics.get(partitionLagMetricDeprecated);
    assertEquals(50, partitionLagDeprecated.value(), EPSILON);
    // lag should be lso - position (one past the last returned record) after a non-empty FetchResponse
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++) {
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    }
    fetchRecords(tp0, builder.build(), Errors.NONE, 200L, 150L, 0);
    assertEquals(147, recordsFetchLagMax.value(), EPSILON);
    assertEquals(147, partitionLag.value(), EPSILON);
    // verify de-registration of the partition lag metrics on unsubscribe
    subscriptions.unsubscribe();
    assertFalse(allMetrics.containsKey(partitionLagMetric));
    assertFalse(allMetrics.containsKey(partitionLagMetricDeprecated));
}
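The expected value 147 is worth spelling out. Under READ_COMMITTED, lag is measured against the last stable offset, so lag = lso - position. The empty fetch leaves the position at 0 against an LSO of 50, hence lag 50; consuming the three records at offsets 0-2 advances the position to 3 against an LSO of 150, giving 150 - 3 = 147.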
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
From class FetcherTest, method testInvalidDefaultRecordBatch.
@Test
public void testInvalidDefaultRecordBatch() {
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    ByteBufferOutputStream out = new ByteBufferOutputStream(buffer);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out, DefaultRecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, 0L, 10L, 0L, (short) 0, 0, false, false, 0, 1024);
    builder.append(10L, "key".getBytes(), "value".getBytes());
    builder.close();
    buffer.flip();
    // Garble the CRC: byte 17 is where the 4-byte CRC field of a v2 batch header starts
    buffer.position(17);
    buffer.put("beef".getBytes());
    buffer.position(0);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    // fetchedRecords() should throw on every call because of the corrupt batch,
    // and the consumer's position must not advance past it
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.fetchedRecords();
            fail("fetchedRecords should have raised KafkaException");
        } catch (KafkaException e) {
            assertEquals(0, subscriptions.position(tp0).longValue());
        }
    }
}
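The choice of position 17 follows from the v2 (DefaultRecordBatch) wire format, whose header begins with the fields sketched below; the constant names here are illustrative, not Kafka's public API:

// v2 record batch header prefix, by byte offset:
static final int BASE_OFFSET_OFFSET = 0;              // int64 baseOffset
static final int LENGTH_OFFSET = 8;                   // int32 batchLength
static final int PARTITION_LEADER_EPOCH_OFFSET = 12;  // int32 partitionLeaderEpoch
static final int MAGIC_OFFSET = 16;                   // int8  magic
static final int CRC_OFFSET = 17;                     // uint32 crc -- the 4 bytes the test overwrites

Overwriting any of those four CRC bytes fails the batch's checksum validation on read, which is what surfaces as the KafkaException above.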
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project apache-kafka-on-k8s by banzaicloud.
From class RecordAccumulator, method append.
/**
 * Add a record to the accumulator and return the append result.
 * <p>
 * The append result contains the future metadata and flags indicating whether the appended batch is full
 * or a new batch was created.
 *
 * @param tp The topic/partition to which this record is being sent
 * @param timestamp The timestamp of the record
 * @param key The key for the record
 * @param value The value for the record
 * @param headers The headers for the record
 * @param callback The user-supplied callback to execute when the request is complete
 * @param maxTimeToBlock The maximum time in milliseconds to block for buffer memory to be available
 */
public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException {
    // We keep track of the number of appending threads to make sure we do not miss batches in
    // abortIncompleteBatches().
    appendsInProgress.incrementAndGet();
    ByteBuffer buffer = null;
    if (headers == null)
        headers = Record.EMPTY_HEADERS;
    try {
        // check if we have an in-progress batch
        Deque<ProducerBatch> dq = getOrCreateDeque(tp);
        synchronized (dq) {
            if (closed)
                throw new IllegalStateException("Cannot send after the producer is closed.");
            RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq);
            if (appendResult != null)
                return appendResult;
        }
        // we don't have an in-progress record batch, so try to allocate a new one
        byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
        int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers));
        log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition());
        buffer = free.allocate(size, maxTimeToBlock);
        synchronized (dq) {
            // Need to check if the producer was closed again after grabbing the deque lock.
            if (closed)
                throw new IllegalStateException("Cannot send after the producer is closed.");
            RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq);
            if (appendResult != null) {
                // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often...
                return appendResult;
            }
            MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic);
            ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds());
            FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds()));
            dq.addLast(batch);
            incomplete.add(batch);
            // Don't deallocate this buffer in the finally block as it's being used in the record batch
            buffer = null;
            return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true);
        }
    } finally {
        if (buffer != null)
            free.deallocate(buffer);
        appendsInProgress.decrementAndGet();
    }
}
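A hypothetical call site for this method, sketching roughly what the producer's send path does with the result; accumulator, tp, and sender are assumed to be set up by surrounding producer code, and append() can throw InterruptedException:

// Sketch only: `accumulator`, `tp`, and `sender` come from surrounding producer code.
RecordAccumulator.RecordAppendResult result = accumulator.append(
        tp,                          // target topic-partition
        System.currentTimeMillis(),  // record timestamp
        "key".getBytes(),            // serialized key
        "value".getBytes(),          // serialized value
        null,                        // headers; null is normalized to Record.EMPTY_HEADERS
        null,                        // no completion callback
        30_000L);                    // block up to 30s waiting for buffer memory
if (result.batchIsFull || result.newBatchCreated) {
    // a batch is ready (or a new one was started), so wake the sender to drain it
    sender.wakeup();
}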
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project starlight-for-kafka by datastax.
From class GroupMetadataManagerTest, method newMemoryRecordsBuffer.
private ByteBuffer newMemoryRecordsBuffer(List<SimpleRecord> records, long producerId, short producerEpoch, boolean isTxnOffsetCommit) {
    TimestampType timestampType = TimestampType.CREATE_TIME;
    long timestamp = Time.SYSTEM.milliseconds();
    // size the buffer for a single current-magic batch holding all the records
    ByteBuffer buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(RecordBatch.CURRENT_MAGIC_VALUE, offsetConfig.offsetsTopicCompressionType(), records));
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, offsetConfig.offsetsTopicCompressionType(), timestampType, 0L, timestamp, producerId, producerEpoch, 0, isTxnOffsetCommit, RecordBatch.NO_PARTITION_LEADER_EPOCH);
    records.forEach(builder::append);
    return builder.build().buffer();
}
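A hypothetical invocation of this helper for a transactional offset commit; the producer id/epoch values and record contents below are placeholders:

// Sketch only: record contents and transactional ids are placeholders.
List<SimpleRecord> records = Collections.singletonList(
        new SimpleRecord("group-offset-key".getBytes(), "group-offset-value".getBytes()));
ByteBuffer payload = newMemoryRecordsBuffer(records, 1000L /* producerId */, (short) 0 /* producerEpoch */, true /* txn offset commit */);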