Use of org.apache.kafka.common.metrics.KafkaMetric in project apache-kafka-on-k8s by banzaicloud.
In the class FetcherTest, the method testFetchResponseMetricsWithOnePartitionError:
@Test
public void testFetchResponseMetricsWithOnePartitionError() {
    subscriptions.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    // tp0 returns records successfully; tp1 fails with OFFSET_OUT_OF_RANGE and carries no records
    Map<TopicPartition, FetchResponse.PartitionData> partitions = new HashMap<>();
    partitions.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    partitions.put(tp1, new FetchResponse.PartitionData(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.EMPTY));

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    fetcher.fetchedRecords();

    // the failed partition contributes no bytes or records, so only tp0's batch shows up in the averages
    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(3, recordsCountAverage.value(), EPSILON);
}
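The two metrics asserted here are plain Avg stats in the client's Metrics registry: the fetcher records one sample per fetch response, so after a single response the average equals the recorded total. Below is a minimal standalone sketch of that behavior, assuming only the Kafka clients jar on the classpath; the class, sensor, and metric names are illustrative, not the entries FetcherMetricsRegistry actually registers.

import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;

public class FetchSizeAvgSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        // Illustrative names; the real fetcher wires these up through its own registry.
        MetricName fetchSizeAvg = metrics.metricName("fetch-size-avg", "demo-fetch-manager-metrics");
        Sensor fetchSize = metrics.sensor("fetch-size");
        fetchSize.add(fetchSizeAvg, new Avg());

        // One fetch response worth of bytes, recorded as a single sample.
        fetchSize.record(123.0);

        Map<MetricName, KafkaMetric> all = metrics.metrics();
        // With a single sample the average equals the recorded total: prints 123.0
        System.out.println(all.get(fetchSizeAvg).metricValue());
        metrics.close();
    }
}

This is also why the error partition above changes nothing: tp1 contributes nothing to the sample, so the single-response average reflects tp0's batch alone.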
Use of org.apache.kafka.common.metrics.KafkaMetric in project apache-kafka-on-k8s by banzaicloud.
In the class FetcherTest, the method testFetchResponseMetrics:
@Test
public void testFetchResponseMetrics() {
    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);

    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);
    Cluster cluster = TestUtils.clusterWith(1, partitionCounts);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    subscriptions.assignFromUser(Utils.mkSet(tp1, tp2));

    // build an identical three-record batch for each partition and track the total record bytes
    int expectedBytes = 0;
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> fetchPartitionData = new LinkedHashMap<>();
    for (TopicPartition tp : Utils.mkSet(tp1, tp2)) {
        subscriptions.seek(tp, 0);
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
                TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        for (Record record : records.records())
            expectedBytes += record.sizeInBytes();
        fetchPartitionData.put(tp, new FetchResponse.PartitionData(Errors.NONE, 15L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse(Errors.NONE, fetchPartitionData, 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());

    // both partitions succeeded, so the averages cover all six records
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(6, recordsCountAverage.value(), EPSILON);
}
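Worth noting is what expectedBytes actually sums: the serialized size of each individual Record, not the size of the whole FetchResponse. A runnable sketch of just that computation, lifted out of the test (assuming the Kafka clients jar on the classpath; offsets and payloads match the loop above):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class RecordBytesSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();

        // Sum per-record serialized sizes, the same quantity the test asserts
        // against fetch-size-avg; batch-level framing is not included.
        int totalBytes = 0;
        for (Record record : records.records())
            totalBytes += record.sizeInBytes();
        System.out.println("total record bytes: " + totalBytes);
    }
}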
Use of org.apache.kafka.common.metrics.KafkaMetric in project apache-kafka-on-k8s by banzaicloud.
In the class FetcherTest, the method testFetcherLeadMetric:
@Test
public void testFetcherLeadMetric() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    MetricName minLeadMetric = metrics.metricInstance(metricsRegistry.recordsLeadMin);
    Map<String, String> tags = new HashMap<>(2);
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLeadMetric = metrics.metricName("records-lead", metricGroup, "", tags);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLeadMin = allMetrics.get(minLeadMetric);

    // recordsFetchLeadMin should be initialized to Double.MAX_VALUE
    assertEquals(Double.MAX_VALUE, recordsFetchLeadMin.value(), EPSILON);

    // recordsFetchLeadMin should be position - logStartOffset after receiving an empty FetchResponse
    fetchRecords(tp0, MemoryRecords.EMPTY, Errors.NONE, 100L, -1L, 0L, 0);
    assertEquals(0L, recordsFetchLeadMin.value(), EPSILON);
    KafkaMetric partitionLead = allMetrics.get(partitionLeadMetric);
    assertEquals(0L, partitionLead.value(), EPSILON);

    // recordsFetchLeadMin should be position - logStartOffset after receiving a non-empty FetchResponse
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++) {
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    }
    fetchRecords(tp0, builder.build(), Errors.NONE, 200L, -1L, 0L, 0);
    assertEquals(0L, recordsFetchLeadMin.value(), EPSILON);
    assertEquals(3L, partitionLead.value(), EPSILON);

    // verify de-registration of the partition lead metric on unsubscribe
    subscriptions.unsubscribe();
    assertFalse(allMetrics.containsKey(partitionLeadMetric));
}
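The min-lead metric is backed by a Min stat, which is why the very first assertion sees Double.MAX_VALUE before any sample has been recorded. A minimal sketch of that behavior, assuming a client version contemporary with this fork (later clients changed empty Min/Max stats to report NaN instead); the names here are illustrative:

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Min;

public class LeadMinSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        MetricName leadMin = metrics.metricName("records-lead-min", "demo");
        Sensor lead = metrics.sensor("records-lead");
        lead.add(leadMin, new Min());

        // Before any fetch, the Min stat reports its initial value
        // (Double.MAX_VALUE in this client version), matching the test.
        System.out.println(metrics.metrics().get(leadMin).metricValue());

        // Lead = consumed position - log start offset, recorded per fetched partition.
        lead.record(0.0); // position 0, log start offset 0
        System.out.println(metrics.metrics().get(leadMin).metricValue()); // 0.0
        metrics.close();
    }
}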
Use of org.apache.kafka.common.metrics.KafkaMetric in project apache-kafka-on-k8s by banzaicloud.
In the class FetcherTest, the method testReadCommittedLagMetric:
@Test
public void testReadCommittedLagMetric() {
    Metrics metrics = new Metrics();
    fetcher = createFetcher(subscriptions, metrics, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);
    Map<String, String> tags = new HashMap<>();
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);
    MetricName partitionLagMetricDeprecated = metrics.metricName(tp0 + ".records-lag", metricGroup);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);

    // recordsFetchLagMax should be initialized to Double.NEGATIVE_INFINITY
    assertEquals(Double.NEGATIVE_INFINITY, recordsFetchLagMax.value(), EPSILON);

    // recordsFetchLagMax should be lso - fetchOffset after receiving an empty FetchResponse
    fetchRecords(tp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0);
    assertEquals(50, recordsFetchLagMax.value(), EPSILON);
    KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
    assertEquals(50, partitionLag.value(), EPSILON);
    KafkaMetric partitionLagDeprecated = allMetrics.get(partitionLagMetricDeprecated);
    assertEquals(50, partitionLagDeprecated.value(), EPSILON);

    // recordsFetchLagMax should be lso - (offset of the last message + 1) after receiving a non-empty FetchResponse
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    fetchRecords(tp0, builder.build(), Errors.NONE, 200L, 150L, 0);
    assertEquals(147, recordsFetchLagMax.value(), EPSILON);
    assertEquals(147, partitionLag.value(), EPSILON);

    // verify de-registration of the partition lag metrics on unsubscribe
    subscriptions.unsubscribe();
    assertFalse(allMetrics.containsKey(partitionLagMetric));
    assertFalse(allMetrics.containsKey(partitionLagMetricDeprecated));
}
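The lag numbers follow directly from lag = lso - nextFetchOffset: 50 - 0 = 50 for the empty response while positioned at offset 0, and 150 - 3 = 147 once offsets 0 through 2 have been returned. A small sketch of the Max stat behind recordsFetchLagMax, again assuming this fork's client version and using illustrative names:

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Max;

public class LagMaxSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        MetricName lagMax = metrics.metricName("records-lag-max", "demo");
        Sensor lag = metrics.sensor("records-lag");
        lag.add(lagMax, new Max());

        // An empty Max reports Double.NEGATIVE_INFINITY in this client version
        // (newer clients report NaN), matching the test's first assertion.
        System.out.println(metrics.metrics().get(lagMax).metricValue());

        long lso = 150L;
        long nextFetchOffset = 3L; // offsets 0..2 were consumed from the last batch
        lag.record(lso - nextFetchOffset);
        System.out.println(metrics.metrics().get(lagMax).metricValue()); // 147.0
        metrics.close();
    }
}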
Use of org.apache.kafka.common.metrics.KafkaMetric in project kafka by apache.
In the class FetcherTest, the method testFetchResponseMetrics:
@Test
public void testFetchResponseMetrics() {
    buildFetcher();

    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    subscriptions.assignFromUser(mkSet(tp1, tp2));

    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);
    topicIds.put(topic1, Uuid.randomUuid());
    topicIds.put(topic2, Uuid.randomUuid());
    TopicIdPartition tidp1 = new TopicIdPartition(topicIds.get(topic1), tp1);
    TopicIdPartition tidp2 = new TopicIdPartition(topicIds.get(topic2), tp2);
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, tp -> validLeaderEpoch, topicIds));

    int expectedBytes = 0;
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> fetchPartitionData = new LinkedHashMap<>();
    for (TopicIdPartition tp : mkSet(tidp1, tidp2)) {
        subscriptions.seek(tp.topicPartition(), 0);
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
                TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        for (Record record : records.records())
            expectedBytes += record.sizeInBytes();
        fetchPartitionData.put(tp, new FetchResponseData.PartitionData()
                .setPartitionIndex(tp.topicPartition().partition())
                .setHighWatermark(15)
                .setLogStartOffset(0)
                .setRecords(records));
    }

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData));
    consumerClient.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(6, (Double) recordsCountAverage.metricValue(), EPSILON);
}
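The visible KafkaMetric difference between the two projects: the banzaicloud fork still calls the deprecated value(), which returns a double, while current Kafka uses metricValue(), which returns Object (a Double for measurable stats, arbitrary types for gauges), hence the (Double) casts in the last two assertions. A generic, hedged way to read any metric on any client version where metricValue() exists (the class name is illustrative):

import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;

public class MetricValueSketch {
    public static void main(String[] args) {
        // A fresh registry already contains its built-in metric-count metric,
        // so the loop below prints at least one entry.
        try (Metrics metrics = new Metrics()) {
            for (Map.Entry<MetricName, KafkaMetric> e : metrics.metrics().entrySet()) {
                // metricValue() returns Object; cast to Double only when the
                // metric is known to be a measurable stat.
                System.out.println(e.getKey().name() + " = " + e.getValue().metricValue());
            }
        }
    }
}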