
Example 6 with MetricConfig

Use of org.apache.kafka.common.metrics.MetricConfig in project kafka by apache.

The class RecordAccumulator, method registerMetrics.

private void registerMetrics(Metrics metrics, String metricGrpName) {
    MetricName metricName = metrics.metricName("waiting-threads", metricGrpName, "The number of user threads blocked waiting for buffer memory to enqueue their records");
    Measurable waitingThreads = new Measurable() {

        @Override
        public double measure(MetricConfig config, long now) {
            return free.queued();
        }
    };
    metrics.addMetric(metricName, waitingThreads);
    metricName = metrics.metricName("buffer-total-bytes", metricGrpName, "The maximum amount of buffer memory the client can use (whether or not it is currently used).");
    Measurable totalBytes = new Measurable() {

        @Override
        public double measure(MetricConfig config, long now) {
            return free.totalMemory();
        }
    };
    metrics.addMetric(metricName, totalBytes);
    metricName = metrics.metricName("buffer-available-bytes", metricGrpName, "The total amount of buffer memory that is not being used (either unallocated or in the free list).");
    Measurable availableBytes = new Measurable() {

        @Override
        public double measure(MetricConfig config, long now) {
            return free.availableMemory();
        }
    };
    metrics.addMetric(metricName, availableBytes);
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) MetricName(org.apache.kafka.common.MetricName) Measurable(org.apache.kafka.common.metrics.Measurable)
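
All three registrations above follow the same gauge pattern: a Measurable whose measure callback reads a live value on demand, rather than a stat that accumulates samples. Since Measurable declares a single method, a lambda can stand in for the anonymous classes. Below is a minimal, self-contained sketch of the same pattern; the metric name, group, and backing queue are hypothetical stand-ins, not part of the Kafka source above.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;

public class GaugeSketch {
    public static void main(String[] args) {
        Queue<String> pending = new ConcurrentLinkedQueue<>();
        // Metrics implements Closeable, so try-with-resources shuts down reporters
        try (Metrics metrics = new Metrics()) {
            MetricName name = metrics.metricName("pending-items", "example-group",
                "The number of items currently waiting in the queue");
            // measure(config, now) is invoked lazily whenever the metric is read
            Measurable pendingItems = (MetricConfig config, long now) -> pending.size();
            metrics.addMetric(name, pendingItems);
            pending.add("record-1");
            System.out.println(metrics.metric(name).metricValue()); // 1.0
        }
    }
}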

Example 7 with MetricConfig

Use of org.apache.kafka.common.metrics.MetricConfig in project kafka by apache.

The class ProducerMetrics, method main.

public static void main(String[] args) {
    Map<String, String> metricTags = Collections.singletonMap("client-id", "client-id");
    MetricConfig metricConfig = new MetricConfig().tags(metricTags);
    Metrics metrics = new Metrics(metricConfig);
    ProducerMetrics metricsRegistry = new ProducerMetrics(metrics);
    System.out.println(Metrics.toHtmlTable("kafka.producer", metricsRegistry.getAllTemplates()));
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) Metrics(org.apache.kafka.common.metrics.Metrics)
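
This main method renders documentation rather than collecting data: Metrics.toHtmlTable walks a collection of MetricNameTemplate definitions and emits an HTML reference table, which is how Kafka generates its own metrics documentation. A sketch of the same call with two hypothetical templates (the names, group, domain, and tag are made up for illustration):

import java.util.Arrays;
import org.apache.kafka.common.MetricNameTemplate;
import org.apache.kafka.common.metrics.Metrics;

public class MetricDocSketch {
    public static void main(String[] args) {
        // The trailing varargs are the tag names each concrete metric will carry
        MetricNameTemplate rate = new MetricNameTemplate(
            "request-rate", "example-metrics", "Requests sent per second", "client-id");
        MetricNameTemplate total = new MetricNameTemplate(
            "request-total", "example-metrics", "Total requests sent", "client-id");
        System.out.println(Metrics.toHtmlTable("example.domain", Arrays.asList(rate, total)));
    }
}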

Example 8 with MetricConfig

Use of org.apache.kafka.common.metrics.MetricConfig in project kafka by apache.

The class KafkaConsumer, method buildMetrics.

private static Metrics buildMetrics(ConsumerConfig config, Time time, String clientId) {
    Map<String, String> metricsTags = Collections.singletonMap(CLIENT_ID_METRIC_TAG, clientId);
    MetricConfig metricConfig = new MetricConfig()
        .samples(config.getInt(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG))
        .timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
        .recordLevel(Sensor.RecordingLevel.forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG)))
        .tags(metricsTags);
    List<MetricsReporter> reporters = config.getConfiguredInstances(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG,
        MetricsReporter.class, Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId));
    JmxReporter jmxReporter = new JmxReporter();
    jmxReporter.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)));
    reporters.add(jmxReporter);
    MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX));
    return new Metrics(metricConfig, reporters, time, metricsContext);
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) KafkaConsumerMetrics(org.apache.kafka.clients.consumer.internals.KafkaConsumerMetrics) Metrics(org.apache.kafka.common.metrics.Metrics) MetricsReporter(org.apache.kafka.common.metrics.MetricsReporter) KafkaMetricsContext(org.apache.kafka.common.metrics.KafkaMetricsContext) MetricsContext(org.apache.kafka.common.metrics.MetricsContext) KafkaMetricsContext(org.apache.kafka.common.metrics.KafkaMetricsContext) JmxReporter(org.apache.kafka.common.metrics.JmxReporter)
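
The same wiring can be reproduced outside KafkaConsumer by substituting literal values for the ConsumerConfig lookups. A minimal sketch, assuming hard-coded stand-ins for the three metrics.* settings and a hypothetical JMX namespace; note that in the consumer the JmxReporter is configured with the client's original configs, while here an empty map suffices:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.metrics.KafkaMetricsContext;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.MetricsContext;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.utils.Time;

public class StandaloneMetricsSketch {
    public static Metrics buildMetrics(String clientId) {
        MetricConfig metricConfig = new MetricConfig()
            .samples(2)                                 // stands in for metrics.num.samples
            .timeWindow(30_000, TimeUnit.MILLISECONDS)  // stands in for metrics.sample.window.ms
            .recordLevel(Sensor.RecordingLevel.INFO)    // stands in for metrics.recording.level
            .tags(Collections.singletonMap("client-id", clientId));
        List<MetricsReporter> reporters = new ArrayList<>();
        JmxReporter jmxReporter = new JmxReporter();
        jmxReporter.configure(Collections.emptyMap());
        reporters.add(jmxReporter);
        MetricsContext metricsContext = new KafkaMetricsContext("example.app");
        return new Metrics(metricConfig, reporters, Time.SYSTEM, metricsContext);
    }
}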

Example 9 with MetricConfig

Use of org.apache.kafka.common.metrics.MetricConfig in project kafka by apache.

The class FetcherTest, method testPreferredReadReplicaOffsetError.

@Test
public void testPreferredReadReplicaOffsetError() {
    buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
    subscriptions.assignFromUser(singleton(tp0));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
    Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    assertEquals(1, selected.id());
    // Return an error, should unset the preferred read replica
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.OFFSET_OUT_OF_RANGE, 100L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.empty()));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
    selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    assertEquals(-1, selected.id());
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) BeforeEach(org.junit.jupiter.api.BeforeEach) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) SerializationException(org.apache.kafka.common.errors.SerializationException) KafkaException(org.apache.kafka.common.KafkaException) DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) Collections.singletonList(java.util.Collections.singletonList) ClientUtils(org.apache.kafka.clients.ClientUtils) Cluster(org.apache.kafka.common.Cluster) Future(java.util.concurrent.Future) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) DataOutputStream(java.io.DataOutputStream) ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TimestampType(org.apache.kafka.common.record.TimestampType) Sensor(org.apache.kafka.common.metrics.Sensor) CompressionType(org.apache.kafka.common.record.CompressionType) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Metrics(org.apache.kafka.common.metrics.Metrics) ApiMessageType(org.apache.kafka.common.message.ApiMessageType) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) FetchRequest(org.apache.kafka.common.requests.FetchRequest) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Records(org.apache.kafka.common.record.Records) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) ClientDnsLookup(org.apache.kafka.clients.ClientDnsLookup) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ListOffsetsPartitionResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) OffsetForLeaderEpochRequestData(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData) INVALID_SESSION_ID(org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID) ArrayList(java.util.ArrayList) 
LinkedHashMap(java.util.LinkedHashMap) UNDEFINED_EPOCH(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH) NetworkClient(org.apache.kafka.clients.NetworkClient) Deserializer(org.apache.kafka.common.serialization.Deserializer) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MockSelector(org.apache.kafka.test.MockSelector) Field(java.lang.reflect.Field) ApiVersions(org.apache.kafka.clients.ApiVersions) MetricNameTemplate(org.apache.kafka.common.MetricNameTemplate) OffsetForLeaderEpochResponseData(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData) Assertions.assertArrayEquals(org.junit.jupiter.api.Assertions.assertArrayEquals) AfterEach(org.junit.jupiter.api.AfterEach) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) LogTruncationException(org.apache.kafka.clients.consumer.LogTruncationException) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) ByteBuffer(java.nio.ByteBuffer) ClientRequest(org.apache.kafka.clients.ClientRequest) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) Record(org.apache.kafka.common.record.Record) Collections.singleton(java.util.Collections.singleton) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetricName(org.apache.kafka.common.MetricName) OffsetForLeaderTopicResult(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult) ListOffsetsTopic(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.emptyList(java.util.Collections.emptyList) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) Test(org.junit.jupiter.api.Test) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Header(org.apache.kafka.common.header.Header) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Optional(java.util.Optional) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) OffsetsForLeaderEpochResponse(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse) Uuid(org.apache.kafka.common.Uuid) EpochEndOffset(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset) Metadata(org.apache.kafka.clients.Metadata) 
EndTransactionMarker(org.apache.kafka.common.record.EndTransactionMarker) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) HashMap(java.util.HashMap) Function(java.util.function.Function) HashSet(java.util.HashSet) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) Collections.singletonMap(java.util.Collections.singletonMap) ExecutorService(java.util.concurrent.ExecutorService) Utils(org.apache.kafka.common.utils.Utils) UNDEFINED_EPOCH_OFFSET(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) MockClient(org.apache.kafka.clients.MockClient) ListOffsetsPartition(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) Iterator(java.util.Iterator) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) TimeUnit(java.util.concurrent.TimeUnit) IsolationLevel(org.apache.kafka.common.IsolationLevel) DelayedReceive(org.apache.kafka.test.DelayedReceive) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) OffsetsForLeaderEpochRequest(org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest) Collections(java.util.Collections)
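
In this test, MetricConfig is incidental: buildFetcher only needs a config object, and new MetricConfig() supplies the defaults. A small sketch to inspect those defaults; the values in the comments reflect MetricConfig's no-arg constructor (2 samples, a 30-second window, INFO recording level):

import org.apache.kafka.common.metrics.MetricConfig;

public class DefaultConfigSketch {
    public static void main(String[] args) {
        MetricConfig defaults = new MetricConfig();
        System.out.println(defaults.samples());      // 2
        System.out.println(defaults.timeWindowMs()); // 30000 (30 seconds)
        System.out.println(defaults.recordLevel());  // INFO
    }
}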

Example 10 with MetricConfig

Use of org.apache.kafka.common.metrics.MetricConfig in project kafka by apache.

The class StreamTaskTest, method shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes.

@Test
public void shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes() {
    time = new MockTime(0L, 0L, 0L);
    metrics = new Metrics(new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO), time);
    // Create a processor that only forwards even keys to test the metrics at the source and terminal nodes
    final MockSourceNode<Integer, Integer> evenKeyForwardingSourceNode = new MockSourceNode<Integer, Integer>(intDeserializer, intDeserializer) {

        InternalProcessorContext<Integer, Integer> context;

        @Override
        public void init(final InternalProcessorContext<Integer, Integer> context) {
            this.context = context;
            super.init(context);
        }

        @Override
        public void process(final Record<Integer, Integer> record) {
            if (record.key() % 2 == 0) {
                context.forward(record);
            }
        }
    };
    task = createStatelessTaskWithForwardingTopology(evenKeyForwardingSourceNode);
    task.initializeIfNeeded();
    task.completeRestoration(noOpResetter -> {
    });
    final String sourceNodeName = evenKeyForwardingSourceNode.name();
    final String terminalNodeName = processorStreamTime.name();
    final Metric sourceAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    // e2e latency = 10
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(0, 0L)));
    task.process(10L);
    assertThat(sourceAvg.metricValue(), equalTo(10.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(10.0));
    // key 0: reaches terminal node
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));
    // e2e latency = 15
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(1, 0L)));
    task.process(15L);
    assertThat(sourceAvg.metricValue(), equalTo(12.5));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(15.0));
    // key 1: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));
    // e2e latency = 23
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(2, 0L)));
    task.process(23L);
    assertThat(sourceAvg.metricValue(), equalTo(16.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));
    // key 2: reaches terminal node
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));
    // e2e latency = 5
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(3, 0L)));
    task.process(5L);
    assertThat(sourceAvg.metricValue(), equalTo(13.25));
    assertThat(sourceMin.metricValue(), equalTo(5.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));
    // key 3: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) Metrics(org.apache.kafka.common.metrics.Metrics) MockSourceNode(org.apache.kafka.test.MockSourceNode) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Record(org.apache.kafka.streams.processor.api.Record) Metric(org.apache.kafka.common.Metric) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.Test)
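
The recordLevel(Sensor.RecordingLevel.INFO) passed to Metrics in the setup determines which sensors actually record: a sensor created at a given recording level only captures values when the config enables that level. A minimal sketch of that gating, with hypothetical sensor and metric names:

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;

public class RecordingLevelSketch {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics(new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO))) {
            Sensor infoSensor = metrics.sensor("latency-info", Sensor.RecordingLevel.INFO);
            Sensor debugSensor = metrics.sensor("latency-debug", Sensor.RecordingLevel.DEBUG);
            MetricName avgName = metrics.metricName("latency-avg", "example-group", "Average recorded latency");
            infoSensor.add(avgName, new Avg());
            infoSensor.record(10.0);   // recorded: INFO is enabled by the config
            debugSensor.record(10.0);  // dropped: DEBUG is below the configured level
            System.out.println(metrics.metric(avgName).metricValue()); // 10.0
        }
    }
}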

Aggregations

MetricConfig (org.apache.kafka.common.metrics.MetricConfig): 44 usages
Metrics (org.apache.kafka.common.metrics.Metrics): 32 usages
MetricName (org.apache.kafka.common.MetricName): 23 usages
Test (org.junit.Test): 14 usages
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 8 usages
Sensor (org.apache.kafka.common.metrics.Sensor): 8 usages
MockTime (org.apache.kafka.common.utils.MockTime): 8 usages
HashSet (java.util.HashSet): 7 usages
LinkedHashMap (java.util.LinkedHashMap): 7 usages
MetricNameTemplate (org.apache.kafka.common.MetricNameTemplate): 7 usages
JmxReporter (org.apache.kafka.common.metrics.JmxReporter): 7 usages
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 7 usages
ArrayList (java.util.ArrayList): 6 usages
List (java.util.List): 6 usages
KafkaException (org.apache.kafka.common.KafkaException): 6 usages
Collections (java.util.Collections): 5 usages
Map (java.util.Map): 5 usages
TimeUnit (java.util.concurrent.TimeUnit): 5 usages
Metric (org.apache.kafka.common.Metric): 5 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 5 usages