Example 21 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

From the class MetricsTest, method testPercentiles:

@Test
public void testPercentiles() {
    int buckets = 100;
    Percentiles percs = new Percentiles(4 * buckets, 0.0, 100.0, BucketSizing.CONSTANT, new Percentile(metrics.metricName("test.p25", "grp1"), 25), new Percentile(metrics.metricName("test.p50", "grp1"), 50), new Percentile(metrics.metricName("test.p75", "grp1"), 75));
    MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percs);
    Metric p25 = this.metrics.metrics().get(metrics.metricName("test.p25", "grp1"));
    Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
    Metric p75 = this.metrics.metrics().get(metrics.metricName("test.p75", "grp1"));
    // record two windows' worth of sequential values
    for (int i = 0; i < buckets; i++) sensor.record(i);
    assertEquals(25, (Double) p25.metricValue(), 1.0);
    assertEquals(50, (Double) p50.metricValue(), 1.0);
    assertEquals(75, (Double) p75.metricValue(), 1.0);
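    // record two windows' worth of zeros; every percentile should fall back to 0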
    for (int i = 0; i < buckets; i++) sensor.record(0.0);
    assertEquals(0.0, (Double) p25.metricValue(), 1.0);
    assertEquals(0.0, (Double) p50.metricValue(), 1.0);
    assertEquals(0.0, (Double) p75.metricValue(), 1.0);
    // record two more windows' worth of sequential values
    for (int i = 0; i < buckets; i++) sensor.record(i);
    assertEquals(25, (Double) p25.metricValue(), 1.0);
    assertEquals(50, (Double) p50.metricValue(), 1.0);
    assertEquals(75, (Double) p75.metricValue(), 1.0);
}
Also used: Percentile(org.apache.kafka.common.metrics.stats.Percentile) Metric(org.apache.kafka.common.Metric) Percentiles(org.apache.kafka.common.metrics.stats.Percentiles) Test(org.junit.jupiter.api.Test)
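
For readers who want to try the same Percentiles wiring outside of MetricsTest, here is a minimal, self-contained sketch. The class name, sensor name, metric name, and group are illustrative assumptions, not taken from the test above.

import org.apache.kafka.common.Metric;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Percentile;
import org.apache.kafka.common.metrics.stats.Percentiles;
import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing;

public class PercentilesSketch {
    public static void main(String[] args) {
        // same window/sample settings as the test: 50 events per sample, 2 samples
        try (Metrics metrics = new Metrics(new MetricConfig().eventWindow(50).samples(2))) {
            // 4 bytes per bucket, so 100 constant-width buckets spanning [0.0, 100.0]
            Percentiles percs = new Percentiles(4 * 100, 0.0, 100.0, BucketSizing.CONSTANT,
                    new Percentile(metrics.metricName("demo.p50", "demo-group"), 50));
            Sensor sensor = metrics.sensor("demo-sensor");
            sensor.add(percs);
            for (int i = 0; i < 100; i++) {
                sensor.record(i);
            }
            Metric p50 = metrics.metrics().get(metrics.metricName("demo.p50", "demo-group"));
            System.out.println("p50 ~ " + p50.metricValue()); // roughly 50 for inputs 0..99
        }
    }
}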

Example 22 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

From the class MetricsTest, method shouldPinLargerValuesToMax:

@Test
public void shouldPinLargerValuesToMax() {
    final double min = 0.0d;
    final double max = 100d;
    Percentiles percs = new Percentiles(1000, min, max, BucketSizing.LINEAR, new Percentile(metrics.metricName("test.p50", "grp1"), 50));
    MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percs);
    Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
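    // record values above the configured max; they should be pinned into the top bucket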
    sensor.record(max + 100);
    sensor.record(max + 100);
    assertEquals(max, (double) p50.metricValue(), 0d);
}
Also used: Percentile(org.apache.kafka.common.metrics.stats.Percentile) Metric(org.apache.kafka.common.Metric) Percentiles(org.apache.kafka.common.metrics.stats.Percentiles) Test(org.junit.jupiter.api.Test)
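
A hedged, runnable companion sketch of the same pinning behavior; the class, sensor, and metric names, plus the use of the default MetricConfig, are assumptions for illustration.

import org.apache.kafka.common.Metric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Percentile;
import org.apache.kafka.common.metrics.stats.Percentiles;
import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing;

public class PinningSketch {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            // 1000 bytes of linear buckets spanning [0.0, 100.0]
            Percentiles percs = new Percentiles(1000, 0.0, 100.0, BucketSizing.LINEAR,
                    new Percentile(metrics.metricName("pin.p50", "demo-group"), 50));
            Sensor sensor = metrics.sensor("pin-sensor");
            sensor.add(percs);
            sensor.record(200.0); // above max: lands in the top bucket
            sensor.record(200.0);
            Metric p50 = metrics.metrics().get(metrics.metricName("pin.p50", "demo-group"));
            System.out.println(p50.metricValue()); // expected to be pinned at 100.0
        }
    }
}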

Example 23 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

From the class RocksDBMetricsIntegrationTest, method checkMetricByName:

private void checkMetricByName(final List<Metric> listMetric, final String metricName, final int numMetric) {
    final List<Metric> metrics = listMetric.stream().filter(m -> m.metricName().name().equals(metricName)).collect(Collectors.toList());
    assertThat("Size of metrics of type:'" + metricName + "' must be equal to " + numMetric + " but it's equal to " + metrics.size(), metrics.size(), is(numMetric));
    for (final Metric metric : metrics) {
        assertThat("Metric:'" + metric.metricName() + "' must be not null", metric.metricValue(), is(notNullValue()));
    }
}
Also used: StreamsConfig(org.apache.kafka.streams.StreamsConfig) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) Produced(org.apache.kafka.streams.kstream.Produced) Stores(org.apache.kafka.streams.state.Stores) RunWith(org.junit.runner.RunWith) Parameters(org.junit.runners.Parameterized.Parameters) IntegrationTest(org.apache.kafka.test.IntegrationTest) WindowStore(org.apache.kafka.streams.state.WindowStore) ArrayList(java.util.ArrayList) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) After(org.junit.After) Duration(java.time.Duration) Metric(org.apache.kafka.common.Metric) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Parameterized(org.junit.runners.Parameterized) Before(org.junit.Before) Utils(org.apache.kafka.common.utils.Utils) Sensor(org.apache.kafka.common.metrics.Sensor) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AfterClass(org.junit.AfterClass) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Matchers.notNullValue(org.hamcrest.Matchers.notNullValue) Consumed(org.apache.kafka.streams.kstream.Consumed) Parameter(org.junit.runners.Parameterized.Parameter) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) Test(org.junit.Test) IOException(java.io.IOException) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Bytes(org.apache.kafka.common.utils.Bytes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) Rule(org.junit.Rule) TimeWindows(org.apache.kafka.streams.kstream.TimeWindows) Materialized(org.apache.kafka.streams.kstream.Materialized) Matchers.is(org.hamcrest.Matchers.is) KafkaStreams(org.apache.kafka.streams.KafkaStreams) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) Collections(java.util.Collections)
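
The helper above expects a List<Metric>; here is a hedged, self-contained sketch of the same filter-by-name pattern against a plain Metrics registry, so no Streams cluster is required. The registry, metric name, group, and gauge value are illustrative assumptions.

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.metrics.Metrics;

public class FilterByNameSketch {
    public static void main(String[] args) {
        try (Metrics registry = new Metrics()) {
            // register a trivial measurable so there is something to find
            registry.addMetric(registry.metricName("demo-count", "demo-group"),
                    (config, now) -> 42.0);
            final List<Metric> listMetric = new ArrayList<>(registry.metrics().values());
            final List<Metric> matches = listMetric.stream()
                    .filter(m -> m.metricName().name().equals("demo-count"))
                    .collect(Collectors.toList());
            System.out.println("matches=" + matches.size()); // expect 1
        }
    }
}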

Example 24 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

From the class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler:

@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(logContext, taskId, getExceptionalStreamsProducerOnSend(new Exception()), new AlwaysContinueProductionExceptionHandler(), streamsMetrics);
    try (final LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(RecordCollectorImpl.class)) {
        collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
        collector.flush();
        final List<String> messages = logCaptureAppender.getMessages();
        final StringBuilder errorMessage = new StringBuilder("Messages received:");
        for (final String error : messages) {
            errorMessage.append("\n - ").append(error);
        }
        assertTrue(errorMessage.toString(), messages.get(messages.size() - 1).endsWith("Exception handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded."));
    }
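    // the failed-but-continued send should be counted by the task's dropped-records-total metric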
    final Metric metric = streamsMetrics.metrics().get(new MetricName("dropped-records-total", "stream-task-metrics", "The total number of dropped records", mkMap(mkEntry("thread-id", Thread.currentThread().getName()), mkEntry("task-id", taskId.toString()))));
    assertEquals(1.0, metric.metricValue());
    collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
    collector.closeClean();
}
Also used: AlwaysContinueProductionExceptionHandler(org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler) MetricName(org.apache.kafka.common.MetricName) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Metric(org.apache.kafka.common.Metric) KafkaException(org.apache.kafka.common.KafkaException) StreamsException(org.apache.kafka.streams.errors.StreamsException) InvalidProducerEpochException(org.apache.kafka.common.errors.InvalidProducerEpochException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Test(org.junit.Test)
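
A hedged sketch of the same tag-qualified MetricName lookup against a plain Metrics registry; the group, description, tag values, and measured value are illustrative assumptions, not Streams internals.

import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import static org.apache.kafka.common.utils.Utils.mkEntry;
import static org.apache.kafka.common.utils.Utils.mkMap;

public class MetricLookupSketch {
    public static void main(String[] args) {
        try (Metrics registry = new Metrics()) {
            MetricName name = registry.metricName("dropped-records-total", "demo-task-metrics",
                    "illustrative description",
                    mkMap(mkEntry("thread-id", Thread.currentThread().getName()),
                            mkEntry("task-id", "0_0")));
            registry.addMetric(name, (config, now) -> 1.0);
            // a lookup must match name, group, and every tag, otherwise get() returns null
            Metric metric = registry.metrics().get(name);
            System.out.println(metric.metricValue()); // 1.0
        }
    }
}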

Example 25 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

From the class IntegrationTestUtils, method waitForCompletion:

/**
 * Wait for streams to "finish", based on the consumer lag metric. Includes only the main consumer;
 * for completion of standbys as well, see {@link #waitForStandbyCompletion}.
 *
 * Caveats:
 * - Inputs must be finite, fully loaded, and flushed before this method is called
 * - expectedPartitions is the total number of partitions to watch the lag on, including both input and internal.
 *   It's somewhat ok to get this wrong, as the main failure case would be an immediate return due to the clients
 *   not being initialized, which you can avoid with any non-zero value. But it's probably better to get it right ;)
 */
public static void waitForCompletion(final KafkaStreams streams, final int expectedPartitions, final long timeoutMilliseconds) {
    final long start = System.currentTimeMillis();
    while (true) {
        int lagMetrics = 0;
        double totalLag = 0.0;
        for (final Metric metric : streams.metrics().values()) {
            if (metric.metricName().name().equals("records-lag")) {
                if (!metric.metricName().tags().get("client-id").endsWith("restore-consumer")) {
                    lagMetrics++;
                    totalLag += ((Number) metric.metricValue()).doubleValue();
                }
            }
        }
        if (lagMetrics >= expectedPartitions && totalLag == 0.0) {
            return;
        }
        if (System.currentTimeMillis() - start >= timeoutMilliseconds) {
            throw new RuntimeException(String.format("Timed out waiting for completion. lagMetrics=[%s/%s] totalLag=[%s]", lagMetrics, expectedPartitions, totalLag));
        }
    }
}
Also used: Metric(org.apache.kafka.common.Metric)
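
The lag scan inside the loop can also be extracted into a self-contained helper. In this hedged sketch, only the "records-lag" metric name and the restore-consumer suffix come from the method above; the class and method names are an illustrative refactoring. Against a running application it would be called as totalMainConsumerLag(streams.metrics()).

import java.util.Map;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

public final class LagScanSketch {
    // Sums "records-lag" across main-consumer partitions, skipping restore consumers,
    // mirroring the loop body of waitForCompletion.
    static double totalMainConsumerLag(final Map<MetricName, ? extends Metric> metrics) {
        double totalLag = 0.0;
        for (final Metric metric : metrics.values()) {
            if (metric.metricName().name().equals("records-lag")
                    && !metric.metricName().tags().get("client-id").endsWith("restore-consumer")) {
                totalLag += ((Number) metric.metricValue()).doubleValue();
            }
        }
        return totalLag;
    }
}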

Aggregations

Metric (org.apache.kafka.common.Metric): 42
MetricName (org.apache.kafka.common.MetricName): 24
Test (org.junit.Test): 18
MockTime (org.apache.kafka.common.utils.MockTime): 14
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 13
Map (java.util.Map): 11
Test (org.junit.jupiter.api.Test): 11
Metrics (org.apache.kafka.common.metrics.Metrics): 10
Collections (java.util.Collections): 9
HashMap (java.util.HashMap): 9
Properties (java.util.Properties): 9
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 9
StreamsMetricsImpl (org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl): 9
ArrayList (java.util.ArrayList): 8
TaskId (org.apache.kafka.streams.processor.TaskId): 8
List (java.util.List): 7
TopicPartition (org.apache.kafka.common.TopicPartition): 7
StreamsException (org.apache.kafka.streams.errors.StreamsException): 7
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 7
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 6