Example 16 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

The class AbstractWindowBytesStoreTest, method shouldLogAndMeasureExpiredRecords.

@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final WindowStore<Integer, String> windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    context.setTime(1L);
    windowStore.init((StateStoreContext) context, windowStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting a record whose timestamp is large enough that records with timestamp 0 are expired
        windowStore.put(1, "initial record", 2 * RETENTION_PERIOD);
        // Try inserting a record with timestamp 0 -- should be dropped
        windowStore.put(1, "late record", 0L);
        windowStore.put(1, "another on-time record", RETENTION_PERIOD + 1);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal = metrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    final Metric dropRate = metrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    windowStore.close();
}
Also used : Time(org.apache.kafka.common.utils.Time) SystemTime(org.apache.kafka.common.utils.SystemTime) Properties(java.util.Properties) MetricName(org.apache.kafka.common.MetricName) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Metric(org.apache.kafka.common.Metric) InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
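
Outside the test harness, the same dropped-records metrics can be read from a live KafkaStreams instance. A minimal sketch, assuming a local broker; the application id and topic names are illustrative:

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class DroppedRecordsSketch {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "dropped-records-demo"); // illustrative
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // assumed local broker
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("demo-input").to("demo-output");                         // illustrative pass-through topology
        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        Thread.sleep(10_000); // let some records flow before sampling the metrics
        // KafkaStreams#metrics() exposes the same registry the test queries above.
        for (Map.Entry<MetricName, ? extends Metric> entry : streams.metrics().entrySet()) {
            if (entry.getKey().name().equals("dropped-records-total")) {
                System.out.println(entry.getKey().tags() + " -> " + entry.getValue().metricValue());
            }
        }
        streams.close();
    }
}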

Example 17 with Metric

Use of org.apache.kafka.common.Metric in project apache-kafka-on-k8s by banzaicloud.

The class ToolsUtils, method printMetrics.

/**
 * Prints the given metrics in alphabetical order.
 * @param metrics   the metrics to be printed out
 */
public static void printMetrics(Map<MetricName, ? extends Metric> metrics) {
    if (metrics != null && !metrics.isEmpty()) {
        int maxLengthOfDisplayName = 0;
        // A TreeMap with no comparator sorts its String keys alphabetically.
        TreeMap<String, Object> sortedMetrics = new TreeMap<>();
        for (Metric metric : metrics.values()) {
            MetricName mName = metric.metricName();
            String mergedName = mName.group() + ":" + mName.name() + ":" + mName.tags();
            maxLengthOfDisplayName = Math.max(maxLengthOfDisplayName, mergedName.length());
            sortedMetrics.put(mergedName, metric.metricValue());
        }
        String doubleOutputFormat = "%-" + maxLengthOfDisplayName + "s : %.3f";
        String defaultOutputFormat = "%-" + maxLengthOfDisplayName + "s : %s";
        System.out.println(String.format("\n%-" + maxLengthOfDisplayName + "s   %s", "Metric Name", "Value"));
        for (Map.Entry<String, Object> entry : sortedMetrics.entrySet()) {
            String outputFormat;
            if (entry.getValue() instanceof Double)
                outputFormat = doubleOutputFormat;
            else
                outputFormat = defaultOutputFormat;
            System.out.println(String.format(outputFormat, entry.getKey(), entry.getValue()));
        }
    }
}
Also used : MetricName(org.apache.kafka.common.MetricName) Metric(org.apache.kafka.common.Metric) TreeMap(java.util.TreeMap) Map(java.util.Map)
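
As a usage sketch, printMetrics pairs naturally with any client's metrics() map. The broker address and group id below are illustrative, and the ToolsUtils import is omitted because its package has moved between Kafka versions:

import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PrintMetricsDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
        props.put("group.id", "metrics-demo");            // illustrative group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // KafkaConsumer#metrics() returns Map<MetricName, ? extends Metric>,
            // the exact shape printMetrics expects.
            ToolsUtils.printMetrics(consumer.metrics());
        }
    }
}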

Example 18 with Metric

Use of org.apache.kafka.common.Metric in project apache-kafka-on-k8s by banzaicloud.

The class MetricsTest, method testPercentiles.

@Test
public void testPercentiles() {
    int buckets = 100;
    Percentiles percs = new Percentiles(4 * buckets, 0.0, 100.0, BucketSizing.CONSTANT, new Percentile(metrics.metricName("test.p25", "grp1"), 25), new Percentile(metrics.metricName("test.p50", "grp1"), 50), new Percentile(metrics.metricName("test.p75", "grp1"), 75));
    MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percs);
    Metric p25 = this.metrics.metrics().get(metrics.metricName("test.p25", "grp1"));
    Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
    Metric p75 = this.metrics.metrics().get(metrics.metricName("test.p75", "grp1"));
    // record two windows worth of sequential values
    for (int i = 0; i < buckets; i++) sensor.record(i);
    assertEquals(25, p25.value(), 1.0);
    assertEquals(50, p50.value(), 1.0);
    assertEquals(75, p75.value(), 1.0);
    // record two windows of zeros so the earlier values age out of the samples
    for (int i = 0; i < buckets; i++) sensor.record(0.0);
    assertEquals(0.0, p25.value(), 1.0);
    assertEquals(0.0, p50.value(), 1.0);
    assertEquals(0.0, p75.value(), 1.0);
    // record two more windows worth of sequential values
    for (int i = 0; i < buckets; i++) sensor.record(i);
    assertEquals(25, p25.value(), 1.0);
    assertEquals(50, p50.value(), 1.0);
    assertEquals(75, p75.value(), 1.0);
}
Also used : Percentile(org.apache.kafka.common.metrics.stats.Percentile) Metric(org.apache.kafka.common.Metric) Percentiles(org.apache.kafka.common.metrics.stats.Percentiles) Test(org.junit.Test)
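
The same Percentiles API can be exercised outside a test harness. A minimal sketch, assuming a recent clients jar; the sensor and metric names are illustrative:

import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Percentile;
import org.apache.kafka.common.metrics.stats.Percentiles;
import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing;

public class PercentilesSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics(new MetricConfig().samples(2));
        Sensor latency = metrics.sensor("request-latency"); // illustrative sensor name
        // 400 bytes of constant-width buckets covering the value range [0.0, 100.0]
        latency.add(new Percentiles(400, 0.0, 100.0, BucketSizing.CONSTANT,
            new Percentile(metrics.metricName("latency.p50", "demo"), 50),
            new Percentile(metrics.metricName("latency.p99", "demo"), 99)));
        for (int i = 0; i < 100; i++)
            latency.record(i);
        System.out.println("p50 ~ " + metrics.metrics().get(metrics.metricName("latency.p50", "demo")).metricValue());
        metrics.close();
    }
}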

Example 19 with Metric

Use of org.apache.kafka.common.Metric in project samza by apache.

The class KafkaConsumerProxy, method populateCurrentLags.

// The only way to figure out lag for the KafkaConsumer is to look at the metrics after each poll() call.
// One of the metrics (records-lag) shows how far behind the HighWatermark the consumer is.
// This method populates the lag information for each SSP into latestLags member variable.
private void populateCurrentLags(Set<SystemStreamPartition> ssps) {
    Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics();
    // populate the MetricNames on the first call
    if (perPartitionMetrics.isEmpty()) {
        for (SystemStreamPartition ssp : ssps) {
            TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp);
            // These are required by the KafkaConsumer to get the metrics
            HashMap<String, String> tags = new HashMap<>();
            tags.put("client-id", clientId);
            // Kafka replaces '.' with underscore '_' in the topic names used in metrics tags.
            // See https://github.com/apache/kafka/commit/5d81639907869ce7355c40d2bac176a655e52074#diff-b45245913eaae46aa847d2615d62cde0R1331
            tags.put("topic", tp.topic().replace('.', '_'));
            tags.put("partition", Integer.toString(tp.partition()));
            perPartitionMetrics.put(ssp, new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags));
        }
    }
    for (SystemStreamPartition ssp : ssps) {
        MetricName mn = perPartitionMetrics.get(ssp);
        Metric currentLagMetric = consumerMetrics.get(mn);
        // High watermark is fixed to be the offset of last available message,
        // so the lag is now at least 0, which is the same as Samza's definition.
        // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling.
        long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L;
        latestLags.put(ssp, currentLag);
        // calls the setIsAtHead for the BlockingEnvelopeMap
        sink.setIsAtHighWatermark(ssp, currentLag == 0);
    }
}
Also used : MetricName(org.apache.kafka.common.MetricName) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) Metric(org.apache.kafka.common.Metric) SystemStreamPartition(org.apache.samza.system.SystemStreamPartition)
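
The same lookup works against a bare KafkaConsumer. A minimal sketch, assuming a local broker and a newer client (Kafka 2.0+, where poll takes a Duration, and where the metric lives in the consumer-fetch-manager-metrics group); the topic, group, and client ids are illustrative:

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.serialization.StringDeserializer;

public class RecordsLagSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
        props.put("group.id", "lag-demo");
        props.put("client.id", "lag-demo-client");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // illustrative topic
            consumer.poll(Duration.ofSeconds(1)); // lag metrics only appear after a fetch
            HashMap<String, String> tags = new HashMap<>();
            tags.put("client-id", "lag-demo-client");
            tags.put("topic", "demo-topic"); // remember the '.' -> '_' rewrite for dotted topic names
            tags.put("partition", "0");
            Metric lag = consumer.metrics().get(
                new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags));
            System.out.println("lag = " + (lag != null ? lag.metricValue() : "not yet reported"));
        }
    }
}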

Example 20 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

The class MetricsTest, method testConcurrentReadUpdateReport.

/**
 * Verifies that concurrent sensor add, remove, updates and read with a metrics reporter
 * that synchronizes on every reporter method doesn't result in errors or deadlock.
 */
@Test
public void testConcurrentReadUpdateReport() throws Exception {
    class LockingReporter implements MetricsReporter {

        Map<MetricName, KafkaMetric> activeMetrics = new HashMap<>();

        @Override
        public synchronized void init(List<KafkaMetric> metrics) {
        }

        @Override
        public synchronized void metricChange(KafkaMetric metric) {
            activeMetrics.put(metric.metricName(), metric);
        }

        @Override
        public synchronized void metricRemoval(KafkaMetric metric) {
            activeMetrics.remove(metric.metricName(), metric);
        }

        @Override
        public synchronized void close() {
        }

        @Override
        public void configure(Map<String, ?> configs) {
        }

        synchronized void processMetrics() {
            for (KafkaMetric metric : activeMetrics.values()) {
                assertNotNull(metric.metricValue(), "Invalid metric value");
            }
        }
    }
    final LockingReporter reporter = new LockingReporter();
    this.metrics.close();
    this.metrics = new Metrics(config, Arrays.asList(reporter), new MockTime(10), true);
    final Deque<Sensor> sensors = new ConcurrentLinkedDeque<>();
    SensorCreator sensorCreator = new SensorCreator(metrics);
    final Random random = new Random();
    final AtomicBoolean alive = new AtomicBoolean(true);
    executorService = Executors.newFixedThreadPool(3);
    Future<?> writeFuture = executorService.submit(new ConcurrentMetricOperation(alive, "record", () -> sensors.forEach(sensor -> sensor.record(random.nextInt(10000)))));
    Future<?> readFuture = executorService.submit(new ConcurrentMetricOperation(alive, "read", () -> sensors.forEach(sensor -> sensor.metrics().forEach(metric -> assertNotNull(metric.metricValue(), "Invalid metric value")))));
    Future<?> reportFuture = executorService.submit(new ConcurrentMetricOperation(alive, "report", reporter::processMetrics));
    for (int i = 0; i < 10000; i++) {
        if (sensors.size() > 10) {
            Sensor sensor = random.nextBoolean() ? sensors.removeFirst() : sensors.removeLast();
            metrics.removeSensor(sensor.name());
        }
        StatType statType = StatType.forId(random.nextInt(StatType.values().length));
        sensors.add(sensorCreator.createSensor(statType, i));
    }
    assertFalse(readFuture.isDone(), "Read failed");
    assertFalse(writeFuture.isDone(), "Write failed");
    assertFalse(reportFuture.isDone(), "Report failed");
    alive.set(false);
}
Also used : Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) Max(org.apache.kafka.common.metrics.stats.Max) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) MockTime(org.apache.kafka.common.utils.MockTime) Rate(org.apache.kafka.common.metrics.stats.Rate) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) LoggerFactory(org.slf4j.LoggerFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) Random(java.util.Random) Deque(java.util.Deque) Function(java.util.function.Function) Collections.singletonList(java.util.Collections.singletonList) ArrayList(java.util.ArrayList) Future(java.util.concurrent.Future) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) Percentile(org.apache.kafka.common.metrics.stats.Percentile) BucketSizing(org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing) Map(java.util.Map) Metric(org.apache.kafka.common.Metric) MetricName(org.apache.kafka.common.MetricName) WindowedSum(org.apache.kafka.common.metrics.stats.WindowedSum) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) ExecutorService(java.util.concurrent.ExecutorService) Value(org.apache.kafka.common.metrics.stats.Value) Logger(org.slf4j.Logger) Collections.emptyList(java.util.Collections.emptyList) CumulativeSum(org.apache.kafka.common.metrics.stats.CumulativeSum) ConcurrentLinkedDeque(java.util.concurrent.ConcurrentLinkedDeque) Executors(java.util.concurrent.Executors) TimeUnit(java.util.concurrent.TimeUnit) Test(org.junit.jupiter.api.Test) List(java.util.List) AfterEach(org.junit.jupiter.api.AfterEach) WindowedCount(org.apache.kafka.common.metrics.stats.WindowedCount) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Avg(org.apache.kafka.common.metrics.stats.Avg) Min(org.apache.kafka.common.metrics.stats.Min) SimpleRate(org.apache.kafka.common.metrics.stats.SimpleRate) Meter(org.apache.kafka.common.metrics.stats.Meter) Percentiles(org.apache.kafka.common.metrics.stats.Percentiles) Collections(java.util.Collections)
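
In production code a reporter like the LockingReporter above is usually wired in through client configuration rather than the Metrics constructor. A minimal sketch; the reporter class name is hypothetical:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ReporterWiringSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed local broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "reporter-demo");           // illustrative group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // The client instantiates the reporter reflectively, then calls configure(),
        // init(), and metricChange()/metricRemoval() as metrics are added and removed.
        props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "com.example.LoggingReporter"); // hypothetical reporter class
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // illustrative topic
            consumer.poll(Duration.ofMillis(100)); // any activity exercises the reporter callbacks
        }
    }
}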

Aggregations

Metric (org.apache.kafka.common.Metric): 42 uses
MetricName (org.apache.kafka.common.MetricName): 24 uses
Test (org.junit.Test): 18 uses
MockTime (org.apache.kafka.common.utils.MockTime): 14 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 13 uses
Map (java.util.Map): 11 uses
Test (org.junit.jupiter.api.Test): 11 uses
Metrics (org.apache.kafka.common.metrics.Metrics): 10 uses
Collections (java.util.Collections): 9 uses
HashMap (java.util.HashMap): 9 uses
Properties (java.util.Properties): 9 uses
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 9 uses
StreamsMetricsImpl (org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl): 9 uses
ArrayList (java.util.ArrayList): 8 uses
TaskId (org.apache.kafka.streams.processor.TaskId): 8 uses
List (java.util.List): 7 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 7 uses
StreamsException (org.apache.kafka.streams.errors.StreamsException): 7 uses
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 7 uses
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 6 uses