
Example 1 with Metric

Use of org.apache.kafka.common.Metric in project apache-kafka-on-k8s by banzaicloud.

The class FrequenciesTest, method testUseWithMetrics.

@Test
@SuppressWarnings("deprecation")
public void testUseWithMetrics() {
    MetricName name1 = name("1");
    MetricName name2 = name("2");
    MetricName name3 = name("3");
    MetricName name4 = name("4");
    Frequencies frequencies = new Frequencies(4, 1.0, 4.0,
            new Frequency(name1, 1.0), new Frequency(name2, 2.0),
            new Frequency(name3, 3.0), new Frequency(name4, 4.0));
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(frequencies);
    Metric metric1 = this.metrics.metrics().get(name1);
    Metric metric2 = this.metrics.metrics().get(name2);
    Metric metric3 = this.metrics.metrics().get(name3);
    Metric metric4 = this.metrics.metrics().get(name4);
    // Record 2 windows worth of values
    for (int i = 0; i != 100; ++i) {
        frequencies.record(config, i % 4 + 1, time.milliseconds());
    }
    assertEquals(0.25, metric1.value(), DELTA);
    assertEquals(0.25, metric2.value(), DELTA);
    assertEquals(0.25, metric3.value(), DELTA);
    assertEquals(0.25, metric4.value(), DELTA);
    // Record 2 windows worth of values
    for (int i = 0; i != 100; ++i) {
        frequencies.record(config, i % 2 + 1, time.milliseconds());
    }
    assertEquals(0.50, metric1.value(), DELTA);
    assertEquals(0.50, metric2.value(), DELTA);
    assertEquals(0.00, metric3.value(), DELTA);
    assertEquals(0.00, metric4.value(), DELTA);
    // Record 1 window worth of values of 4.0; the retained overlapping window
    // from the previous loop is half 1.0 and half 2.0
    for (int i = 0; i != 50; ++i) {
        frequencies.record(config, 4.0, time.milliseconds());
    }
    assertEquals(0.25, metric1.value(), DELTA);
    assertEquals(0.25, metric2.value(), DELTA);
    assertEquals(0.00, metric3.value(), DELTA);
    assertEquals(0.50, metric4.value(), DELTA);
}
Also used : MetricName(org.apache.kafka.common.MetricName) Metric(org.apache.kafka.common.Metric) Sensor(org.apache.kafka.common.metrics.Sensor) Test(org.junit.Test)
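
The snippet assumes a test fixture with fields metrics, config, time, and DELTA, plus a name(...) helper, all defined elsewhere in FrequenciesTest. A minimal sketch of such a fixture, with illustrative (not authoritative) window settings and metric naming:

import java.util.Collections;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.utils.MockTime;

public class FrequenciesTestFixture {

    // Tolerance for floating-point assertions; the real test's value may differ.
    protected static final double DELTA = 0.0001d;

    protected final MockTime time = new MockTime();
    // 50 events per sample and 2 samples, so the 100-record loops above span
    // exactly "2 windows worth of values", matching the comments in the test.
    protected final MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    protected final Metrics metrics = new Metrics(config, Collections.emptyList(), time);

    // Hypothetical helper mirroring the name("1") calls in the snippet.
    protected MetricName name(String suffix) {
        return metrics.metricName("test.frequency." + suffix, "grp1");
    }
}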

Example 2 with Metric

Use of org.apache.kafka.common.Metric in project flink by apache.

The class KafkaSourceReaderMetrics, method maybeAddRecordsLagMetric.

/**
 * Adds a partition's records-lag metric to the tracking list if this partition has not
 * appeared before.
 *
 * <p>This method also lazily registers {@link
 * org.apache.flink.runtime.metrics.MetricNames#PENDING_RECORDS} in the {@link
 * SourceReaderMetricGroup}.
 *
 * @param consumer Kafka consumer
 * @param tp Topic partition
 */
public void maybeAddRecordsLagMetric(KafkaConsumer<?, ?> consumer, TopicPartition tp) {
    // Lazily register pendingRecords
    if (recordsLagMetrics == null) {
        this.recordsLagMetrics = new ConcurrentHashMap<>();
        this.sourceReaderMetricGroup.setPendingRecordsGauge(() -> {
            long pendingRecordsTotal = 0;
            for (Metric recordsLagMetric : this.recordsLagMetrics.values()) {
                pendingRecordsTotal += ((Double) recordsLagMetric.metricValue()).longValue();
            }
            return pendingRecordsTotal;
        });
    }
    recordsLagMetrics.computeIfAbsent(tp, (ignored) -> getRecordsLagMetric(consumer.metrics(), tp));
}
Also used : Metric(org.apache.kafka.common.Metric)
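
The getRecordsLagMetric(...) helper called on the last line is not shown in this snippet. A hedged sketch of what such a lookup could do, assuming Kafka's per-partition records-lag metric carries topic and partition tags (the exact resolution logic in Flink may differ across Kafka versions):

import java.util.Map;

import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;

public final class RecordsLagMetricLookup {

    private RecordsLagMetricLookup() {
    }

    public static Metric getRecordsLagMetric(Map<MetricName, ? extends Metric> metrics, TopicPartition tp) {
        for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
            MetricName name = entry.getKey();
            Map<String, String> tags = name.tags();
            // Per-partition consumer lag is reported as "records-lag" with topic/partition tags.
            if ("records-lag".equals(name.name())
                    && tp.topic().equals(tags.get("topic"))
                    && String.valueOf(tp.partition()).equals(tags.get("partition"))) {
                return entry.getValue();
            }
        }
        throw new IllegalStateException("No records-lag metric found for " + tp);
    }
}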

Example 3 with Metric

Use of org.apache.kafka.common.Metric in project flink by apache.

The class FlinkKafkaProducer, method initProducer.

private FlinkKafkaInternalProducer<byte[], byte[]> initProducer(boolean registerMetrics) {
    FlinkKafkaInternalProducer<byte[], byte[]> producer = createProducer();
    LOG.info("Starting FlinkKafkaInternalProducer ({}/{}) to produce into default topic {}", getRuntimeContext().getIndexOfThisSubtask() + 1, getRuntimeContext().getNumberOfParallelSubtasks(), defaultTopicId);
    // register Kafka metrics to Flink accumulators
    if (registerMetrics && !Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = producer.metrics();
        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            final MetricGroup kafkaMetricGroup = getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
            for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
                String name = entry.getKey().name();
                Metric metric = entry.getValue();
                KafkaMetricMutableWrapper wrapper = previouslyCreatedMetrics.get(name);
                if (wrapper != null) {
                    wrapper.setKafkaMetric(metric);
                } else {
                    // TODO: somehow merge metrics from all active producers?
                    wrapper = new KafkaMetricMutableWrapper(metric);
                    previouslyCreatedMetrics.put(name, wrapper);
                    kafkaMetricGroup.gauge(name, wrapper);
                }
            }
        }
    }
    return producer;
}
Also used : MetricName(org.apache.kafka.common.MetricName) KafkaMetricMutableWrapper(org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaMetricMutableWrapper) MetricGroup(org.apache.flink.metrics.MetricGroup) Metric(org.apache.kafka.common.Metric) Map(java.util.Map) HashMap(java.util.HashMap)
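
KafkaMetricMutableWrapper is what lets the snippet re-register the same Flink gauge when a producer is re-created. A hedged reconstruction of its shape, assuming it is a Flink Gauge<Double> whose underlying Kafka Metric can be swapped (the actual Flink class may differ in detail):

import org.apache.flink.metrics.Gauge;
import org.apache.kafka.common.Metric;

public class KafkaMetricMutableWrapper implements Gauge<Double> {

    // volatile so a metric swapped in by one thread is visible to the metrics reporter thread.
    private volatile Metric kafkaMetric;

    public KafkaMetricMutableWrapper(Metric kafkaMetric) {
        this.kafkaMetric = kafkaMetric;
    }

    @Override
    public Double getValue() {
        // Metric.metricValue() returns Object; measurable Kafka metrics report doubles.
        return (Double) kafkaMetric.metricValue();
    }

    public void setKafkaMetric(Metric kafkaMetric) {
        this.kafkaMetric = kafkaMetric;
    }
}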

Example 4 with Metric

Use of org.apache.kafka.common.Metric in project flink by apache.

The class FlinkKafkaProducerBase, method open.

// ----------------------------------- Utilities --------------------------
/**
 * Initializes the connection to Kafka.
 */
@Override
public void open(Configuration configuration) throws Exception {
    if (schema instanceof KeyedSerializationSchemaWrapper) {
        ((KeyedSerializationSchemaWrapper<IN>) schema)
                .getSerializationSchema()
                .open(RuntimeContextInitializationContextAdapters.serializationAdapter(
                        getRuntimeContext(), metricGroup -> metricGroup.addGroup("user")));
    }
    producer = getKafkaProducer(this.producerConfig);
    RuntimeContext ctx = getRuntimeContext();
    if (null != flinkKafkaPartitioner) {
        flinkKafkaPartitioner.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
    }
    LOG.info("Starting FlinkKafkaProducer ({}/{}) to produce into default topic {}", ctx.getIndexOfThisSubtask() + 1, ctx.getNumberOfParallelSubtasks(), defaultTopicId);
    // register Kafka metrics to Flink accumulators
    if (!Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            final MetricGroup kafkaMetricGroup = getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
            for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
                kafkaMetricGroup.gauge(metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
            }
        }
    }
    if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        flushOnCheckpoint = false;
    }
    if (logFailuresOnly) {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e != null) {
                    LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                }
                acknowledgeMessage();
            }
        };
    } else {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && asyncException == null) {
                    asyncException = exception;
                }
                acknowledgeMessage();
            }
        };
    }
}
Also used : RuntimeContext(org.apache.flink.api.common.functions.RuntimeContext) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) KeyedSerializationSchemaWrapper(org.apache.flink.streaming.connectors.kafka.internals.KeyedSerializationSchemaWrapper) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) RuntimeContextInitializationContextAdapters(org.apache.flink.api.common.serialization.RuntimeContextInitializationContextAdapters) FunctionSnapshotContext(org.apache.flink.runtime.state.FunctionSnapshotContext) NetUtils(org.apache.flink.util.NetUtils) ArrayList(java.util.ArrayList) KafkaMetricWrapper(org.apache.flink.streaming.connectors.kafka.internals.metrics.KafkaMetricWrapper) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) Map(java.util.Map) Objects.requireNonNull(java.util.Objects.requireNonNull) Metric(org.apache.kafka.common.Metric) MetricName(org.apache.kafka.common.MetricName) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) Logger(org.slf4j.Logger) Properties(java.util.Properties) CheckpointedFunction(org.apache.flink.streaming.api.checkpoint.CheckpointedFunction) FunctionInitializationContext(org.apache.flink.runtime.state.FunctionInitializationContext) Configuration(org.apache.flink.configuration.Configuration) PartitionInfo(org.apache.kafka.common.PartitionInfo) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) FlinkKafkaPartitioner(org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner) KeyedSerializationSchema(org.apache.flink.streaming.util.serialization.KeyedSerializationSchema) RichSinkFunction(org.apache.flink.streaming.api.functions.sink.RichSinkFunction) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting) SerializableObject(org.apache.flink.util.SerializableObject) MetricGroup(org.apache.flink.metrics.MetricGroup) List(java.util.List) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) Internal(org.apache.flink.annotation.Internal) ClosureCleaner(org.apache.flink.api.java.ClosureCleaner) Comparator(java.util.Comparator) StreamingRuntimeContext(org.apache.flink.streaming.api.operators.StreamingRuntimeContext) Callback(org.apache.kafka.clients.producer.Callback) Collections(java.util.Collections)
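
This snippet uses KafkaMetricWrapper rather than the mutable variant from Example 3: each call to open(...) registers fresh gauges, so no metric swapping is needed. A hedged sketch along the same lines:

import org.apache.flink.metrics.Gauge;
import org.apache.kafka.common.Metric;

public class KafkaMetricWrapper implements Gauge<Double> {

    private final Metric kafkaMetric;

    public KafkaMetricWrapper(Metric kafkaMetric) {
        this.kafkaMetric = kafkaMetric;
    }

    @Override
    public Double getValue() {
        return (Double) kafkaMetric.metricValue();
    }
}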

Example 5 with Metric

Use of org.apache.kafka.common.Metric in project kafka by apache.

The class MetricsTest, method shouldPinSmallerValuesToMin.

@Test
public void shouldPinSmallerValuesToMin() {
    final double min = 0.0d;
    final double max = 100d;
    Percentiles percs = new Percentiles(1000, min, max, BucketSizing.LINEAR, new Percentile(metrics.metricName("test.p50", "grp1"), 50));
    MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percs);
    Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
    sensor.record(min - 100);
    sensor.record(min - 100);
    assertEquals(min, (double) p50.metricValue(), 0d);
}
Also used : Percentile(org.apache.kafka.common.metrics.stats.Percentile) Metric(org.apache.kafka.common.Metric) Percentiles(org.apache.kafka.common.metrics.stats.Percentiles) Test(org.junit.jupiter.api.Test)
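
The same Percentiles setup has a natural mirror-image case: values above the configured maximum should be pinned to max. A sketch of that complementary test under the same fixture assumptions (Kafka's MetricsTest contains a similar shouldPinLargerValuesToMax):

@Test
public void shouldPinLargerValuesToMax() {
    final double min = 0.0d;
    final double max = 100d;
    Percentiles percs = new Percentiles(1000, min, max, BucketSizing.LINEAR, new Percentile(metrics.metricName("test.p50", "grp1"), 50));
    MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percs);
    Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
    // Record values far above the max; the reported p50 should clamp to max.
    sensor.record(max + 100);
    sensor.record(max + 100);
    assertEquals(max, (double) p50.metricValue(), 0d);
}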

Aggregations

Metric (org.apache.kafka.common.Metric): 44 usages
MetricName (org.apache.kafka.common.MetricName): 26 usages
Test (org.junit.Test): 18 usages
MockTime (org.apache.kafka.common.utils.MockTime): 14 usages
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 13 usages
Map (java.util.Map): 11 usages
Test (org.junit.jupiter.api.Test): 11 usages
Metrics (org.apache.kafka.common.metrics.Metrics): 10 usages
Collections (java.util.Collections): 9 usages
HashMap (java.util.HashMap): 9 usages
Properties (java.util.Properties): 9 usages
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 9 usages
StreamsMetricsImpl (org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl): 9 usages
ArrayList (java.util.ArrayList): 8 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 8 usages
List (java.util.List): 7 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 7 usages
StreamsException (org.apache.kafka.streams.errors.StreamsException): 7 usages
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 7 usages
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 6 usages