Use of org.apache.kafka.common.Metric in the project apache-kafka-on-k8s (banzaicloud): class FrequenciesTest, method testUseWithMetrics.
@Test
@SuppressWarnings("deprecation")
public void testUseWithMetrics() {
    // Four frequency buckets, centered on the values 1.0 through 4.0.
    MetricName name1 = name("1");
    MetricName name2 = name("2");
    MetricName name3 = name("3");
    MetricName name4 = name("4");
    Frequencies frequencies = new Frequencies(4, 1.0, 4.0,
            new Frequency(name1, 1.0),
            new Frequency(name2, 2.0),
            new Frequency(name3, 3.0),
            new Frequency(name4, 4.0));
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(frequencies);
    Metric metric1 = this.metrics.metrics().get(name1);
    Metric metric2 = this.metrics.metrics().get(name2);
    Metric metric3 = this.metrics.metrics().get(name3);
    Metric metric4 = this.metrics.metrics().get(name4);

    // Record 2 windows worth of values, cycling evenly through 1..4.
    for (int i = 0; i < 100; i++) {
        frequencies.record(config, (i % 4) + 1, time.milliseconds());
    }
    // Each value appeared in exactly a quarter of the samples.
    assertEquals(0.25, metric1.value(), DELTA);
    assertEquals(0.25, metric2.value(), DELTA);
    assertEquals(0.25, metric3.value(), DELTA);
    assertEquals(0.25, metric4.value(), DELTA);

    // Record 2 windows worth of values, alternating only between 1 and 2;
    // the earlier samples age out, so 3 and 4 drop to zero frequency.
    for (int i = 0; i < 100; i++) {
        frequencies.record(config, (i % 2) + 1, time.milliseconds());
    }
    assertEquals(0.50, metric1.value(), DELTA);
    assertEquals(0.50, metric2.value(), DELTA);
    assertEquals(0.00, metric3.value(), DELTA);
    assertEquals(0.00, metric4.value(), DELTA);

    // Record one window worth of the value 4.0 only; combined with the
    // still-live window of alternating 1s and 2s, 4.0 accounts for half.
    for (int i = 0; i < 50; i++) {
        frequencies.record(config, 4.0, time.milliseconds());
    }
    assertEquals(0.25, metric1.value(), DELTA);
    assertEquals(0.25, metric2.value(), DELTA);
    assertEquals(0.00, metric3.value(), DELTA);
    assertEquals(0.50, metric4.value(), DELTA);
}
Use of org.apache.kafka.common.Metric in the project flink (apache): class KafkaSourceReaderMetrics, method maybeAddRecordsLagMetric.
/**
 * Adds a partition's records-lag metric to the tracking map if this partition has never
 * appeared before.
 *
 * <p>On the first call this method also lazily registers {@link
 * org.apache.flink.runtime.metrics.MetricNames#PENDING_RECORDS} in {@link
 * SourceReaderMetricGroup}: the gauge reports the sum of the records-lag of every tracked
 * partition.
 *
 * @param consumer Kafka consumer
 * @param tp Topic partition
 */
public void maybeAddRecordsLagMetric(KafkaConsumer<?, ?> consumer, TopicPartition tp) {
    if (recordsLagMetrics == null) {
        // First partition seen: create the tracking map and register pendingRecords.
        recordsLagMetrics = new ConcurrentHashMap<>();
        sourceReaderMetricGroup.setPendingRecordsGauge(() -> {
            long pendingTotal = 0L;
            for (Metric lagMetric : recordsLagMetrics.values()) {
                // Kafka reports lag as a Double; truncate each to a long before summing.
                pendingTotal += ((Double) lagMetric.metricValue()).longValue();
            }
            return pendingTotal;
        });
    }
    recordsLagMetrics.computeIfAbsent(tp, ignored -> getRecordsLagMetric(consumer.metrics(), tp));
}
Use of org.apache.kafka.common.Metric in the project flink (apache): class FlinkKafkaProducer, method initProducer.
/**
 * Creates the internal Kafka producer and, when requested, bridges its Kafka-native
 * metrics into this task's Flink metric group under a "KafkaProducer" subgroup.
 *
 * @param registerMetrics whether to register the producer's Kafka metrics as Flink gauges;
 *     registration is additionally skipped when {@code KEY_DISABLE_METRICS} is set to true
 *     in the producer config
 * @return the newly created producer
 */
private FlinkKafkaInternalProducer<byte[], byte[]> initProducer(boolean registerMetrics) {
    FlinkKafkaInternalProducer<byte[], byte[]> producer = createProducer();
    LOG.info("Starting FlinkKafkaInternalProducer ({}/{}) to produce into default topic {}", getRuntimeContext().getIndexOfThisSubtask() + 1, getRuntimeContext().getNumberOfParallelSubtasks(), defaultTopicId);
    // register Kafka metrics to Flink accumulators
    if (registerMetrics && !Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = producer.metrics();
        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            final MetricGroup kafkaMetricGroup = getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
            for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
                // NOTE(review): keyed by metric NAME only — two Kafka metrics that share a
                // name but differ in group/tags would collide here; confirm this is intended.
                String name = entry.getKey().name();
                Metric metric = entry.getValue();
                KafkaMetricMutableWrapper wrapper = previouslyCreatedMetrics.get(name);
                if (wrapper != null) {
                    // A wrapper already exists (e.g. from a previous producer instance after
                    // a restart/recovery); repoint it at the fresh producer's metric so the
                    // Flink gauge keeps reporting without re-registration.
                    wrapper.setKafkaMetric(metric);
                } else {
                    // TODO: somehow merge metrics from all active producers?
                    wrapper = new KafkaMetricMutableWrapper(metric);
                    previouslyCreatedMetrics.put(name, wrapper);
                    kafkaMetricGroup.gauge(name, wrapper);
                }
            }
        }
    }
    return producer;
}
Use of org.apache.kafka.common.Metric in the project flink (apache): class FlinkKafkaProducerBase, method open.
// ----------------------------------- Utilities --------------------------
/**
 * Initializes the connection to Kafka.
 *
 * <p>Opens the serialization schema, creates the producer, opens the optional custom
 * partitioner, bridges Kafka-native metrics into a "KafkaProducer" Flink metric group
 * (unless disabled via {@code KEY_DISABLE_METRICS}), validates the flush-on-checkpoint
 * setting against the runtime's checkpointing state, and installs the send callback.
 *
 * @param configuration Flink configuration (unused here; part of the RichFunction contract)
 * @throws Exception if opening the serialization schema or the partitioner fails
 */
@Override
public void open(Configuration configuration) throws Exception {
    if (schema instanceof KeyedSerializationSchemaWrapper) {
        ((KeyedSerializationSchemaWrapper<IN>) schema).getSerializationSchema().open(RuntimeContextInitializationContextAdapters.serializationAdapter(getRuntimeContext(), metricGroup -> metricGroup.addGroup("user")));
    }
    producer = getKafkaProducer(this.producerConfig);
    RuntimeContext ctx = getRuntimeContext();
    if (null != flinkKafkaPartitioner) {
        flinkKafkaPartitioner.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
    }
    LOG.info("Starting FlinkKafkaProducer ({}/{}) to produce into default topic {}", ctx.getIndexOfThisSubtask() + 1, ctx.getNumberOfParallelSubtasks(), defaultTopicId);
    // register Kafka metrics to Flink accumulators
    if (!Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            final MetricGroup kafkaMetricGroup = getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
            for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
                kafkaMetricGroup.gauge(metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
            }
        }
    }
    if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        flushOnCheckpoint = false;
    }
    // Callback is a functional interface; use lambdas, consistent with the
    // lambda style already used elsewhere in this file.
    if (logFailuresOnly) {
        // Best-effort mode: log send failures but never fail the job.
        callback = (metadata, e) -> {
            if (e != null) {
                LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
            }
            acknowledgeMessage();
        };
    } else {
        // Remember only the FIRST async failure; presumably it is rethrown later
        // by the invoke/snapshot path — confirm against checkAsyncException usage.
        callback = (metadata, exception) -> {
            if (exception != null && asyncException == null) {
                asyncException = exception;
            }
            acknowledgeMessage();
        };
    }
}
Use of org.apache.kafka.common.Metric in the project kafka (apache): class MetricsTest, method shouldPinSmallerValuesToMin.
@Test
public void shouldPinSmallerValuesToMin() {
    // Values recorded below the histogram's lower bound must be clamped to that bound.
    final double lowerBound = 0.0d;
    final double upperBound = 100d;
    final MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    final Percentiles percentiles = new Percentiles(1000, lowerBound, upperBound, BucketSizing.LINEAR,
            new Percentile(metrics.metricName("test.p50", "grp1"), 50));
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percentiles);
    Metric median = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
    // Record out-of-range values; the reported median must equal the lower bound exactly.
    sensor.record(lowerBound - 100);
    sensor.record(lowerBound - 100);
    assertEquals(lowerBound, (double) median.metricValue(), 0d);
}
Aggregations