
Example 1 with KafkaMetric

Use of org.apache.kafka.common.metrics.KafkaMetric in project cruise-control by linkedin.

Class CruiseControlMetricsReporter, method init:

@Override
public void init(List<KafkaMetric> metrics) {
    for (KafkaMetric kafkaMetric : metrics) {
        addMetricIfInterested(kafkaMetric);
    }
    LOG.info("Added {} Kafka metrics for Cruise Control metrics during initialization.", _interestedMetrics.size());
    _metricsReporterRunner = new KafkaThread("CruiseControlMetricsReporterRunner", this, true);
    _yammerMetricProcessor = new YammerMetricProcessor();
    _metricsReporterRunner.start();
}
Also used: KafkaThread (org.apache.kafka.common.utils.KafkaThread), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), YammerMetricProcessor (com.linkedin.kafka.cruisecontrol.metricsreporter.metric.YammerMetricProcessor)
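
For context, a minimal sketch of the org.apache.kafka.common.metrics.MetricsReporter lifecycle that the init(...) method above takes part in. The class name LoggingMetricsReporter is hypothetical; only the interface and its callbacks come from the Kafka clients library, and such a reporter is typically wired in through the metric.reporters configuration property.

import java.util.List;
import java.util.Map;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.MetricsReporter;

// Hypothetical reporter: logs metric names at each lifecycle callback.
public class LoggingMetricsReporter implements MetricsReporter {

    @Override
    public void configure(Map<String, ?> configs) {
        // Invoked first with the broker/client configuration.
    }

    @Override
    public void init(List<KafkaMetric> metrics) {
        // Invoked once with every metric that already exists at registration time.
        for (KafkaMetric metric : metrics) {
            System.out.println("existing metric: " + metric.metricName());
        }
    }

    @Override
    public void metricChange(KafkaMetric metric) {
        // Invoked whenever a metric is added or updated later on.
    }

    @Override
    public void metricRemoval(KafkaMetric metric) {
        // Invoked when a metric is removed.
    }

    @Override
    public void close() {
        // Invoked on shutdown.
    }
}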

Example 2 with KafkaMetric

Use of org.apache.kafka.common.metrics.KafkaMetric in project cruise-control by linkedin.

Class CruiseControlMetricsReporter, method reportKafkaMetrics:

private void reportKafkaMetrics(long now) {
    LOG.debug("Reporting KafkaMetrics. {}", _interestedMetrics.values());
    for (KafkaMetric metric : _interestedMetrics.values()) {
        sendCruiseControlMetric(MetricsUtils.toCruiseControlMetric(metric, now, _brokerId));
    }
    LOG.debug("Finished reporting KafkaMetrics.");
}
Also used: KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric)
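
The project-specific MetricsUtils.toCruiseControlMetric(...) conversion is not shown on this page. As a rough, hypothetical stand-in, the sketch below only reads each metric's name and current value; KafkaMetric.metricValue() is available on recent client versions and returns Object, since some metrics are non-numeric gauges.

import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;

final class MetricDumper {
    // Prints group, name and the current value of each collected metric.
    static void dump(Map<MetricName, KafkaMetric> interestedMetrics) {
        for (KafkaMetric metric : interestedMetrics.values()) {
            MetricName name = metric.metricName();
            System.out.printf("%s / %s = %s%n", name.group(), name.name(), metric.metricValue());
        }
    }
}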

Example 3 with KafkaMetric

Use of org.apache.kafka.common.metrics.KafkaMetric in project ksql by confluentinc.

Class ConsumerCollector, method addSensor:

private void addSensor(String key, String metricNameString, MeasurableStat stat,
                       List<TopicSensors.SensorMetric<ConsumerRecord>> sensors,
                       boolean isError, Function<ConsumerRecord, Double> recordValue) {
    String name = "cons-" + key + "-" + metricNameString + "-" + id;
    MetricName metricName = new MetricName(metricNameString, "consumer-metrics",
            "consumer-" + name, ImmutableMap.of("key", key, "id", id));
    Sensor existingSensor = metrics.getSensor(name);
    Sensor sensor = metrics.sensor(name);
    // re-use the existing measurable stats to share between consumers
    if (existingSensor == null || metrics.metrics().get(metricName) == null) {
        sensor.add(metricName, stat);
    }
    KafkaMetric metric = metrics.metrics().get(metricName);
    sensors.add(new TopicSensors.SensorMetric<ConsumerRecord>(sensor, metric, time, isError) {

        void record(ConsumerRecord record) {
            sensor.record(recordValue.apply(record));
            super.record(record);
        }
    });
}
Also used: MetricName (org.apache.kafka.common.MetricName), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Sensor (org.apache.kafka.common.metrics.Sensor)
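
Stripped of the ksql-specific TopicSensors wrapper, the core pattern above is: create a sensor, attach a tagged MetricName with a stat, record through the sensor, and read the resulting KafkaMetric back from the registry. Below is a self-contained sketch of that round trip; the metric and tag names are illustrative only.

import java.util.Collections;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;

public class SensorRoundTripSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        try {
            // Register a sensor and attach a tagged metric to it, as addSensor(...) does above.
            Sensor sensor = metrics.sensor("cons-demo");
            MetricName metricName = new MetricName("messages-avg", "consumer-metrics",
                    "hypothetical demo metric", Collections.singletonMap("key", "demo"));
            sensor.add(metricName, new Avg());

            // Recording goes through the sensor; reading goes through the registry's KafkaMetric.
            sensor.record(42.0);
            KafkaMetric metric = metrics.metrics().get(metricName);
            System.out.println(metricName.name() + " = " + metric.metricValue());
        } finally {
            metrics.close();
        }
    }
}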

Example 4 with KafkaMetric

Use of org.apache.kafka.common.metrics.KafkaMetric in project ksql by confluentinc.

Class ProducerCollector, method addSensor:

private void addSensor(String key, String metricNameString, MeasurableStat stat,
                       List<TopicSensors.SensorMetric<ProducerRecord>> results, boolean isError) {
    String name = "prod-" + key + "-" + metricNameString + "-" + id;
    MetricName metricName = new MetricName(metricNameString, "producer-metrics",
            "producer-" + name, ImmutableMap.of("key", key, "id", id));
    Sensor existingSensor = metrics.getSensor(name);
    Sensor sensor = metrics.sensor(name);
    // either a new sensor or a new metric with different id
    if (existingSensor == null || metrics.metrics().get(metricName) == null) {
        sensor.add(metricName, stat);
    }
    KafkaMetric metric = metrics.metrics().get(metricName);
    results.add(new TopicSensors.SensorMetric<ProducerRecord>(sensor, metric, time, isError) {

        void record(ProducerRecord record) {
            sensor.record(1);
            super.record(record);
        }
    });
}
Also used: MetricName (org.apache.kafka.common.MetricName), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), Sensor (org.apache.kafka.common.metrics.Sensor)
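
The existingSensor null check above works because Metrics.sensor(name) is get-or-create, while Metrics.getSensor(name) only returns sensors that are already registered. A small sketch of that behaviour, with an illustrative sensor name:

import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;

public class SensorReuseSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        try {
            // getSensor(name) returns null until the sensor has been created.
            System.out.println("before: " + metrics.getSensor("prod-demo"));
            // sensor(name) creates it on first use and returns the same instance afterwards.
            Sensor first = metrics.sensor("prod-demo");
            Sensor second = metrics.sensor("prod-demo");
            System.out.println("same instance: " + (first == second));
        } finally {
            metrics.close();
        }
    }
}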

Example 5 with KafkaMetric

Use of org.apache.kafka.common.metrics.KafkaMetric in project apache-kafka-on-k8s by banzaicloud.

Class SenderTest, method testAbortRetryWhenProducerIdChanges:

@Test
@SuppressWarnings("deprecation")
public void testAbortRetryWhenProducerIdChanges() throws InterruptedException {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0));
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    int maxRetries = 10;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE,
            ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);
    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    // connect.
    sender.run(time.milliseconds());
    // send.
    sender.run(time.milliseconds());
    String id = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(id), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertTrue("Client ready status should be true", client.isReady(node, 0L));
    client.disconnect(id);
    assertEquals(0, client.inFlightRequestCount());
    assertFalse("Client ready status should be false", client.isReady(node, 0L));
    transactionManager.resetProducerId();
    transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId + 1, (short) 0));
    // receive error
    sender.run(time.milliseconds());
    // reconnect
    sender.run(time.milliseconds());
    // nothing to do, since the pid has changed. We should check the metrics for errors.
    sender.run(time.milliseconds());
    assertEquals("Expected requests to be aborted after pid change", 0, client.inFlightRequestCount());
    KafkaMetric recordErrors = m.metrics().get(senderMetrics.recordErrorRate);
    assertTrue("Expected non-zero value for record send errors", recordErrors.value() > 0);
    assertTrue(responseFuture.isDone());
    assertEquals(0, (long) transactionManager.sequenceNumber(tp0));
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Metrics (org.apache.kafka.common.metrics.Metrics), Node (org.apache.kafka.common.Node), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), Test (org.junit.Test)
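
The test reads the error counter through KafkaMetric.value(), which is deprecated (hence the @SuppressWarnings annotation). On newer client versions the same check can go through metricValue(); the helper below is a hypothetical sketch of that replacement, not part of the original test.

import org.apache.kafka.common.metrics.KafkaMetric;

final class MetricValues {
    // Extracts a double without the deprecated value() call.
    // metricValue() returns Object, so non-numeric gauges fall back to NaN here.
    static double asDouble(KafkaMetric metric) {
        Object v = metric.metricValue();
        return v instanceof Number ? ((Number) v).doubleValue() : Double.NaN;
    }
}

With that helper, the assertion could read assertTrue("Expected non-zero value for record send errors", MetricValues.asDouble(recordErrors) > 0).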

Aggregations

Types most frequently used together with KafkaMetric across the indexed sources, with usage counts:

KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 95
Test (org.junit.Test): 73
MetricName (org.apache.kafka.common.MetricName): 36
HashMap (java.util.HashMap): 17
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 14
LinkedHashMap (java.util.LinkedHashMap): 12
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 12
Windowed (org.apache.kafka.streams.kstream.Windowed): 12
Test (org.junit.jupiter.api.Test): 11
TopicPartition (org.apache.kafka.common.TopicPartition): 10
List (java.util.List): 8
Cluster (org.apache.kafka.common.Cluster): 8
Node (org.apache.kafka.common.Node): 8
Sensor (org.apache.kafka.common.metrics.Sensor): 8
LegacyRecord (org.apache.kafka.common.record.LegacyRecord): 8
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 8
Record (org.apache.kafka.common.record.Record): 8
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 8
Metrics (org.apache.kafka.common.metrics.Metrics): 7
PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData): 7