Example usage of org.apache.kafka.common.metrics.stats.Value from the ksql project (confluentinc):
class KsqlEngineMetrics, method configureErrorRate.
/**
 * Creates and registers the sensor tracking the rate of consumed-but-unprocessed messages.
 *
 * @param metrics the metrics registry to register the sensor and metric with
 * @return the configured error-rate sensor
 */
private Sensor configureErrorRate(Metrics metrics) {
    final Sensor sensor = createSensor(metrics, metricGroupName + "-error-rate");
    // Description grammar fixed: "a consumed messages" -> "a consumed message",
    // and "Alternately" (meaning "in turns") -> "Alternatively".
    sensor.add(
        metrics.metricName(
            "error-rate",
            this.metricGroupName,
            "The number of messages which were consumed but not processed. "
                + "Messages may not be processed if, for instance, the message "
                + "contents could not be deserialized due to an incompatible schema. "
                + "Alternatively, a consumed message may not have been produced, hence "
                + "being effectively dropped. Such messages would also be counted "
                + "toward the error rate."),
        new Value());
    return sensor;
}
Example usage of org.apache.kafka.common.metrics.stats.Value from the ksql project (confluentinc):
class KsqlEngineMetrics, method configureMessagesIn.
/**
 * Creates and registers the sensor tracking overall message-consumption throughput.
 *
 * @param metrics the metrics registry to register the sensor and metric with
 * @return the configured messages-consumed sensor
 */
private Sensor configureMessagesIn(Metrics metrics) {
    final Sensor messagesIn = createSensor(metrics, metricGroupName + "-messages-consumed");
    messagesIn.add(
        metrics.metricName(
            "messages-consumed-per-sec",
            this.metricGroupName,
            "The number of messages consumed per second across all queries"),
        new Value());
    return messagesIn;
}
Example usage of org.apache.kafka.common.metrics.stats.Value from the ksql project (confluentinc):
class KsqlEngineMetrics, method configureIdleQueriesSensor.
/**
 * Creates and registers the sensor reporting the number of idle queries.
 *
 * @param metrics the metrics registry to register the sensor and metric with
 * @return the configured idle-queries sensor
 */
private Sensor configureIdleQueriesSensor(Metrics metrics) {
    final Sensor idleQueries = createSensor(metrics, "num-idle-queries");
    // No description string was given for this metric in the original code.
    idleQueries.add(metrics.metricName("num-idle-queries", this.metricGroupName), new Value());
    return idleQueries;
}
Example usage of org.apache.kafka.common.metrics.stats.Value from the kafka project (apache):
class MetricsTest, method testConcurrentReadUpdateReport.
/**
 * Verifies that concurrent sensor adds, removals, updates, and reads combined with a
 * metrics reporter that synchronizes on every reporter method don't result in errors
 * or deadlock.
 */
@Test
public void testConcurrentReadUpdateReport() throws Exception {
// Reporter that serializes every callback on its own monitor, so reporter-side locking
// overlaps with the Metrics registry's internal locking — the deadlock-prone combination
// this test exercises.
class LockingReporter implements MetricsReporter {
Map<MetricName, KafkaMetric> activeMetrics = new HashMap<>();
@Override
public synchronized void init(List<KafkaMetric> metrics) {
}
@Override
public synchronized void metricChange(KafkaMetric metric) {
activeMetrics.put(metric.metricName(), metric);
}
@Override
public synchronized void metricRemoval(KafkaMetric metric) {
// Two-arg remove: only drops the entry if it still maps to this exact metric.
activeMetrics.remove(metric.metricName(), metric);
}
@Override
public synchronized void close() {
}
@Override
public void configure(Map<String, ?> configs) {
}
// Reads every active metric's value while holding the reporter lock.
synchronized void processMetrics() {
for (KafkaMetric metric : activeMetrics.values()) {
assertNotNull(metric.metricValue(), "Invalid metric value");
}
}
}
final LockingReporter reporter = new LockingReporter();
// Replace the default test registry with one wired to the locking reporter.
// NOTE(review): `config` and `executorService` appear to be test-class fields set up and
// torn down elsewhere — confirm the executor is shut down in an @After method.
this.metrics.close();
this.metrics = new Metrics(config, Arrays.asList(reporter), new MockTime(10), true);
// Concurrent deque so the main thread can add/remove sensors while workers iterate.
final Deque<Sensor> sensors = new ConcurrentLinkedDeque<>();
SensorCreator sensorCreator = new SensorCreator(metrics);
final Random random = new Random();
final AtomicBoolean alive = new AtomicBoolean(true);
executorService = Executors.newFixedThreadPool(3);
// Three background workers loop while `alive` is set: one records values, one reads
// metric values, one drives the synchronized reporter.
Future<?> writeFuture = executorService.submit(new ConcurrentMetricOperation(alive, "record", () -> sensors.forEach(sensor -> sensor.record(random.nextInt(10000)))));
Future<?> readFuture = executorService.submit(new ConcurrentMetricOperation(alive, "read", () -> sensors.forEach(sensor -> sensor.metrics().forEach(metric -> assertNotNull(metric.metricValue(), "Invalid metric value")))));
Future<?> reportFuture = executorService.submit(new ConcurrentMetricOperation(alive, "report", reporter::processMetrics));
// Main thread churns sensors: keep roughly 10 live, removing from a random end and
// adding a freshly created sensor of a random stat type each iteration.
for (int i = 0; i < 10000; i++) {
if (sensors.size() > 10) {
Sensor sensor = random.nextBoolean() ? sensors.removeFirst() : sensors.removeLast();
metrics.removeSensor(sensor.name());
}
StatType statType = StatType.forId(random.nextInt(StatType.values().length));
sensors.add(sensorCreator.createSensor(statType, i));
}
// A completed future here means a worker died from an exception (or the loop above
// finished only because a worker deadlocked out of contention) — fail in that case.
assertFalse(readFuture.isDone(), "Read failed");
assertFalse(writeFuture.isDone(), "Write failed");
assertFalse(reportFuture.isDone(), "Report failed");
// Signal workers to exit their loops.
alive.set(false);
}
Aggregations