Example use of org.apache.kafka.common.Metric in Apache Kafka: class MetricsTest, method testPercentiles.
@Test
public void testPercentiles() {
    // Constant-width histogram over [0, 100]; size is presumably 4 bytes per bucket — TODO confirm.
    int buckets = 100;
    Percentiles percentiles = new Percentiles(4 * buckets, 0.0, 100.0, BucketSizing.CONSTANT,
        new Percentile(metrics.metricName("test.p25", "grp1"), 25),
        new Percentile(metrics.metricName("test.p50", "grp1"), 50),
        new Percentile(metrics.metricName("test.p75", "grp1"), 75));
    MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percentiles);
    // Look up the three registered percentile metrics for assertion.
    Metric p25 = this.metrics.metrics().get(metrics.metricName("test.p25", "grp1"));
    Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
    Metric p75 = this.metrics.metrics().get(metrics.metricName("test.p75", "grp1"));
    // record two windows worth of sequential values
    for (int value = 0; value < buckets; value++) {
        sensor.record(value);
    }
    assertEquals(25, (Double) p25.metricValue(), 1.0);
    assertEquals(50, (Double) p50.metricValue(), 1.0);
    assertEquals(75, (Double) p75.metricValue(), 1.0);
    // Two windows of zeros should flush out the earlier samples.
    for (int value = 0; value < buckets; value++) {
        sensor.record(0.0);
    }
    assertEquals(0.0, (Double) p25.metricValue(), 1.0);
    assertEquals(0.0, (Double) p50.metricValue(), 1.0);
    assertEquals(0.0, (Double) p75.metricValue(), 1.0);
    // record two more windows worth of sequential values
    for (int value = 0; value < buckets; value++) {
        sensor.record(value);
    }
    assertEquals(25, (Double) p25.metricValue(), 1.0);
    assertEquals(50, (Double) p50.metricValue(), 1.0);
    assertEquals(75, (Double) p75.metricValue(), 1.0);
}
Example use of org.apache.kafka.common.Metric in Apache Kafka: class MetricsTest, method shouldPinLargerValuesToMax.
@Test
public void shouldPinLargerValuesToMax() {
    // Values recorded above the histogram's upper bound should be clamped to it.
    final double lowerBound = 0.0d;
    final double upperBound = 100d;
    Percentiles percentiles = new Percentiles(1000, lowerBound, upperBound, BucketSizing.LINEAR,
        new Percentile(metrics.metricName("test.p50", "grp1"), 50));
    MetricConfig config = new MetricConfig().eventWindow(50).samples(2);
    Sensor sensor = metrics.sensor("test", config);
    sensor.add(percentiles);
    Metric p50 = this.metrics.metrics().get(metrics.metricName("test.p50", "grp1"));
    // Both samples land 100 beyond the max, so the median must report exactly the max.
    sensor.record(upperBound + 100);
    sensor.record(upperBound + 100);
    assertEquals(upperBound, (double) p50.metricValue(), 0d);
}
Example use of org.apache.kafka.common.Metric in Apache Kafka: class RocksDBMetricsIntegrationTest, method checkMetricByName.
/**
 * Asserts that exactly {@code numMetric} metrics named {@code metricName} are present in
 * {@code listMetric}, and that each of them reports a non-null value.
 */
private void checkMetricByName(final List<Metric> listMetric, final String metricName, final int numMetric) {
    final List<Metric> matching = listMetric.stream()
        .filter(candidate -> metricName.equals(candidate.metricName().name()))
        .collect(Collectors.toList());
    assertThat("Size of metrics of type:'" + metricName + "' must be equal to " + numMetric + " but it's equal to " + matching.size(), matching.size(), is(numMetric));
    for (final Metric metric : matching) {
        assertThat("Metric:'" + metric.metricName() + "' must be not null", metric.metricValue(), is(notNullValue()));
    }
}
Example use of org.apache.kafka.common.Metric in Apache Kafka: class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler.
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
// Collector whose producer fails every send; the CONTINUE handler should log and move on
// rather than surface a StreamsException to the caller.
final RecordCollector collector = new RecordCollectorImpl(logContext, taskId, getExceptionalStreamsProducerOnSend(new Exception()), new AlwaysContinueProductionExceptionHandler(), streamsMetrics);
try (final LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(RecordCollectorImpl.class)) {
// First send fails internally; flush forces the failure to be observed and logged.
collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
collector.flush();
final List<String> messages = logCaptureAppender.getMessages();
// Build a diagnostic listing of all captured log lines for the assertion message.
final StringBuilder errorMessage = new StringBuilder("Messages received:");
for (final String error : messages) {
errorMessage.append("\n - ").append(error);
}
// The most recent log entry must be the CONTINUE-handler warning.
assertTrue(errorMessage.toString(), messages.get(messages.size() - 1).endsWith("Exception handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded."));
}
// The skipped record must be counted in the task-level dropped-records metric.
final Metric metric = streamsMetrics.metrics().get(new MetricName("dropped-records-total", "stream-task-metrics", "The total number of dropped records", mkMap(mkEntry("thread-id", Thread.currentThread().getName()), mkEntry("task-id", taskId.toString()))));
assertEquals(1.0, metric.metricValue());
// A subsequent send/flush/close must not throw despite the earlier failure.
collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
collector.flush();
collector.closeClean();
}
Example use of org.apache.kafka.common.Metric in Apache Kafka: class IntegrationTestUtils, method waitForCompletion.
/**
 * Wait for streams to "finish", based on the consumer lag metric. Includes only the main consumer, for
 * completion of standbys as well see {@link #waitForStandbyCompletion}
 *
 * Caveats:
 * - Inputs must be finite, fully loaded, and flushed before this method is called
 * - expectedPartitions is the total number of partitions to watch the lag on, including both input and internal.
 *   It's somewhat ok to get this wrong, as the main failure case would be an immediate return due to the clients
 *   not being initialized, which you can avoid with any non-zero value. But it's probably better to get it right ;)
 *
 * @param streams             the running KafkaStreams instance whose metrics are polled
 * @param expectedPartitions  minimum number of main-consumer "records-lag" metrics that must exist
 * @param timeoutMilliseconds how long to poll before giving up
 * @throws RuntimeException if the lag does not reach zero within the timeout, or if interrupted while waiting
 */
public static void waitForCompletion(final KafkaStreams streams, final int expectedPartitions, final long timeoutMilliseconds) {
    final long start = System.currentTimeMillis();
    while (true) {
        int lagMetrics = 0;
        double totalLag = 0.0;
        for (final Metric metric : streams.metrics().values()) {
            // Only the main consumer's lag counts toward completion; skip the restore consumer.
            if (metric.metricName().name().equals("records-lag")
                && !metric.metricName().tags().get("client-id").endsWith("restore-consumer")) {
                lagMetrics++;
                totalLag += ((Number) metric.metricValue()).doubleValue();
            }
        }
        // Done once every watched partition reports a lag metric and the total lag is zero.
        if (lagMetrics >= expectedPartitions && totalLag == 0.0) {
            return;
        }
        if (System.currentTimeMillis() - start >= timeoutMilliseconds) {
            throw new RuntimeException(String.format("Timed out waiting for completion. lagMetrics=[%s/%s] totalLag=[%s]", lagMetrics, expectedPartitions, totalLag));
        }
        // Pause briefly between polls instead of busy-spinning on the metrics map.
        try {
            Thread.sleep(10L);
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while waiting for completion", e);
        }
    }
}
Aggregations