use of org.apache.kafka.common.metrics.MetricConfig in project ksql by confluentinc.
the class MetricCollectors method initialize.
// Visible for testing.
// We need to call this from MetricCollectorsTest because otherwise tests clobber each
// other's metric data. We also need it from KsqlEngineMetricsTest.
public static void initialize() {
  MetricConfig metricConfig = new MetricConfig()
      .samples(100)
      .timeWindow(1000, TimeUnit.MILLISECONDS);
  List<MetricsReporter> reporters = new ArrayList<>();
  reporters.add(new JmxReporter("io.confluent.ksql.metrics"));
  // Replace all static contents other than Time so that they are cleaned up even for tests that
  // are not aware of the need to initialize/clean up this class, in case test processes are
  // reused. Tests that are aware of the class clean everything up properly, but a full, fresh
  // instantiation here ensures that something like KsqlEngineMetricsTest running after another
  // test that used MetricCollectors without calling cleanUp still behaves correctly.
  metrics = new Metrics(metricConfig, reporters, new SystemTime());
  collectorMap = new ConcurrentHashMap<>();
}
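As a hedged illustration of how a test might use this hook, a minimal sketch is shown below; the test class and method names are assumptions (only initialize() and cleanUp() are referenced by the comments above), and the class is assumed to live in the same package as MetricCollectors so no extra import is needed.

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class ExampleMetricCollectorsTest {

  @Before
  public void setUp() {
    // Re-create the shared static Metrics instance and collector map so state left behind
    // by earlier tests in the same JVM cannot leak into this one.
    MetricCollectors.initialize();
  }

  @After
  public void tearDown() {
    // Counterpart referenced in the comments above; releases the static state again.
    MetricCollectors.cleanUp();
  }

  @Test
  public void shouldStartFromFreshMetricsState() {
    // Test body omitted; any assertions here observe a freshly initialized MetricCollectors.
  }
}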
use of org.apache.kafka.common.metrics.MetricConfig in project ksql by confluentinc.
the class KsqlEngineMetrics method configureNumActiveQueries.
private Sensor configureNumActiveQueries(Metrics metrics) {
  Sensor sensor = createSensor(metrics, metricGroupName + "-active-queries");
  sensor.add(
      metrics.metricName(
          "num-active-queries",
          this.metricGroupName,
          "The current number of active queries running in this engine"),
      new MeasurableStat() {
        @Override
        public double measure(MetricConfig metricConfig, long l) {
          return ksqlEngine.numberOfLiveQueries();
        }

        @Override
        public void record(MetricConfig metricConfig, double v, long l) {
          // We don't record anything; the live-query count is read directly in measure().
        }
      });
  sensor.add(
      metrics.metricName(
          "num-persistent-queries",
          this.metricGroupName,
          "The current number of persistent queries running in this engine"),
      new MeasurableStat() {
        @Override
        public double measure(MetricConfig metricConfig, long l) {
          return ksqlEngine.numberOfPersistentQueries();
        }

        @Override
        public void record(MetricConfig metricConfig, double v, long l) {
          // No action on record; the persistent-query count is read directly in measure().
        }
      });
  return sensor;
}
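Because measure() does all the work here and record() is deliberately a no-op, the same read-on-demand values could also be exposed as plain Measurable gauges rather than MeasurableStats on a Sensor. A minimal sketch under that assumption, using a hypothetical helper rather than anything from ksql:

import java.util.function.Supplier;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Metrics;

// Hypothetical helper, not part of ksql: registers a gauge whose value is re-read on every query.
final class GaugeRegistration {
  static void register(Metrics metrics, String group, String name, String description,
                       Supplier<? extends Number> value) {
    MetricName metricName = metrics.metricName(name, group, description);
    Measurable gauge = (config, now) -> value.get().doubleValue();
    metrics.addMetric(metricName, gauge);
  }
}

With such a helper, the active-query gauge could be registered by passing metrics, metricGroupName, the metric name and description, and ksqlEngine::numberOfLiveQueries as the supplier, and the persistent-query gauge analogously; the Sensor in the original method is only needed if something actually records to it.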
use of org.apache.kafka.common.metrics.MetricConfig in project apache-kafka-on-k8s by banzaicloud.
the class FrequenciesTest method setup.
@Before
public void setup() {
  config = new MetricConfig().eventWindow(50).samples(2);
  time = new MockTime();
  metrics = new Metrics(config, Arrays.asList((MetricsReporter) new JmxReporter()), time, true);
}
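For context, a sketch of a test method that could sit alongside this setup and exercise a Frequencies stat against the configured event window; the sensor and metric names are illustrative, not taken from FrequenciesTest, and imports from org.apache.kafka.common.metrics.stats are assumed.

@Test
public void exampleBooleanFrequencies() {
  Sensor sensor = metrics.sensor("bool-values");
  MetricName falseCount = metrics.metricName("false.frequency", "test-group");
  MetricName trueCount = metrics.metricName("true.frequency", "test-group");
  sensor.add(Frequencies.forBooleanValues(falseCount, trueCount));

  // With eventWindow(50) and samples(2), each sample completes after 50 recorded events,
  // so the two frequency metrics reflect at most the last 100 recordings.
  for (int i = 0; i < 100; i++) {
    sensor.record(i % 2);  // alternate 0.0 (false) and 1.0 (true)
  }
}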
use of org.apache.kafka.common.metrics.MetricConfig in project apache-kafka-on-k8s by banzaicloud.
the class FetcherTest method testFetcherMetricsTemplates.
@Test
public void testFetcherMetricsTemplates() throws Exception {
  metrics.close();
  Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
  metrics = new Metrics(new MetricConfig().tags(clientTags));
  metricsRegistry = new FetcherMetricsRegistry(clientTags.keySet(), "consumer" + groupId);
  fetcher.close();
  fetcher = createFetcher(subscriptions, metrics);

  // Fetch from topic to generate topic metrics
  subscriptions.assignFromUser(singleton(tp0));
  subscriptions.seek(tp0, 0);
  assertEquals(1, fetcher.sendFetches());
  client.prepareResponse(fullFetchResponse(tp0, this.records, Errors.NONE, 100L, 0));
  consumerClient.poll(0);
  assertTrue(fetcher.hasCompletedFetches());
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetcher.fetchedRecords();
  assertTrue(partitionRecords.containsKey(tp0));

  // Create throttle metrics
  Fetcher.throttleTimeSensor(metrics, metricsRegistry);

  // Verify that all metrics except metrics-count have registered templates
  Set<MetricNameTemplate> allMetrics = new HashSet<>();
  for (MetricName n : metrics.metrics().keySet()) {
    String name = n.name().replaceAll(tp0.toString(), "{topic}-{partition}");
    if (!n.group().equals("kafka-metrics-count")) {
      allMetrics.add(new MetricNameTemplate(name, n.group(), "", n.tags().keySet()));
    }
  }
  TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.getAllTemplates()), "metrics", "templates");
}
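The template set verified here is the same one Kafka uses to generate metric documentation. A minimal sketch of that use, assuming kafka-clients on the classpath; the JMX domain string and tag set below are illustrative choices, not taken from the test.

import java.util.Collections;
import org.apache.kafka.clients.consumer.internals.FetcherMetricsRegistry;
import org.apache.kafka.common.metrics.Metrics;

public class FetcherMetricsDocs {
  public static void main(String[] args) {
    // Render the registry's MetricNameTemplates as the HTML table used for metric documentation.
    FetcherMetricsRegistry registry =
        new FetcherMetricsRegistry(Collections.singleton("client-id"), "consumer");
    System.out.println(Metrics.toHtmlTable("kafka.consumer", registry.getAllTemplates()));
  }
}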
use of org.apache.kafka.common.metrics.MetricConfig in project apache-kafka-on-k8s by banzaicloud.
the class RecordAccumulator method registerMetrics.
private void registerMetrics(Metrics metrics, String metricGrpName) {
  MetricName metricName = metrics.metricName("waiting-threads", metricGrpName,
      "The number of user threads blocked waiting for buffer memory to enqueue their records");
  Measurable waitingThreads = new Measurable() {
    public double measure(MetricConfig config, long now) {
      return free.queued();
    }
  };
  metrics.addMetric(metricName, waitingThreads);

  metricName = metrics.metricName("buffer-total-bytes", metricGrpName,
      "The maximum amount of buffer memory the client can use (whether or not it is currently used).");
  Measurable totalBytes = new Measurable() {
    public double measure(MetricConfig config, long now) {
      return free.totalMemory();
    }
  };
  metrics.addMetric(metricName, totalBytes);

  metricName = metrics.metricName("buffer-available-bytes", metricGrpName,
      "The total amount of buffer memory that is not being used (either unallocated or in the free list).");
  Measurable availableBytes = new Measurable() {
    public double measure(MetricConfig config, long now) {
      return free.availableMemory();
    }
  };
  metrics.addMetric(metricName, availableBytes);

  Sensor bufferExhaustedRecordSensor = metrics.sensor("buffer-exhausted-records");
  MetricName rateMetricName = metrics.metricName("buffer-exhausted-rate", metricGrpName,
      "The average per-second number of record sends that are dropped due to buffer exhaustion");
  MetricName totalMetricName = metrics.metricName("buffer-exhausted-total", metricGrpName,
      "The total number of record sends that are dropped due to buffer exhaustion");
  bufferExhaustedRecordSensor.add(new Meter(rateMetricName, totalMetricName));
}
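For completeness, a hedged standalone sketch of how a Meter-backed sensor like the one above is driven; the class, sensor, and metric names here are illustrative and not taken from the producer code.

import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Meter;

public class MeterExample {
  public static void main(String[] args) {
    try (Metrics metrics = new Metrics()) {
      Sensor droppedRecords = metrics.sensor("example-dropped-records");
      droppedRecords.add(new Meter(
          metrics.metricName("dropped-rate", "example-group", "Average per-second number of drops"),
          metrics.metricName("dropped-total", "example-group", "Total number of drops")));

      // Each record() call feeds both the windowed rate and the cumulative total of the Meter.
      droppedRecords.record();
    }
  }
}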