Use of org.apache.kafka.common.metrics.MetricConfig in the apache/kafka project.
Class RecordAccumulator, method registerMetrics.
private void registerMetrics(Metrics metrics, String metricGrpName) {
    MetricName metricName = metrics.metricName("waiting-threads", metricGrpName, "The number of user threads blocked waiting for buffer memory to enqueue their records");
    Measurable waitingThreads = new Measurable() {
        public double measure(MetricConfig config, long now) {
            return free.queued();
        }
    };
    metrics.addMetric(metricName, waitingThreads);
    metricName = metrics.metricName("buffer-total-bytes", metricGrpName, "The maximum amount of buffer memory the client can use (whether or not it is currently used).");
    Measurable totalBytes = new Measurable() {
        public double measure(MetricConfig config, long now) {
            return free.totalMemory();
        }
    };
    metrics.addMetric(metricName, totalBytes);
    metricName = metrics.metricName("buffer-available-bytes", metricGrpName, "The total amount of buffer memory that is not being used (either unallocated or in the free list).");
    Measurable availableBytes = new Measurable() {
        public double measure(MetricConfig config, long now) {
            return free.availableMemory();
        }
    };
    metrics.addMetric(metricName, availableBytes);
}
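The snippet above registers gauge-style metrics: each Measurable is evaluated lazily against the MetricConfig whenever the metric is read, rather than recording values eagerly. Below is a minimal, self-contained sketch of the same pattern; the "example-metrics" group name and the AtomicInteger standing in for the accumulator's BufferPool state are illustrative, not part of the Kafka code above.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;

public class GaugeRegistrationSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics(new MetricConfig());
        // Stand-in for the mutable state (the BufferPool) that the real accumulator reads from.
        final AtomicInteger queuedThreads = new AtomicInteger(0);

        MetricName name = metrics.metricName("waiting-threads", "example-metrics",
                "Illustrative gauge: threads blocked waiting for buffer memory");
        // The Measurable is invoked on every read of the metric, so it always reflects current state.
        metrics.addMetric(name, new Measurable() {
            public double measure(MetricConfig config, long now) {
                return queuedThreads.get();
            }
        });

        queuedThreads.incrementAndGet();
        System.out.println(metrics.metric(name).metricValue()); // 1.0
        metrics.close();
    }
}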
Use of org.apache.kafka.common.metrics.MetricConfig in the apache/kafka project.
Class ProducerMetrics, method main.
public static void main(String[] args) {
    Map<String, String> metricTags = Collections.singletonMap("client-id", "client-id");
    MetricConfig metricConfig = new MetricConfig().tags(metricTags);
    Metrics metrics = new Metrics(metricConfig);
    ProducerMetrics metricsRegistry = new ProducerMetrics(metrics);
    System.out.println(Metrics.toHtmlTable("kafka.producer", metricsRegistry.getAllTemplates()));
}
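Here MetricConfig only contributes a client-id tag, which every metric created from the Metrics instance inherits; the rest of the method just renders the producer metric templates as an HTML table. The hedged sketch below shows the other fluent options MetricConfig offers; the "demo-client" tag, sample count, and 30-second window are arbitrary example values.

import java.util.Collections;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;

public class MetricConfigSketch {
    public static void main(String[] args) {
        // Each setter returns the config, so the options chain fluently.
        MetricConfig config = new MetricConfig()
                .tags(Collections.singletonMap("client-id", "demo-client")) // attached to every metric name
                .samples(2)                                                 // samples retained per sensor
                .timeWindow(30, TimeUnit.SECONDS)                           // length of each sample window
                .recordLevel(Sensor.RecordingLevel.INFO);                   // suppress DEBUG-level sensors

        try (Metrics metrics = new Metrics(config)) {
            System.out.println(metrics.config().tags()); // {client-id=demo-client}
        }
    }
}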
Use of org.apache.kafka.common.metrics.MetricConfig in the apache/kafka project.
Class KafkaConsumer, method buildMetrics.
private static Metrics buildMetrics(ConsumerConfig config, Time time, String clientId) {
    Map<String, String> metricsTags = Collections.singletonMap(CLIENT_ID_METRIC_TAG, clientId);
    MetricConfig metricConfig = new MetricConfig()
            .samples(config.getInt(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG))
            .timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
            .recordLevel(Sensor.RecordingLevel.forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG)))
            .tags(metricsTags);
    List<MetricsReporter> reporters = config.getConfiguredInstances(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG,
            MetricsReporter.class, Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId));
    JmxReporter jmxReporter = new JmxReporter();
    jmxReporter.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)));
    reporters.add(jmxReporter);
    MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX));
    return new Metrics(metricConfig, reporters, time, metricsContext);
}
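buildMetrics shows the standard client wiring: the MetricConfig carries the sampling and tagging settings read from ConsumerConfig, while the reporters and MetricsContext control where the metrics are exposed. The sketch below reproduces the same wiring without a ConsumerConfig; the "example.app" namespace and "demo-client" id are placeholders, not the consumer's real JMX prefix or configuration keys.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.metrics.KafkaMetricsContext;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.MetricsContext;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.apache.kafka.common.utils.Time;

public class StandaloneMetricsSketch {
    static Metrics buildMetrics(String clientId) {
        MetricConfig metricConfig = new MetricConfig()
                .tags(Collections.singletonMap("client-id", clientId));

        // The JmxReporter exposes every registered metric as an MBean.
        List<MetricsReporter> reporters = new ArrayList<>();
        JmxReporter jmxReporter = new JmxReporter();
        jmxReporter.configure(Collections.singletonMap("client.id", clientId));
        reporters.add(jmxReporter);

        // The context supplies the namespace (JMX domain) and extra labels shared by all reporters.
        MetricsContext context = new KafkaMetricsContext("example.app", Collections.<String, Object>emptyMap());
        return new Metrics(metricConfig, reporters, Time.SYSTEM, context);
    }

    public static void main(String[] args) {
        try (Metrics metrics = buildMetrics("demo-client")) {
            System.out.println(metrics.metrics().size() + " metrics registered");
        }
    }
}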
Use of org.apache.kafka.common.metrics.MetricConfig in the apache/kafka project.
Class FetcherTest, method testPreferredReadReplicaOffsetError.
@Test
public void testPreferredReadReplicaOffsetError() {
    buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
    subscriptions.assignFromUser(singleton(tp0));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
    Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    assertEquals(selected.id(), 1);
    // Return an error, should unset the preferred read replica
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponse(tidp0, this.records, Errors.OFFSET_OUT_OF_RANGE, 100L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.empty()));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
    selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    assertEquals(selected.id(), -1);
}
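The fetcher test only needs a default MetricConfig (two samples over 30-second windows, recording at INFO); the behaviour under test is the preferred-read-replica handling. For context on what that default config means to the metrics library, here is a hedged standalone sketch of a sensor recording into it; the sensor and group names are invented for illustration.

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class SensorSketch {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics(new MetricConfig())) {
            Sensor latency = metrics.sensor("fetch-latency-example");
            latency.add(metrics.metricName("latency-avg", "example-group"), new Avg());
            latency.add(metrics.metricName("latency-max", "example-group"), new Max());

            // Recordings are aggregated into the config's sample windows.
            latency.record(100.0);
            latency.record(300.0);

            MetricName avgName = metrics.metricName("latency-avg", "example-group");
            System.out.println(metrics.metric(avgName).metricValue()); // 200.0
        }
    }
}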
Use of org.apache.kafka.common.metrics.MetricConfig in the apache/kafka project.
Class StreamTaskTest, method shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes.
@Test
public void shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes() {
    time = new MockTime(0L, 0L, 0L);
    metrics = new Metrics(new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO), time);
    // Create a processor that only forwards even keys to test the metrics at the source and terminal nodes
    final MockSourceNode<Integer, Integer> evenKeyForwardingSourceNode = new MockSourceNode<Integer, Integer>(intDeserializer, intDeserializer) {
        InternalProcessorContext<Integer, Integer> context;

        @Override
        public void init(final InternalProcessorContext<Integer, Integer> context) {
            this.context = context;
            super.init(context);
        }

        @Override
        public void process(final Record<Integer, Integer> record) {
            if (record.key() % 2 == 0) {
                context.forward(record);
            }
        }
    };
    task = createStatelessTaskWithForwardingTopology(evenKeyForwardingSourceNode);
    task.initializeIfNeeded();
    task.completeRestoration(noOpResetter -> {
    });
    final String sourceNodeName = evenKeyForwardingSourceNode.name();
    final String terminalNodeName = processorStreamTime.name();
    final Metric sourceAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    // e2e latency = 10
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(0, 0L)));
    task.process(10L);
    assertThat(sourceAvg.metricValue(), equalTo(10.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(10.0));
    // key 0: reaches terminal node
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));
    // e2e latency = 15
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(1, 0L)));
    task.process(15L);
    assertThat(sourceAvg.metricValue(), equalTo(12.5));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(15.0));
    // key 1: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));
    // e2e latency = 23
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(2, 0L)));
    task.process(23L);
    assertThat(sourceAvg.metricValue(), equalTo(16.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));
    // key 2: reaches terminal node
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));
    // e2e latency = 5
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(3, 0L)));
    task.process(5L);
    assertThat(sourceAvg.metricValue(), equalTo(13.25));
    assertThat(sourceMin.metricValue(), equalTo(5.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));
    // key 3: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));
}
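The Streams test constructs its Metrics with recordLevel(Sensor.RecordingLevel.INFO), which is what lets the INFO-level record-e2e-latency sensors record while any DEBUG-level sensors stay inert. A small sketch of that filtering behaviour is below; the sensor and group names are invented for illustration.

import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;

public class RecordingLevelSketch {
    public static void main(String[] args) {
        MetricConfig config = new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO);
        try (Metrics metrics = new Metrics(config)) {
            Sensor infoSensor = metrics.sensor("e2e-latency-example", Sensor.RecordingLevel.INFO);
            Sensor debugSensor = metrics.sensor("debug-only-example", Sensor.RecordingLevel.DEBUG);
            infoSensor.add(metrics.metricName("info-avg", "example-group"), new Avg());
            debugSensor.add(metrics.metricName("debug-avg", "example-group"), new Avg());

            System.out.println(infoSensor.shouldRecord());  // true: INFO is enabled by the config
            System.out.println(debugSensor.shouldRecord()); // false: DEBUG recordings are dropped
            debugSensor.record(42.0);                       // effectively a no-op under this config
        }
    }
}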