Use of org.apache.flink.metrics.testutils.MetricListener in project flink by apache.
In class KafkaSourceReaderMetricsTest, method testCommitOffsetTracking:
@Test
public void testCommitOffsetTracking() {
    MetricListener metricListener = new MetricListener();
    final KafkaSourceReaderMetrics kafkaSourceReaderMetrics =
            new KafkaSourceReaderMetrics(
                    InternalSourceReaderMetricGroup.mock(metricListener.getMetricGroup()));
    kafkaSourceReaderMetrics.registerTopicPartition(FOO_0);
    kafkaSourceReaderMetrics.registerTopicPartition(FOO_1);
    kafkaSourceReaderMetrics.registerTopicPartition(BAR_0);
    kafkaSourceReaderMetrics.registerTopicPartition(BAR_1);
    kafkaSourceReaderMetrics.recordCommittedOffset(FOO_0, 15213L);
    kafkaSourceReaderMetrics.recordCommittedOffset(FOO_1, 18213L);
    kafkaSourceReaderMetrics.recordCommittedOffset(BAR_0, 18613L);
    kafkaSourceReaderMetrics.recordCommittedOffset(BAR_1, 15513L);
    assertCommittedOffset(FOO_0, 15213L, metricListener);
    assertCommittedOffset(FOO_1, 18213L, metricListener);
    assertCommittedOffset(BAR_0, 18613L, metricListener);
    assertCommittedOffset(BAR_1, 15513L, metricListener);
    final Optional<Counter> commitsSucceededCounter =
            metricListener.getCounter(
                    KafkaSourceReaderMetrics.KAFKA_SOURCE_READER_METRIC_GROUP,
                    KafkaSourceReaderMetrics.COMMITS_SUCCEEDED_METRIC_COUNTER);
    assertTrue(commitsSucceededCounter.isPresent());
    assertEquals(0L, commitsSucceededCounter.get().getCount());
    kafkaSourceReaderMetrics.recordSucceededCommit();
    assertEquals(1L, commitsSucceededCounter.get().getCount());
}
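The assertCommittedOffset helper referenced above is not part of this snippet. A minimal sketch of what it might look like, assuming the committed-offset gauge is registered under per-topic and per-partition subgroups; the constants TOPIC_GROUP, PARTITION_GROUP, and COMMITTED_OFFSET_METRIC_GAUGE below are assumptions, not taken from the snippet:

// Hypothetical helper; the group/gauge constant names are assumed, not shown in the snippet.
private void assertCommittedOffset(
        TopicPartition tp, long expectedOffset, MetricListener metricListener) {
    final Optional<Gauge<Long>> committedOffsetGauge =
            metricListener.getGauge(
                    KafkaSourceReaderMetrics.KAFKA_SOURCE_READER_METRIC_GROUP,
                    KafkaSourceReaderMetrics.TOPIC_GROUP,
                    tp.topic(),
                    KafkaSourceReaderMetrics.PARTITION_GROUP,
                    String.valueOf(tp.partition()),
                    KafkaSourceReaderMetrics.COMMITTED_OFFSET_METRIC_GAUGE);
    assertTrue(committedOffsetGauge.isPresent());
    assertEquals(expectedOffset, (long) committedOffsetGauge.get().getValue());
}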
Use of org.apache.flink.metrics.testutils.MetricListener in project flink by apache.
In class KafkaWriterITCase, method setUp:
@BeforeEach
public void setUp(TestInfo testInfo) {
    metricListener = new MetricListener();
    timeService = new TriggerTimeService();
    // Derive a per-test Kafka topic name from the test display name, stripping non-word characters.
    topic = testInfo.getDisplayName().replaceAll("\\W", "");
}
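The listener created here is what the test later uses to look up metrics registered by the writer under test. A minimal, self-contained sketch of that round trip, using only the MetricListener API shown on this page; the metric name "bytesWritten" is purely illustrative and not a KafkaWriter metric:

MetricListener metricListener = new MetricListener();
// Any component handed this group registers its metrics against the listener...
Counter bytesWritten = metricListener.getMetricGroup().counter("bytesWritten"); // illustrative name
bytesWritten.inc(42);
// ...and the test can then look them up by identifier.
assertTrue(metricListener.getCounter("bytesWritten").isPresent());
assertEquals(42L, metricListener.getCounter("bytesWritten").get().getCount());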
Use of org.apache.flink.metrics.testutils.MetricListener in project flink by apache.
In class KafkaPartitionSplitReaderTest, method testPendingRecordsGauge:
@ParameterizedTest
@EmptySource
@ValueSource(strings = {"_underscore.period-minus"})
public void testPendingRecordsGauge(String topicSuffix) throws Throwable {
    final String topic1Name = TOPIC1 + topicSuffix;
    final String topic2Name = TOPIC2 + topicSuffix;
    if (!topicSuffix.isEmpty()) {
        KafkaSourceTestEnv.setupTopic(
                topic1Name, true, true, KafkaSourceTestEnv::getRecordsForTopic);
        KafkaSourceTestEnv.setupTopic(
                topic2Name, true, true, KafkaSourceTestEnv::getRecordsForTopic);
    }
    MetricListener metricListener = new MetricListener();
    final Properties props = new Properties();
    // Poll a single record at a time so pendingRecords decreases by exactly one per fetch.
    props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
    KafkaPartitionSplitReader reader =
            createReader(props, InternalSourceReaderMetricGroup.mock(metricListener.getMetricGroup()));
    // Add a split
    reader.handleSplitsChanges(
            new SplitsAddition<>(
                    Collections.singletonList(
                            new KafkaPartitionSplit(new TopicPartition(topic1Name, 0), 0L))));
    // pendingRecords should not have been registered yet because registration is lazy
    assertFalse(metricListener.getGauge(MetricNames.PENDING_RECORDS).isPresent());
    // Trigger the first fetch
    reader.fetch();
    final Optional<Gauge<Long>> pendingRecords = metricListener.getGauge(MetricNames.PENDING_RECORDS);
    assertTrue(pendingRecords.isPresent());
    // Validate pendingRecords
    assertEquals(NUM_RECORDS_PER_PARTITION - 1, (long) pendingRecords.get().getValue());
    for (int i = 1; i < NUM_RECORDS_PER_PARTITION; i++) {
        reader.fetch();
        assertEquals(NUM_RECORDS_PER_PARTITION - i - 1, (long) pendingRecords.get().getValue());
    }
    // Add another split
    reader.handleSplitsChanges(
            new SplitsAddition<>(
                    Collections.singletonList(
                            new KafkaPartitionSplit(new TopicPartition(topic2Name, 0), 0L))));
    // Validate pendingRecords as the second split is consumed
    for (int i = 0; i < NUM_RECORDS_PER_PARTITION; i++) {
        reader.fetch();
        assertEquals(NUM_RECORDS_PER_PARTITION - i - 1, (long) pendingRecords.get().getValue());
    }
}
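The assertion before the first fetch documents lazy registration: the pendingRecords gauge only exists once the reader has something to report. A rough sketch of that general pattern follows; it is not Flink's actual KafkaSourceReaderMetrics implementation, and the field and metric names are illustrative:

// Illustrative lazy-registration pattern: register the gauge only when a value is first available.
private final AtomicLong pendingRecords = new AtomicLong(0L);
private boolean pendingRecordsGaugeRegistered = false;

void updatePendingRecords(MetricGroup metricGroup, long value) {
    pendingRecords.set(value);
    if (!pendingRecordsGaugeRegistered) {
        Gauge<Long> gauge = pendingRecords::get;
        metricGroup.gauge("pendingRecords", gauge); // name assumed to match MetricNames.PENDING_RECORDS
        pendingRecordsGaugeRegistered = true;
    }
}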
Use of org.apache.flink.metrics.testutils.MetricListener in project flink by apache.
In class KafkaSourceReaderMetricsTest, method testNonTrackingTopicPartition:
@Test
public void testNonTrackingTopicPartition() {
    MetricListener metricListener = new MetricListener();
    final KafkaSourceReaderMetrics kafkaSourceReaderMetrics =
            new KafkaSourceReaderMetrics(
                    InternalSourceReaderMetricGroup.mock(metricListener.getMetricGroup()));
    // Recording offsets for a partition that was never registered must fail.
    assertThrows(
            IllegalArgumentException.class,
            () -> kafkaSourceReaderMetrics.recordCurrentOffset(FOO_0, 15213L));
    assertThrows(
            IllegalArgumentException.class,
            () -> kafkaSourceReaderMetrics.recordCommittedOffset(FOO_0, 15213L));
}
Use of org.apache.flink.metrics.testutils.MetricListener in project flink by apache.
In class KafkaSourceReaderMetricsTest, method testFailedCommit:
@Test
public void testFailedCommit() {
    MetricListener metricListener = new MetricListener();
    final KafkaSourceReaderMetrics kafkaSourceReaderMetrics =
            new KafkaSourceReaderMetrics(
                    InternalSourceReaderMetricGroup.mock(metricListener.getMetricGroup()));
    kafkaSourceReaderMetrics.recordFailedCommit();
    final Optional<Counter> commitsFailedCounter =
            metricListener.getCounter(
                    KafkaSourceReaderMetrics.KAFKA_SOURCE_READER_METRIC_GROUP,
                    KafkaSourceReaderMetrics.COMMITS_FAILED_METRIC_COUNTER);
    assertTrue(commitsFailedCounter.isPresent());
    assertEquals(1L, commitsFailedCounter.get().getCount());
}