Search in sources :

Example 1 with MetricListener

Use of org.apache.flink.metrics.testutils.MetricListener in the project flink by apache.

The class KafkaSourceReaderMetricsTest defines the method testCommitOffsetTracking.

@Test
public void testCommitOffsetTracking() {
    // Listener that captures every metric registered through the mocked group.
    final MetricListener listener = new MetricListener();
    final KafkaSourceReaderMetrics readerMetrics =
            new KafkaSourceReaderMetrics(
                    InternalSourceReaderMetricGroup.mock(listener.getMetricGroup()));
    // Track all four partitions before recording any committed offsets.
    readerMetrics.registerTopicPartition(FOO_0);
    readerMetrics.registerTopicPartition(FOO_1);
    readerMetrics.registerTopicPartition(BAR_0);
    readerMetrics.registerTopicPartition(BAR_1);
    // Record one committed offset per partition ...
    readerMetrics.recordCommittedOffset(FOO_0, 15213L);
    readerMetrics.recordCommittedOffset(FOO_1, 18213L);
    readerMetrics.recordCommittedOffset(BAR_0, 18613L);
    readerMetrics.recordCommittedOffset(BAR_1, 15513L);
    // ... and verify each one is exposed through the metric group.
    assertCommittedOffset(FOO_0, 15213L, listener);
    assertCommittedOffset(FOO_1, 18213L, listener);
    assertCommittedOffset(BAR_0, 18613L, listener);
    assertCommittedOffset(BAR_1, 15513L, listener);
    // The commits-succeeded counter starts at zero and increments per successful commit.
    final Optional<Counter> succeededCommits =
            listener.getCounter(
                    KafkaSourceReaderMetrics.KAFKA_SOURCE_READER_METRIC_GROUP,
                    KafkaSourceReaderMetrics.COMMITS_SUCCEEDED_METRIC_COUNTER);
    assertTrue(succeededCommits.isPresent());
    assertEquals(0L, succeededCommits.get().getCount());
    readerMetrics.recordSucceededCommit();
    assertEquals(1L, succeededCommits.get().getCount());
}
Also used : Counter(org.apache.flink.metrics.Counter) MetricListener(org.apache.flink.metrics.testutils.MetricListener) Test(org.junit.Test)

Example 2 with MetricListener

Use of org.apache.flink.metrics.testutils.MetricListener in the project flink by apache.

The class KafkaWriterITCase defines the method setUp.

@BeforeEach
public void setUp(TestInfo testInfo) {
    // Derive a unique, Kafka-safe topic name from the test's display name
    // by stripping all non-word characters.
    topic = testInfo.getDisplayName().replaceAll("\\W", "");
    // Fresh time service and metric listener for each test run.
    timeService = new TriggerTimeService();
    metricListener = new MetricListener();
}
Also used : MetricListener(org.apache.flink.metrics.testutils.MetricListener) BeforeEach(org.junit.jupiter.api.BeforeEach)

Example 3 with MetricListener

Use of org.apache.flink.metrics.testutils.MetricListener in the project flink by apache.

The class KafkaPartitionSplitReaderTest defines the method testPendingRecordsGauge.

@ParameterizedTest
@EmptySource
@ValueSource(strings = { "_underscore.period-minus" })
public void testPendingRecordsGauge(String topicSuffix) throws Throwable {
    // Verifies that the pendingRecords gauge is registered lazily (only after the
    // first fetch) and that its value tracks the number of unread records, including
    // after a second split is added.
    final String topic1Name = TOPIC1 + topicSuffix;
    final String topic2Name = TOPIC2 + topicSuffix;
    if (!topicSuffix.isEmpty()) {
        KafkaSourceTestEnv.setupTopic(topic1Name, true, true, KafkaSourceTestEnv::getRecordsForTopic);
        KafkaSourceTestEnv.setupTopic(topic2Name, true, true, KafkaSourceTestEnv::getRecordsForTopic);
    }
    MetricListener metricListener = new MetricListener();
    final Properties props = new Properties();
    // Limit each poll to one record so the gauge decreases by exactly 1 per fetch.
    props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
    KafkaPartitionSplitReader reader = createReader(props, InternalSourceReaderMetricGroup.mock(metricListener.getMetricGroup()));
    // Add a split
    reader.handleSplitsChanges(new SplitsAddition<>(Collections.singletonList(new KafkaPartitionSplit(new TopicPartition(topic1Name, 0), 0L))));
    // pendingRecords should have not been registered because of lazily registration
    assertFalse(metricListener.getGauge(MetricNames.PENDING_RECORDS).isPresent());
    // Trigger first fetch
    reader.fetch();
    final Optional<Gauge<Long>> pendingRecords = metricListener.getGauge(MetricNames.PENDING_RECORDS);
    assertTrue(pendingRecords.isPresent());
    // Validate pendingRecords after the first record was fetched.
    // NOTE: removed redundant assertNotNull(pendingRecords) — the local Optional can
    // never be null, and presence was already asserted above.
    assertEquals(NUM_RECORDS_PER_PARTITION - 1, (long) pendingRecords.get().getValue());
    for (int i = 1; i < NUM_RECORDS_PER_PARTITION; i++) {
        reader.fetch();
        assertEquals(NUM_RECORDS_PER_PARTITION - i - 1, (long) pendingRecords.get().getValue());
    }
    // Add another split
    reader.handleSplitsChanges(new SplitsAddition<>(Collections.singletonList(new KafkaPartitionSplit(new TopicPartition(topic2Name, 0), 0L))));
    // Validate pendingRecords: after the i-th fetch of the new split,
    // NUM_RECORDS_PER_PARTITION - i - 1 records remain pending.
    for (int i = 0; i < NUM_RECORDS_PER_PARTITION; i++) {
        reader.fetch();
        assertEquals(NUM_RECORDS_PER_PARTITION - i - 1, (long) pendingRecords.get().getValue());
    }
}
Also used : KafkaPartitionSplit(org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaSourceTestEnv(org.apache.flink.connector.kafka.testutils.KafkaSourceTestEnv) Properties(java.util.Properties) MetricListener(org.apache.flink.metrics.testutils.MetricListener) Gauge(org.apache.flink.metrics.Gauge) EmptySource(org.junit.jupiter.params.provider.EmptySource) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 4 with MetricListener

Use of org.apache.flink.metrics.testutils.MetricListener in the project flink by apache.

The class KafkaSourceReaderMetricsTest defines the method testNonTrackingTopicPartition.

@Test
public void testNonTrackingTopicPartition() {
    // Recording offsets for a partition that was never registered must be rejected.
    final MetricListener listener = new MetricListener();
    final KafkaSourceReaderMetrics readerMetrics =
            new KafkaSourceReaderMetrics(
                    InternalSourceReaderMetricGroup.mock(listener.getMetricGroup()));
    assertThrows(
            IllegalArgumentException.class,
            () -> readerMetrics.recordCurrentOffset(FOO_0, 15213L));
    assertThrows(
            IllegalArgumentException.class,
            () -> readerMetrics.recordCommittedOffset(FOO_0, 15213L));
}
Also used : MetricListener(org.apache.flink.metrics.testutils.MetricListener) Test(org.junit.Test)

Example 5 with MetricListener

Use of org.apache.flink.metrics.testutils.MetricListener in the project flink by apache.

The class KafkaSourceReaderMetricsTest defines the method testFailedCommit.

@Test
public void testFailedCommit() {
    // A failed commit must bump the commits-failed counter to 1.
    final MetricListener listener = new MetricListener();
    final KafkaSourceReaderMetrics readerMetrics =
            new KafkaSourceReaderMetrics(
                    InternalSourceReaderMetricGroup.mock(listener.getMetricGroup()));
    readerMetrics.recordFailedCommit();
    final Optional<Counter> failedCommits =
            listener.getCounter(
                    KafkaSourceReaderMetrics.KAFKA_SOURCE_READER_METRIC_GROUP,
                    KafkaSourceReaderMetrics.COMMITS_FAILED_METRIC_COUNTER);
    assertTrue(failedCommits.isPresent());
    assertEquals(1L, failedCommits.get().getCount());
}
Also used : Counter(org.apache.flink.metrics.Counter) MetricListener(org.apache.flink.metrics.testutils.MetricListener) Test(org.junit.Test)

Aggregations

MetricListener (org.apache.flink.metrics.testutils.MetricListener)10 Test (org.junit.Test)6 Counter (org.apache.flink.metrics.Counter)5 Gauge (org.apache.flink.metrics.Gauge)3 KafkaPartitionSplit (org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit)2 MetricGroup (org.apache.flink.metrics.MetricGroup)2 TopicPartition (org.apache.kafka.common.TopicPartition)2 BeforeEach (org.junit.jupiter.api.BeforeEach)2 Properties (java.util.Properties)1 KafkaSourceTestEnv (org.apache.flink.connector.kafka.testutils.KafkaSourceTestEnv)1 TestingReaderOutput (org.apache.flink.connector.testutils.source.reader.TestingReaderOutput)1 Histogram (org.apache.flink.metrics.Histogram)1 HistogramStatistics (org.apache.flink.metrics.HistogramStatistics)1 Meter (org.apache.flink.metrics.Meter)1 RestHighLevelClient (org.elasticsearch.client.RestHighLevelClient)1 Test (org.junit.jupiter.api.Test)1 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)1 EmptySource (org.junit.jupiter.params.provider.EmptySource)1 ValueSource (org.junit.jupiter.params.provider.ValueSource)1