Example 11 with Metric

Use of org.apache.flink.metrics.Metric in project flink by apache.

The class InMemoryReporter, method notifyOfRemovedMetric:

@Override
public void notifyOfRemovedMetric(Metric metric, String metricName, MetricGroup group) {
    if (!retainMetrics) {
        synchronized (this) {
            MetricGroup metricGroup = unwrap(group);
            Map<String, Metric> registeredMetrics = metrics.get(metricGroup);
            if (registeredMetrics != null) {
                registeredMetrics.remove(metricName);
                if (registeredMetrics.isEmpty()) {
                    metrics.remove(metricGroup);
                }
            }
        }
    }
}
Also used: OperatorMetricGroup (org.apache.flink.metrics.groups.OperatorMetricGroup), MetricGroup (org.apache.flink.metrics.MetricGroup), Metric (org.apache.flink.metrics.Metric)
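
For orientation, this removal callback is the counterpart of the reporter's notifyOfAddedMetric. A minimal sketch of a reporter built around the same add/remove symmetry follows; the class name SimpleMapReporter and the choice of keying the map by the full metric identifier are assumptions for illustration, not taken from the Flink code above.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.flink.metrics.Metric;
import org.apache.flink.metrics.MetricConfig;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.metrics.reporter.MetricReporter;

/** Minimal in-memory reporter sketch; name and storage layout are illustrative. */
public class SimpleMapReporter implements MetricReporter {

    // Keyed by the fully qualified metric identifier for simplicity.
    private final Map<String, Metric> metrics = new ConcurrentHashMap<>();

    @Override
    public void open(MetricConfig config) {}

    @Override
    public void close() {}

    @Override
    public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
        metrics.put(group.getMetricIdentifier(metricName), metric);
    }

    @Override
    public void notifyOfRemovedMetric(Metric metric, String metricName, MetricGroup group) {
        // Mirror of the removal logic above: drop the metric when it is unregistered.
        metrics.remove(group.getMetricIdentifier(metricName));
    }
}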

Example 12 with Metric

Use of org.apache.flink.metrics.Metric in project flink by apache.

The class SourceStreamTaskTestBase, method testMetrics:

public void testMetrics(
        FunctionWithException<Environment, ? extends StreamTask<Integer, ?>, Exception> taskFactory,
        StreamOperatorFactory<?> operatorFactory,
        Matcher<Double> busyTimeMatcher)
        throws Exception {
    long sleepTime = 42;
    StreamTaskMailboxTestHarnessBuilder<Integer> builder = new StreamTaskMailboxTestHarnessBuilder<>(taskFactory, INT_TYPE_INFO);
    final Map<String, Metric> metrics = new ConcurrentHashMap<>();
    final TaskMetricGroup taskMetricGroup = StreamTaskTestHarness.createTaskMetricGroup(metrics);
    try (StreamTaskMailboxTestHarness<Integer> harness =
            builder.setupOutputForSingletonOperatorChain(operatorFactory)
                    .setTaskMetricGroup(taskMetricGroup)
                    .build()) {
        // The checkpoint is triggered before any mailbox step runs, so its start delay keeps
        // growing until the task actually processes the trigger request.
        Future<Boolean> triggerFuture =
                harness.streamTask.triggerCheckpointAsync(
                        new CheckpointMetaData(1L, System.currentTimeMillis()),
                        CheckpointOptions.forCheckpointWithDefaultLocation());
        OneShotLatch checkpointAcknowledgeLatch = new OneShotLatch();
        harness.getCheckpointResponder().setAcknowledgeLatch(checkpointAcknowledgeLatch);
        assertFalse(triggerFuture.isDone());
        Thread.sleep(sleepTime);
        while (!triggerFuture.isDone()) {
            harness.streamTask.runMailboxStep();
        }
        Gauge<Long> checkpointStartDelayGauge = (Gauge<Long>) metrics.get(MetricNames.CHECKPOINT_START_DELAY_TIME);
        assertThat(checkpointStartDelayGauge.getValue(), greaterThanOrEqualTo(sleepTime * 1_000_000));
        Gauge<Double> busyTimeGauge = (Gauge<Double>) metrics.get(MetricNames.TASK_BUSY_TIME);
        assertThat(busyTimeGauge.getValue(), busyTimeMatcher);
        checkpointAcknowledgeLatch.await();
        TestCheckpointResponder.AcknowledgeReport acknowledgeReport = Iterables.getOnlyElement(harness.getCheckpointResponder().getAcknowledgeReports());
        assertThat(acknowledgeReport.getCheckpointMetrics().getCheckpointStartDelayNanos(), greaterThanOrEqualTo(sleepTime * 1_000_000));
    }
}
Also used: TaskMetricGroup (org.apache.flink.runtime.metrics.groups.TaskMetricGroup), CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData), Gauge (org.apache.flink.metrics.Gauge), TestCheckpointResponder (org.apache.flink.runtime.taskmanager.TestCheckpointResponder), OneShotLatch (org.apache.flink.core.testutils.OneShotLatch), Metric (org.apache.flink.metrics.Metric), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)
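
Everything the harness captures in the metrics map is typed only as Metric, which is why the test casts to Gauge before calling getValue(). A hypothetical helper in the same spirit is sketched below; the class name MetricValues and the NaN fallback are illustrative, only Counter.getCount() and Gauge.getValue() are taken from the Flink metrics API.

import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.Metric;

/** Hypothetical test utility; not part of the test above. */
final class MetricValues {

    /** Reads a numeric value from any Metric captured by the test's metric map. */
    static double numericValue(Metric metric) {
        if (metric instanceof Counter) {
            return ((Counter) metric).getCount();
        }
        if (metric instanceof Gauge) {
            Object value = ((Gauge<?>) metric).getValue();
            return value instanceof Number ? ((Number) value).doubleValue() : Double.NaN;
        }
        // Histograms and meters would need dedicated handling.
        return Double.NaN;
    }

    private MetricValues() {}
}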

Example 13 with Metric

Use of org.apache.flink.metrics.Metric in project flink by apache.

The class MultipleInputStreamTaskTest, method testLatencyMarker:

@Test
public void testLatencyMarker() throws Exception {
    final Map<String, Metric> metrics = new ConcurrentHashMap<>();
    final TaskMetricGroup taskMetricGroup = StreamTaskTestHarness.createTaskMetricGroup(metrics);
    try (StreamTaskMailboxTestHarness<String> testHarness =
            new StreamTaskMailboxTestHarnessBuilder<>(
                            MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                    .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                    .addInput(BasicTypeInfo.INT_TYPE_INFO)
                    .addInput(BasicTypeInfo.DOUBLE_TYPE_INFO)
                    .setupOutputForSingletonOperatorChain(
                            new MapToStringMultipleInputOperatorFactory(3))
                    .setTaskMetricGroup(taskMetricGroup)
                    .build()) {
        ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
        OperatorID sourceId = new OperatorID();
        LatencyMarker latencyMarker = new LatencyMarker(42L, sourceId, 0);
        testHarness.processElement(latencyMarker);
        expectedOutput.add(latencyMarker);
        assertThat(testHarness.getOutput(), contains(expectedOutput.toArray()));
        testHarness.endInput();
        testHarness.waitForTaskCompletion();
    }
}
Also used: InterceptingTaskMetricGroup (org.apache.flink.runtime.metrics.util.InterceptingTaskMetricGroup), TaskMetricGroup (org.apache.flink.runtime.metrics.groups.TaskMetricGroup), OperatorID (org.apache.flink.runtime.jobgraph.OperatorID), ArrayDeque (java.util.ArrayDeque), LatencyMarker (org.apache.flink.streaming.runtime.streamrecord.LatencyMarker), Metric (org.apache.flink.metrics.Metric), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), Test (org.junit.Test)
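
The test injects a pre-built LatencyMarker and expects it to reappear unchanged in the task output. The sketch below shows, under the assumption that LatencyMarker exposes its creation timestamp via getMarkedTime(), how such a marker is constructed and how its age could be derived downstream; the class and method names are invented for illustration.

import org.apache.flink.runtime.jobgraph.OperatorID;
import org.apache.flink.streaming.runtime.streamrecord.LatencyMarker;

/** Illustrative only; not part of the test above. */
final class LatencyMarkerSketch {

    /** Builds a marker stamped "now" for subtask 0 and returns its age at read time. */
    static long markerAgeMillis() {
        OperatorID sourceId = new OperatorID();
        LatencyMarker marker = new LatencyMarker(System.currentTimeMillis(), sourceId, 0);
        // Downstream consumers derive latency from the marker's creation timestamp.
        return System.currentTimeMillis() - marker.getMarkedTime();
    }

    private LatencyMarkerSketch() {}
}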

Example 14 with Metric

Use of org.apache.flink.metrics.Metric in project flink by apache.

The class SinkMetricsITCase, method assertSinkMetrics:

private void assertSinkMetrics(JobID jobId, long processedRecordsPerSubtask, int parallelism, int numSplits) {
    List<OperatorMetricGroup> groups = reporter.findOperatorMetricGroups(jobId, "MetricTestSink");
    assertThat(groups, hasSize(parallelism));
    int subtaskWithMetrics = 0;
    for (OperatorMetricGroup group : groups) {
        Map<String, Metric> metrics = reporter.getMetricsByGroup(group);
        // only 2 splits are assigned, so two of the groups never update their metrics
        if (group.getIOMetricGroup().getNumRecordsOutCounter().getCount() == 0) {
            continue;
        }
        subtaskWithMetrics++;
        // I/O metrics
        assertThat(group.getIOMetricGroup().getNumRecordsOutCounter(), isCounter(equalTo(processedRecordsPerSubtask)));
        assertThat(group.getIOMetricGroup().getNumBytesOutCounter(), isCounter(equalTo(processedRecordsPerSubtask * MetricWriter.RECORD_SIZE_IN_BYTES)));
        // MetricWriter increments the error counter only for even-numbered records
        assertThat(metrics.get(MetricNames.NUM_RECORDS_OUT_ERRORS), isCounter(equalTo((processedRecordsPerSubtask + 1) / 2)));
        // check that the latest send time is reported
        assertThat(metrics.get(MetricNames.CURRENT_SEND_TIME), isGauge(equalTo((processedRecordsPerSubtask - 1) * MetricWriter.BASE_SEND_TIME)));
    }
    assertThat(subtaskWithMetrics, equalTo(numSplits));
}
Also used: Metric (org.apache.flink.metrics.Metric), OperatorMetricGroup (org.apache.flink.metrics.groups.OperatorMetricGroup)
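
The counters and gauges asserted here are presumably maintained by the sink writer through its SinkWriterMetricGroup. The sketch below shows one way a writer similar to MetricWriter could drive them, assuming the getNumRecordsOutErrorsCounter() and setCurrentSendTimeGauge() accessors of SinkWriterMetricGroup; the class name and the recordSend method are invented for illustration.

import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.groups.SinkWriterMetricGroup;

/** Rough sketch of a writer that drives the metrics asserted above; not Flink code. */
class MetricEmittingWriterSketch {

    private final Counter errorCounter;
    private volatile long lastSendTime;

    MetricEmittingWriterSketch(SinkWriterMetricGroup metricGroup) {
        this.errorCounter = metricGroup.getNumRecordsOutErrorsCounter();
        // CURRENT_SEND_TIME is exposed as a gauge backed by the latest observed send time.
        metricGroup.setCurrentSendTimeGauge(() -> lastSendTime);
    }

    void recordSend(long sendTimeMillis, boolean failed) {
        lastSendTime = sendTimeMillis;
        if (failed) {
            errorCounter.inc();
        }
    }
}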

Aggregations

Metric (org.apache.flink.metrics.Metric): 14 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 7 usages
TaskMetricGroup (org.apache.flink.runtime.metrics.groups.TaskMetricGroup): 6 usages
Test (org.junit.Test): 6 usages
Gauge (org.apache.flink.metrics.Gauge): 4 usages
OperatorMetricGroup (org.apache.flink.metrics.groups.OperatorMetricGroup): 4 usages
InterceptingTaskMetricGroup (org.apache.flink.runtime.metrics.util.InterceptingTaskMetricGroup): 4 usages
Configuration (org.apache.flink.configuration.Configuration): 3 usages
MetricGroup (org.apache.flink.metrics.MetricGroup): 3 usages
IOException (java.io.IOException): 2 usages
JobID (org.apache.flink.api.common.JobID): 2 usages
MetricReporter (org.apache.flink.metrics.reporter.MetricReporter): 2 usages
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 2 usages
StreamConfig (org.apache.flink.streaming.api.graph.StreamConfig): 2 usages
Status (akka.actor.Status): 1 usage
SimpleEntry (java.util.AbstractMap.SimpleEntry): 1 usage
ArrayDeque (java.util.ArrayDeque): 1 usage
ArrayList (java.util.ArrayList): 1 usage
Arrays (java.util.Arrays): 1 usage
Collections (java.util.Collections): 1 usage