Search in sources:

Example 1 with OperatorMetricGroup

Use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.

From the class SourceMetricsITCase, method assertSourceMetrics.

private void assertSourceMetrics(JobID jobId, InMemoryReporter reporter, long processedRecordsPerSubtask, long numTotalPerSubtask, int parallelism, int numSplits, boolean hasTimestamps) {
    List<OperatorMetricGroup> groups = reporter.findOperatorMetricGroups(jobId, "MetricTestingSource");
    assertThat(groups, hasSize(parallelism));
    int subtaskWithMetrics = 0;
    for (OperatorMetricGroup group : groups) {
        Map<String, Metric> metrics = reporter.getMetricsByGroup(group);
        // only 2 splits are assigned, so two of the groups will not update their metrics
        if (group.getIOMetricGroup().getNumRecordsInCounter().getCount() == 0) {
            // assert that the optional metrics are not initialized when no split is assigned
            assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), isGauge(equalTo(InternalSourceReaderMetricGroup.UNDEFINED)));
            assertThat(metrics.get(MetricNames.WATERMARK_LAG), nullValue());
            continue;
        }
        subtaskWithMetrics++;
        // I/O metrics
        assertThat(group.getIOMetricGroup().getNumRecordsInCounter(), isCounter(equalTo(processedRecordsPerSubtask)));
        assertThat(group.getIOMetricGroup().getNumBytesInCounter(), isCounter(equalTo(processedRecordsPerSubtask * MockRecordEmitter.RECORD_SIZE_IN_BYTES)));
        // MockRecordEmitter increments the error counter for every even record
        assertThat(metrics.get(MetricNames.NUM_RECORDS_IN_ERRORS), isCounter(equalTo(processedRecordsPerSubtask / 2)));
        if (hasTimestamps) {
            // the timestamp assigner subtracts EVENTTIME_LAG from the wall clock
            assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)));
            // Watermark is derived from timestamp, so it has to be in the same order of
            // magnitude
            assertThat(metrics.get(MetricNames.WATERMARK_LAG), isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)));
            // Calculate the additional watermark lag (on top of event time lag)
            Long watermarkLag = ((Gauge<Long>) metrics.get(MetricNames.WATERMARK_LAG)).getValue() - ((Gauge<Long>) metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)).getValue();
            // That should correspond to the out-of-order boundedness
            assertThat(watermarkLag, isCloseTo(WATERMARK_LAG, WATERMARK_EPSILON));
        } else {
            // assert that the optional metrics are not initialized when no timestamp is assigned
            assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), isGauge(equalTo(InternalSourceReaderMetricGroup.UNDEFINED)));
            assertThat(metrics.get(MetricNames.WATERMARK_LAG), nullValue());
        }
        long pendingRecords = numTotalPerSubtask - processedRecordsPerSubtask;
        assertThat(metrics.get(MetricNames.PENDING_RECORDS), isGauge(equalTo(pendingRecords)));
        assertThat(metrics.get(MetricNames.PENDING_BYTES), isGauge(equalTo(pendingRecords * MockRecordEmitter.RECORD_SIZE_IN_BYTES)));
        // the test keeps the source idle time metric busy with the barrier
        assertThat(metrics.get(MetricNames.SOURCE_IDLE_TIME), isGauge(equalTo(0L)));
    }
    assertThat(subtaskWithMetrics, equalTo(numSplits));
}
Also used : Metric(org.apache.flink.metrics.Metric) OperatorMetricGroup(org.apache.flink.metrics.groups.OperatorMetricGroup)
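
A note on the watermark assertions above, grounded in the test's own comments: with timestamps enabled, the value of the WATERMARK_LAG gauge is roughly the emit-event-time lag plus the out-of-order bound, i.e. gauge(WATERMARK_LAG) ≈ gauge(CURRENT_EMIT_EVENT_TIME_LAG) + WATERMARK_LAG (the expected constant). Subtracting the two gauge values therefore isolates the additional watermark lag, which is why the final assertion compares the difference against WATERMARK_LAG within WATERMARK_EPSILON rather than against an absolute value.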

Example 2 with OperatorMetricGroup

Use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.

From the class AbstractStreamOperator, method setup.

// ------------------------------------------------------------------------
// Life Cycle
// ------------------------------------------------------------------------
@Override
public void setup(StreamTask<?, ?> containingTask, StreamConfig config, Output<StreamRecord<OUT>> output) {
    final Environment environment = containingTask.getEnvironment();
    this.container = containingTask;
    this.config = config;
    try {
        InternalOperatorMetricGroup operatorMetricGroup = environment.getMetricGroup().getOrAddOperator(config.getOperatorID(), config.getOperatorName());
        this.output = new CountingOutput<>(output, operatorMetricGroup.getIOMetricGroup().getNumRecordsOutCounter());
        if (config.isChainEnd()) {
            operatorMetricGroup.getIOMetricGroup().reuseOutputMetricsForTask();
        }
        this.metrics = operatorMetricGroup;
    } catch (Exception e) {
        LOG.warn("An error occurred while instantiating task metrics.", e);
        this.metrics = UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup();
        this.output = output;
    }
    this.combinedWatermark = IndexedCombinedWatermarkStatus.forInputsCount(2);
    try {
        Configuration taskManagerConfig = environment.getTaskManagerInfo().getConfiguration();
        int historySize = taskManagerConfig.getInteger(MetricOptions.LATENCY_HISTORY_SIZE);
        if (historySize <= 0) {
            LOG.warn("{} has been set to a value equal or below 0: {}. Using default.", MetricOptions.LATENCY_HISTORY_SIZE, historySize);
            historySize = MetricOptions.LATENCY_HISTORY_SIZE.defaultValue();
        }
        final String configuredGranularity = taskManagerConfig.getString(MetricOptions.LATENCY_SOURCE_GRANULARITY);
        LatencyStats.Granularity granularity;
        try {
            granularity = LatencyStats.Granularity.valueOf(configuredGranularity.toUpperCase(Locale.ROOT));
        } catch (IllegalArgumentException iae) {
            granularity = LatencyStats.Granularity.OPERATOR;
            LOG.warn("Configured value {} option for {} is invalid. Defaulting to {}.", configuredGranularity, MetricOptions.LATENCY_SOURCE_GRANULARITY.key(), granularity);
        }
        MetricGroup jobMetricGroup = this.metrics.getJobMetricGroup();
        this.latencyStats = new LatencyStats(jobMetricGroup.addGroup("latency"), historySize, container.getIndexInSubtaskGroup(), getOperatorID(), granularity);
    } catch (Exception e) {
        LOG.warn("An error occurred while instantiating latency metrics.", e);
        this.latencyStats = new LatencyStats(UnregisteredMetricGroups.createUnregisteredTaskManagerJobMetricGroup().addGroup("latency"), 1, 0, new OperatorID(), LatencyStats.Granularity.SINGLE);
    }
    this.runtimeContext = new StreamingRuntimeContext(environment, environment.getAccumulatorRegistry().getUserMap(), getMetricGroup(), getOperatorID(), getProcessingTimeService(), null, environment.getExternalResourceInfoProvider());
    stateKeySelector1 = config.getStatePartitioner(0, getUserCodeClassloader());
    stateKeySelector2 = config.getStatePartitioner(1, getUserCodeClassloader());
}
Also used : Configuration(org.apache.flink.configuration.Configuration) InternalOperatorMetricGroup(org.apache.flink.runtime.metrics.groups.InternalOperatorMetricGroup) OperatorMetricGroup(org.apache.flink.metrics.groups.OperatorMetricGroup) MetricGroup(org.apache.flink.metrics.MetricGroup) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) InternalOperatorMetricGroup(org.apache.flink.runtime.metrics.groups.InternalOperatorMetricGroup) Environment(org.apache.flink.runtime.execution.Environment) LatencyStats(org.apache.flink.streaming.util.LatencyStats)
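
For context, the two options that setup() reads above can also be set on the TaskManager configuration. A minimal sketch, assuming the standard Configuration setInteger/setString setters for ConfigOptions; the values are purely illustrative:

Configuration conf = new Configuration();
// number of latency measurements kept per histogram; values <= 0 fall back to the default (see the guard above)
conf.setInteger(MetricOptions.LATENCY_HISTORY_SIZE, 128);
// must match a LatencyStats.Granularity constant (matched case-insensitively in setup()), otherwise OPERATOR is used
conf.setString(MetricOptions.LATENCY_SOURCE_GRANULARITY, "operator");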

Example 3 with OperatorMetricGroup

Use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.

From the class MultipleInputStreamTaskTest, method testMetrics.

/**
 * With chained sources, the task's and the main operator's number of input records are two
 * different things. The former should take into account only records coming in from the
 * network, ignoring records produced inside the task itself (e.g. via a chained source). The
 * main operator, on the other hand, should report all records from all of its inputs,
 * regardless of whether they come from the network or from a chained input.
 */
@Test
public void testMetrics() throws Exception {
    HashMap<String, OperatorMetricGroup> operatorMetrics = new HashMap<>();
    TaskMetricGroup taskMetricGroup = new UnregisteredMetricGroups.UnregisteredTaskMetricGroup() {

        @Override
        public InternalOperatorMetricGroup getOrAddOperator(OperatorID operatorID, String name) {
            InternalOperatorMetricGroup operatorMetricGroup = super.getOrAddOperator(operatorID, name);
            operatorMetrics.put(name, operatorMetricGroup);
            return operatorMetricGroup;
        }
    };
    String mainOperatorName = "MainOperator";
    try (StreamTaskMailboxTestHarness<String> testHarness =
            new StreamTaskMailboxTestHarnessBuilder<>(MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                    .modifyExecutionConfig(applyObjectReuse(objectReuse))
                    .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                    .addSourceInput(
                            new SourceOperatorFactory<>(
                                    new LifeCycleTrackingMockSource(Boundedness.BOUNDED, 1),
                                    WatermarkStrategy.noWatermarks()),
                            BasicTypeInfo.INT_TYPE_INFO)
                    .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                    .setupOperatorChain(new MapToStringMultipleInputOperatorFactory(3))
                    .name(mainOperatorName)
                    .chain(new OneInputStreamTaskTest.DuplicatingOperator(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                    .chain(new OneInputStreamTaskTest.DuplicatingOperator(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                    .chain(new OneInputStreamTaskTest.DuplicatingOperator(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                    .finish()
                    .setTaskMetricGroup(taskMetricGroup)
                    .build()) {
        assertTrue(operatorMetrics.containsKey(mainOperatorName));
        OperatorMetricGroup mainOperatorMetrics = operatorMetrics.get(mainOperatorName);
        Counter numRecordsInCounter = taskMetricGroup.getIOMetricGroup().getNumRecordsInCounter();
        Counter numRecordsOutCounter = taskMetricGroup.getIOMetricGroup().getNumRecordsOutCounter();
        int numRecords1 = 5;
        int numRecords2 = 3;
        int numRecords3 = 2;
        // add the source records before processing any network elements, so the bounded chained source does not end prematurely
        for (int x = 0; x < numRecords2; x++) {
            addSourceRecords(testHarness, 1, 42);
        }
        for (int x = 0; x < numRecords1; x++) {
            testHarness.processElement(new StreamRecord<>("hello"), 0, 0);
        }
        for (int x = 0; x < numRecords3; x++) {
            testHarness.processElement(new StreamRecord<>("hello"), 1, 0);
        }
        int networkRecordsIn = numRecords1 + numRecords3;
        int mainOperatorRecordsIn = networkRecordsIn + numRecords2;
        // there are three operators duplicating the records
        int totalRecordsOut = mainOperatorRecordsIn * 2 * 2 * 2;
        assertEquals(mainOperatorRecordsIn, mainOperatorMetrics.getIOMetricGroup().getNumRecordsInCounter().getCount());
        assertEquals(networkRecordsIn, numRecordsInCounter.getCount());
        assertEquals(totalRecordsOut, numRecordsOutCounter.getCount());
        testHarness.waitForTaskCompletion();
    }
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) InterceptingTaskMetricGroup(org.apache.flink.runtime.metrics.util.InterceptingTaskMetricGroup) TaskMetricGroup(org.apache.flink.runtime.metrics.groups.TaskMetricGroup) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) SourceOperatorFactory(org.apache.flink.streaming.api.operators.SourceOperatorFactory) StreamTaskFinalCheckpointsTest.triggerCheckpoint(org.apache.flink.streaming.runtime.tasks.StreamTaskFinalCheckpointsTest.triggerCheckpoint) InternalOperatorMetricGroup(org.apache.flink.runtime.metrics.groups.InternalOperatorMetricGroup) Counter(org.apache.flink.metrics.Counter) InternalOperatorMetricGroup(org.apache.flink.runtime.metrics.groups.InternalOperatorMetricGroup) InterceptingOperatorMetricGroup(org.apache.flink.runtime.metrics.util.InterceptingOperatorMetricGroup) OperatorMetricGroup(org.apache.flink.metrics.groups.OperatorMetricGroup) Test(org.junit.Test)
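
To make the expected values concrete: networkRecordsIn = numRecords1 + numRecords3 = 5 + 2 = 7 (only the two network inputs count towards the task's numRecordsIn), mainOperatorRecordsIn = 7 + numRecords2 = 10 (the main operator additionally sees the 3 records from the chained source), and totalRecordsOut = 10 * 2 * 2 * 2 = 80, since each of the three chained DuplicatingOperator instances doubles the record count.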

Example 4 with OperatorMetricGroup

Use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.

From the class RichAsyncFunctionTest, method testRuntimeContext.

/**
 * Test the set of runtime context methods in the context of a {@link RichAsyncFunction}.
 */
@Test
public void testRuntimeContext() throws Exception {
    RichAsyncFunction<Integer, Integer> function = new RichAsyncFunction<Integer, Integer>() {

        private static final long serialVersionUID = 1707630162838967972L;

        @Override
        public void asyncInvoke(Integer input, ResultFuture<Integer> resultFuture) throws Exception {
        // no op
        }
    };
    final String taskName = "foobarTask";
    final OperatorMetricGroup metricGroup = UnregisteredMetricsGroup.createOperatorMetricGroup();
    final int numberOfParallelSubtasks = 42;
    final int indexOfSubtask = 43;
    final int attemptNumber = 1337;
    final String taskNameWithSubtask = "barfoo";
    final ExecutionConfig executionConfig = mock(ExecutionConfig.class);
    final ClassLoader userCodeClassLoader = mock(ClassLoader.class);
    RuntimeContext mockedRuntimeContext = mock(RuntimeContext.class);
    when(mockedRuntimeContext.getTaskName()).thenReturn(taskName);
    when(mockedRuntimeContext.getMetricGroup()).thenReturn(metricGroup);
    when(mockedRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(numberOfParallelSubtasks);
    when(mockedRuntimeContext.getIndexOfThisSubtask()).thenReturn(indexOfSubtask);
    when(mockedRuntimeContext.getAttemptNumber()).thenReturn(attemptNumber);
    when(mockedRuntimeContext.getTaskNameWithSubtasks()).thenReturn(taskNameWithSubtask);
    when(mockedRuntimeContext.getExecutionConfig()).thenReturn(executionConfig);
    when(mockedRuntimeContext.getUserCodeClassLoader()).thenReturn(userCodeClassLoader);
    function.setRuntimeContext(mockedRuntimeContext);
    RuntimeContext runtimeContext = function.getRuntimeContext();
    assertEquals(taskName, runtimeContext.getTaskName());
    assertEquals(metricGroup, runtimeContext.getMetricGroup());
    assertEquals(numberOfParallelSubtasks, runtimeContext.getNumberOfParallelSubtasks());
    assertEquals(indexOfSubtask, runtimeContext.getIndexOfThisSubtask());
    assertEquals(attemptNumber, runtimeContext.getAttemptNumber());
    assertEquals(taskNameWithSubtask, runtimeContext.getTaskNameWithSubtasks());
    assertEquals(executionConfig, runtimeContext.getExecutionConfig());
    assertEquals(userCodeClassLoader, runtimeContext.getUserCodeClassLoader());
    try {
        runtimeContext.getDistributedCache();
        fail("Expected getDistributedCache to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getState(new ValueStateDescriptor<>("foobar", Integer.class, 42));
        fail("Expected getState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getListState(new ListStateDescriptor<>("foobar", Integer.class));
        fail("Expected getListState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getReducingState(new ReducingStateDescriptor<>("foobar", new ReduceFunction<Integer>() {

            private static final long serialVersionUID = 2136425961884441050L;

            @Override
            public Integer reduce(Integer value1, Integer value2) throws Exception {
                return value1;
            }
        }, Integer.class));
        fail("Expected getReducingState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getAggregatingState(new AggregatingStateDescriptor<>("foobar", new AggregateFunction<Integer, Integer, Integer>() {

            @Override
            public Integer createAccumulator() {
                return null;
            }

            @Override
            public Integer add(Integer value, Integer accumulator) {
                return null;
            }

            @Override
            public Integer getResult(Integer accumulator) {
                return null;
            }

            @Override
            public Integer merge(Integer a, Integer b) {
                return null;
            }
        }, Integer.class));
        fail("Expected getAggregatingState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getMapState(new MapStateDescriptor<>("foobar", Integer.class, String.class));
        fail("Expected getMapState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.addAccumulator("foobar", new Accumulator<Integer, Integer>() {

            private static final long serialVersionUID = -4673320336846482358L;

            @Override
            public void add(Integer value) {
            // no op
            }

            @Override
            public Integer getLocalValue() {
                return null;
            }

            @Override
            public void resetLocal() {
            }

            @Override
            public void merge(Accumulator<Integer, Integer> other) {
            }

            @Override
            public Accumulator<Integer, Integer> clone() {
                return null;
            }
        });
        fail("Expected addAccumulator to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getAccumulator("foobar");
        fail("Expected getAccumulator to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getIntCounter("foobar");
        fail("Expected getIntCounter to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getLongCounter("foobar");
        fail("Expected getLongCounter to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getDoubleCounter("foobar");
        fail("Expected getDoubleCounter to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getHistogram("foobar");
        fail("Expected getHistogram to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.hasBroadcastVariable("foobar");
        fail("Expected hasBroadcastVariable to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getBroadcastVariable("foobar");
        fail("Expected getBroadcastVariable to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getBroadcastVariableWithInitializer("foobar", new BroadcastVariableInitializer<Object, Object>() {

            @Override
            public Object initializeBroadcastVariable(Iterable<Object> data) {
                return null;
            }
        });
        fail("Expected getBroadcastVariableWithInitializer to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
}
Also used : Accumulator(org.apache.flink.api.common.accumulators.Accumulator) ReduceFunction(org.apache.flink.api.common.functions.ReduceFunction) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) AggregateFunction(org.apache.flink.api.common.functions.AggregateFunction) RuntimeContext(org.apache.flink.api.common.functions.RuntimeContext) IterationRuntimeContext(org.apache.flink.api.common.functions.IterationRuntimeContext) OperatorMetricGroup(org.apache.flink.metrics.groups.OperatorMetricGroup) Test(org.junit.Test)
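
Since getMetricGroup() is forwarded to the wrapped context (as the assertion above verifies), user metrics can still be registered from a rich async function. A minimal sketch with a hypothetical CountingAsyncFunction and counter name; Counter is org.apache.flink.metrics.Counter and Configuration is org.apache.flink.configuration.Configuration:

public class CountingAsyncFunction extends RichAsyncFunction<Integer, Integer> {

    private transient Counter requests;

    @Override
    public void open(Configuration parameters) {
        // register a user counter on the operator's metric group
        requests = getRuntimeContext().getMetricGroup().counter("asyncRequests");
    }

    @Override
    public void asyncInvoke(Integer input, ResultFuture<Integer> resultFuture) {
        requests.inc();
        resultFuture.complete(java.util.Collections.singleton(input));
    }
}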

Example 5 with OperatorMetricGroup

Use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.

From the class CepRuntimeContextTest, method testCepRuntimeContext.

@Test
public void testCepRuntimeContext() {
    final String taskName = "foobarTask";
    final OperatorMetricGroup metricGroup = UnregisteredMetricsGroup.createOperatorMetricGroup();
    final int numberOfParallelSubtasks = 42;
    final int indexOfSubtask = 43;
    final int attemptNumber = 1337;
    final String taskNameWithSubtask = "barfoo";
    final ExecutionConfig executionConfig = mock(ExecutionConfig.class);
    final ClassLoader userCodeClassLoader = mock(ClassLoader.class);
    final DistributedCache distributedCache = mock(DistributedCache.class);
    RuntimeContext mockedRuntimeContext = mock(RuntimeContext.class);
    when(mockedRuntimeContext.getTaskName()).thenReturn(taskName);
    when(mockedRuntimeContext.getMetricGroup()).thenReturn(metricGroup);
    when(mockedRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(numberOfParallelSubtasks);
    when(mockedRuntimeContext.getIndexOfThisSubtask()).thenReturn(indexOfSubtask);
    when(mockedRuntimeContext.getAttemptNumber()).thenReturn(attemptNumber);
    when(mockedRuntimeContext.getTaskNameWithSubtasks()).thenReturn(taskNameWithSubtask);
    when(mockedRuntimeContext.getExecutionConfig()).thenReturn(executionConfig);
    when(mockedRuntimeContext.getUserCodeClassLoader()).thenReturn(userCodeClassLoader);
    when(mockedRuntimeContext.getDistributedCache()).thenReturn(distributedCache);
    RuntimeContext runtimeContext = new CepRuntimeContext(mockedRuntimeContext);
    assertEquals(taskName, runtimeContext.getTaskName());
    assertEquals(metricGroup, runtimeContext.getMetricGroup());
    assertEquals(numberOfParallelSubtasks, runtimeContext.getNumberOfParallelSubtasks());
    assertEquals(indexOfSubtask, runtimeContext.getIndexOfThisSubtask());
    assertEquals(attemptNumber, runtimeContext.getAttemptNumber());
    assertEquals(taskNameWithSubtask, runtimeContext.getTaskNameWithSubtasks());
    assertEquals(executionConfig, runtimeContext.getExecutionConfig());
    assertEquals(userCodeClassLoader, runtimeContext.getUserCodeClassLoader());
    assertEquals(distributedCache, runtimeContext.getDistributedCache());
    try {
        runtimeContext.getState(new ValueStateDescriptor<>("foobar", Integer.class, 42));
        fail("Expected getState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getListState(new ListStateDescriptor<>("foobar", Integer.class));
        fail("Expected getListState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getReducingState(new ReducingStateDescriptor<>("foobar", mock(ReduceFunction.class), Integer.class));
        fail("Expected getReducingState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getAggregatingState(new AggregatingStateDescriptor<>("foobar", mock(AggregateFunction.class), Integer.class));
        fail("Expected getAggregatingState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getMapState(new MapStateDescriptor<>("foobar", Integer.class, String.class));
        fail("Expected getMapState to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.addAccumulator("foobar", mock(Accumulator.class));
        fail("Expected addAccumulator to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getAccumulator("foobar");
        fail("Expected getAccumulator to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getIntCounter("foobar");
        fail("Expected getIntCounter to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getLongCounter("foobar");
        fail("Expected getLongCounter to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getDoubleCounter("foobar");
        fail("Expected getDoubleCounter to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getHistogram("foobar");
        fail("Expected getHistogram to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.hasBroadcastVariable("foobar");
        fail("Expected hasBroadcastVariable to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getBroadcastVariable("foobar");
        fail("Expected getBroadcastVariable to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
    try {
        runtimeContext.getBroadcastVariableWithInitializer("foobar", mock(BroadcastVariableInitializer.class));
        fail("Expected getBroadcastVariableWithInitializer to fail with unsupported operation exception.");
    } catch (UnsupportedOperationException e) {
    // expected
    }
}
Also used : Accumulator(org.apache.flink.api.common.accumulators.Accumulator) BroadcastVariableInitializer(org.apache.flink.api.common.functions.BroadcastVariableInitializer) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) DistributedCache(org.apache.flink.api.common.cache.DistributedCache) RuntimeContext(org.apache.flink.api.common.functions.RuntimeContext) OperatorMetricGroup(org.apache.flink.metrics.groups.OperatorMetricGroup) Test(org.junit.Test)

Aggregations

OperatorMetricGroup (org.apache.flink.metrics.groups.OperatorMetricGroup): 8 uses
Test (org.junit.Test): 4 uses
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 3 uses
Counter (org.apache.flink.metrics.Counter): 3 uses
InternalOperatorMetricGroup (org.apache.flink.runtime.metrics.groups.InternalOperatorMetricGroup): 3 uses
Accumulator (org.apache.flink.api.common.accumulators.Accumulator): 2 uses
RuntimeContext (org.apache.flink.api.common.functions.RuntimeContext): 2 uses
Metric (org.apache.flink.metrics.Metric): 2 uses
OperatorID (org.apache.flink.runtime.jobgraph.OperatorID): 2 uses
TaskMetricGroup (org.apache.flink.runtime.metrics.groups.TaskMetricGroup): 2 uses
HashMap (java.util.HashMap): 1 use
Properties (java.util.Properties): 1 use
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1 use
JobID (org.apache.flink.api.common.JobID): 1 use
DistributedCache (org.apache.flink.api.common.cache.DistributedCache): 1 use
AggregateFunction (org.apache.flink.api.common.functions.AggregateFunction): 1 use
BroadcastVariableInitializer (org.apache.flink.api.common.functions.BroadcastVariableInitializer): 1 use
FlatMapFunction (org.apache.flink.api.common.functions.FlatMapFunction): 1 use
IterationRuntimeContext (org.apache.flink.api.common.functions.IterationRuntimeContext): 1 use
ReduceFunction (org.apache.flink.api.common.functions.ReduceFunction): 1 use