use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.
the class SourceMetricsITCase method assertSourceMetrics.
private void assertSourceMetrics(JobID jobId, InMemoryReporter reporter, long processedRecordsPerSubtask, long numTotalPerSubtask, int parallelism, int numSplits, boolean hasTimestamps) {
List<OperatorMetricGroup> groups = reporter.findOperatorMetricGroups(jobId, "MetricTestingSource");
assertThat(groups, hasSize(parallelism));
int subtaskWithMetrics = 0;
for (OperatorMetricGroup group : groups) {
Map<String, Metric> metrics = reporter.getMetricsByGroup(group);
// there are only 2 splits assigned; so two groups will not update metrics
if (group.getIOMetricGroup().getNumRecordsInCounter().getCount() == 0) {
// assert that optional metrics are not initialized when no split assigned
assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), isGauge(equalTo(InternalSourceReaderMetricGroup.UNDEFINED)));
assertThat(metrics.get(MetricNames.WATERMARK_LAG), nullValue());
continue;
}
subtaskWithMetrics++;
// I/O metrics
assertThat(group.getIOMetricGroup().getNumRecordsInCounter(), isCounter(equalTo(processedRecordsPerSubtask)));
assertThat(group.getIOMetricGroup().getNumBytesInCounter(), isCounter(equalTo(processedRecordsPerSubtask * MockRecordEmitter.RECORD_SIZE_IN_BYTES)));
// MockRecordEmitter increments the error counter on every even record
assertThat(metrics.get(MetricNames.NUM_RECORDS_IN_ERRORS), isCounter(equalTo(processedRecordsPerSubtask / 2)));
if (hasTimestamps) {
// The timestamp assigner subtracts EVENTTIME_LAG from the wall clock
assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)));
// Watermark is derived from timestamp, so it has to be in the same order of
// magnitude
assertThat(metrics.get(MetricNames.WATERMARK_LAG), isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)));
// Calculate the additional watermark lag (on top of event time lag)
Long watermarkLag = ((Gauge<Long>) metrics.get(MetricNames.WATERMARK_LAG)).getValue() - ((Gauge<Long>) metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)).getValue();
// That should correspond to the out-of-order boundedness
assertThat(watermarkLag, isCloseTo(WATERMARK_LAG, WATERMARK_EPSILON));
} else {
// assert that optional metrics are not initialized when no timestamp assigned
assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), isGauge(equalTo(InternalSourceReaderMetricGroup.UNDEFINED)));
assertThat(metrics.get(MetricNames.WATERMARK_LAG), nullValue());
}
long pendingRecords = numTotalPerSubtask - processedRecordsPerSubtask;
assertThat(metrics.get(MetricNames.PENDING_RECORDS), isGauge(equalTo(pendingRecords)));
assertThat(metrics.get(MetricNames.PENDING_BYTES), isGauge(equalTo(pendingRecords * MockRecordEmitter.RECORD_SIZE_IN_BYTES)));
// test is keeping source idle time metric busy with the barrier
assertThat(metrics.get(MetricNames.SOURCE_IDLE_TIME), isGauge(equalTo(0L)));
}
assertThat(subtaskWithMetrics, equalTo(numSplits));
}
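For context, metric assertions like the ones above require that the job's metrics are reported into memory rather than to an external system. Below is a minimal sketch of how such a test typically wires an InMemoryReporter into its MiniCluster; the @Rule setup and the task manager/slot counts are illustrative assumptions, not necessarily what SourceMetricsITCase uses.
// Retain metrics after job termination so they can still be asserted on.
private static final InMemoryReporter reporter = InMemoryReporter.createWithRetainedMetrics();

@Rule
public final MiniClusterWithClientResource miniClusterResource =
        new MiniClusterWithClientResource(
                new MiniClusterResourceConfiguration.Builder()
                        .setNumberTaskManagers(1)
                        .setNumberSlotsPerTaskManager(2)
                        // registers the in-memory reporter factory in the cluster configuration
                        .setConfiguration(reporter.addToConfiguration(new Configuration()))
                        .build());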
use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.
the class AbstractStreamOperator method setup.
// ------------------------------------------------------------------------
// Life Cycle
// ------------------------------------------------------------------------
@Override
public void setup(StreamTask<?, ?> containingTask, StreamConfig config, Output<StreamRecord<OUT>> output) {
final Environment environment = containingTask.getEnvironment();
this.container = containingTask;
this.config = config;
try {
InternalOperatorMetricGroup operatorMetricGroup = environment.getMetricGroup().getOrAddOperator(config.getOperatorID(), config.getOperatorName());
this.output = new CountingOutput<>(output, operatorMetricGroup.getIOMetricGroup().getNumRecordsOutCounter());
if (config.isChainEnd()) {
operatorMetricGroup.getIOMetricGroup().reuseOutputMetricsForTask();
}
this.metrics = operatorMetricGroup;
} catch (Exception e) {
LOG.warn("An error occurred while instantiating task metrics.", e);
this.metrics = UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup();
this.output = output;
}
this.combinedWatermark = IndexedCombinedWatermarkStatus.forInputsCount(2);
try {
Configuration taskManagerConfig = environment.getTaskManagerInfo().getConfiguration();
int historySize = taskManagerConfig.getInteger(MetricOptions.LATENCY_HISTORY_SIZE);
if (historySize <= 0) {
LOG.warn("{} has been set to a value equal or below 0: {}. Using default.", MetricOptions.LATENCY_HISTORY_SIZE, historySize);
historySize = MetricOptions.LATENCY_HISTORY_SIZE.defaultValue();
}
final String configuredGranularity = taskManagerConfig.getString(MetricOptions.LATENCY_SOURCE_GRANULARITY);
LatencyStats.Granularity granularity;
try {
granularity = LatencyStats.Granularity.valueOf(configuredGranularity.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException iae) {
granularity = LatencyStats.Granularity.OPERATOR;
LOG.warn("Configured value {} option for {} is invalid. Defaulting to {}.", configuredGranularity, MetricOptions.LATENCY_SOURCE_GRANULARITY.key(), granularity);
}
MetricGroup jobMetricGroup = this.metrics.getJobMetricGroup();
this.latencyStats = new LatencyStats(jobMetricGroup.addGroup("latency"), historySize, container.getIndexInSubtaskGroup(), getOperatorID(), granularity);
} catch (Exception e) {
LOG.warn("An error occurred while instantiating latency metrics.", e);
this.latencyStats = new LatencyStats(UnregisteredMetricGroups.createUnregisteredTaskManagerJobMetricGroup().addGroup("latency"), 1, 0, new OperatorID(), LatencyStats.Granularity.SINGLE);
}
this.runtimeContext = new StreamingRuntimeContext(environment, environment.getAccumulatorRegistry().getUserMap(), getMetricGroup(), getOperatorID(), getProcessingTimeService(), null, environment.getExternalResourceInfoProvider());
stateKeySelector1 = config.getStatePartitioner(0, getUserCodeClassloader());
stateKeySelector2 = config.getStatePartitioner(1, getUserCodeClassloader());
}
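The latency-related options read in setup come from the TaskManager configuration. A minimal sketch of setting them programmatically, assuming the standard MetricOptions keys; the concrete values are illustrative, not defaults:
Configuration taskManagerConfig = new Configuration();
// keep the last 128 latency measurements in the histogram (illustrative value)
taskManagerConfig.setInteger(MetricOptions.LATENCY_HISTORY_SIZE, 128);
// valid granularities are "single", "operator" and "subtask"
taskManagerConfig.setString(MetricOptions.LATENCY_SOURCE_GRANULARITY, "operator");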
use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.
the class MultipleInputStreamTaskTest method testMetrics.
/**
* With chained sources, the task's and the main operator's numbers of input records are two
* different things. The former should only count records coming in from the network, ignoring
* records produced inside the task itself (e.g. via a chained source). The main operator, on
* the other hand, should report records from all of its inputs, regardless of whether they
* come from the network or from a chained source.
*/
@Test
public void testMetrics() throws Exception {
HashMap<String, OperatorMetricGroup> operatorMetrics = new HashMap<>();
TaskMetricGroup taskMetricGroup = new UnregisteredMetricGroups.UnregisteredTaskMetricGroup() {
@Override
public InternalOperatorMetricGroup getOrAddOperator(OperatorID operatorID, String name) {
InternalOperatorMetricGroup operatorMetricGroup = super.getOrAddOperator(operatorID, name);
operatorMetrics.put(name, operatorMetricGroup);
return operatorMetricGroup;
}
};
String mainOperatorName = "MainOperator";
try (StreamTaskMailboxTestHarness<String> testHarness =
        new StreamTaskMailboxTestHarnessBuilder<>(MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                .modifyExecutionConfig(applyObjectReuse(objectReuse))
                .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                .addSourceInput(
                        new SourceOperatorFactory<>(
                                new LifeCycleTrackingMockSource(Boundedness.BOUNDED, 1),
                                WatermarkStrategy.noWatermarks()),
                        BasicTypeInfo.INT_TYPE_INFO)
                .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                .setupOperatorChain(new MapToStringMultipleInputOperatorFactory(3))
                .name(mainOperatorName)
                .chain(new OneInputStreamTaskTest.DuplicatingOperator(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .chain(new OneInputStreamTaskTest.DuplicatingOperator(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .chain(new OneInputStreamTaskTest.DuplicatingOperator(), BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .finish()
                .setTaskMetricGroup(taskMetricGroup)
                .build()) {
assertTrue(operatorMetrics.containsKey(mainOperatorName));
OperatorMetricGroup mainOperatorMetrics = operatorMetrics.get(mainOperatorName);
Counter numRecordsInCounter = taskMetricGroup.getIOMetricGroup().getNumRecordsInCounter();
Counter numRecordsOutCounter = taskMetricGroup.getIOMetricGroup().getNumRecordsOutCounter();
int numRecords1 = 5;
int numRecords2 = 3;
int numRecords3 = 2;
// add the source records before processing any network input, so the chained source does not end prematurely
for (int x = 0; x < numRecords2; x++) {
addSourceRecords(testHarness, 1, 42);
}
for (int x = 0; x < numRecords1; x++) {
testHarness.processElement(new StreamRecord<>("hello"), 0, 0);
}
for (int x = 0; x < numRecords3; x++) {
testHarness.processElement(new StreamRecord<>("hello"), 1, 0);
}
int networkRecordsIn = numRecords1 + numRecords3;
int mainOperatorRecordsIn = networkRecordsIn + numRecords2;
int totalRecordsOut = mainOperatorRecordsIn * 2 * 2 * 2; // there are three operators duplicating the records
assertEquals(mainOperatorRecordsIn, mainOperatorMetrics.getIOMetricGroup().getNumRecordsInCounter().getCount());
assertEquals(networkRecordsIn, numRecordsInCounter.getCount());
assertEquals(totalRecordsOut, numRecordsOutCounter.getCount());
testHarness.waitForTaskCompletion();
}
}
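The factor 2 * 2 * 2 in totalRecordsOut comes from the three chained DuplicatingOperator instances, each of which emits every input record twice. A minimal sketch of such an operator is shown below; the actual OneInputStreamTaskTest.DuplicatingOperator may differ in details.
static class DuplicatingOperator extends AbstractStreamOperator<String> implements OneInputStreamOperator<String, String> {
    @Override
    public void processElement(StreamRecord<String> element) {
        // emit each record twice, so every chained instance doubles the record count
        output.collect(element);
        output.collect(element);
    }
}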
use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.
the class RichAsyncFunctionTest method testRuntimeContext.
/**
* Test the set of runtime context methods in the context of a {@link RichAsyncFunction}.
*/
@Test
public void testRuntimeContext() throws Exception {
RichAsyncFunction<Integer, Integer> function = new RichAsyncFunction<Integer, Integer>() {
private static final long serialVersionUID = 1707630162838967972L;
@Override
public void asyncInvoke(Integer input, ResultFuture<Integer> resultFuture) throws Exception {
// no op
}
};
final String taskName = "foobarTask";
final OperatorMetricGroup metricGroup = UnregisteredMetricsGroup.createOperatorMetricGroup();
final int numberOfParallelSubtasks = 42;
final int indexOfSubtask = 43;
final int attemptNumber = 1337;
final String taskNameWithSubtask = "barfoo";
final ExecutionConfig executionConfig = mock(ExecutionConfig.class);
final ClassLoader userCodeClassLoader = mock(ClassLoader.class);
RuntimeContext mockedRuntimeContext = mock(RuntimeContext.class);
when(mockedRuntimeContext.getTaskName()).thenReturn(taskName);
when(mockedRuntimeContext.getMetricGroup()).thenReturn(metricGroup);
when(mockedRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(numberOfParallelSubtasks);
when(mockedRuntimeContext.getIndexOfThisSubtask()).thenReturn(indexOfSubtask);
when(mockedRuntimeContext.getAttemptNumber()).thenReturn(attemptNumber);
when(mockedRuntimeContext.getTaskNameWithSubtasks()).thenReturn(taskNameWithSubtask);
when(mockedRuntimeContext.getExecutionConfig()).thenReturn(executionConfig);
when(mockedRuntimeContext.getUserCodeClassLoader()).thenReturn(userCodeClassLoader);
function.setRuntimeContext(mockedRuntimeContext);
RuntimeContext runtimeContext = function.getRuntimeContext();
assertEquals(taskName, runtimeContext.getTaskName());
assertEquals(metricGroup, runtimeContext.getMetricGroup());
assertEquals(numberOfParallelSubtasks, runtimeContext.getNumberOfParallelSubtasks());
assertEquals(indexOfSubtask, runtimeContext.getIndexOfThisSubtask());
assertEquals(attemptNumber, runtimeContext.getAttemptNumber());
assertEquals(taskNameWithSubtask, runtimeContext.getTaskNameWithSubtasks());
assertEquals(executionConfig, runtimeContext.getExecutionConfig());
assertEquals(userCodeClassLoader, runtimeContext.getUserCodeClassLoader());
try {
runtimeContext.getDistributedCache();
fail("Expected getDistributedCached to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getState(new ValueStateDescriptor<>("foobar", Integer.class, 42));
fail("Expected getState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getListState(new ListStateDescriptor<>("foobar", Integer.class));
fail("Expected getListState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getReducingState(new ReducingStateDescriptor<>("foobar", new ReduceFunction<Integer>() {
private static final long serialVersionUID = 2136425961884441050L;
@Override
public Integer reduce(Integer value1, Integer value2) throws Exception {
return value1;
}
}, Integer.class));
fail("Expected getReducingState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getAggregatingState(new AggregatingStateDescriptor<>("foobar", new AggregateFunction<Integer, Integer, Integer>() {
@Override
public Integer createAccumulator() {
return null;
}
@Override
public Integer add(Integer value, Integer accumulator) {
return null;
}
@Override
public Integer getResult(Integer accumulator) {
return null;
}
@Override
public Integer merge(Integer a, Integer b) {
return null;
}
}, Integer.class));
fail("Expected getAggregatingState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getMapState(new MapStateDescriptor<>("foobar", Integer.class, String.class));
fail("Expected getMapState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.addAccumulator("foobar", new Accumulator<Integer, Integer>() {
private static final long serialVersionUID = -4673320336846482358L;
@Override
public void add(Integer value) {
// no op
}
@Override
public Integer getLocalValue() {
return null;
}
@Override
public void resetLocal() {
}
@Override
public void merge(Accumulator<Integer, Integer> other) {
}
@Override
public Accumulator<Integer, Integer> clone() {
return null;
}
});
fail("Expected addAccumulator to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getAccumulator("foobar");
fail("Expected getAccumulator to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getIntCounter("foobar");
fail("Expected getIntCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getLongCounter("foobar");
fail("Expected getLongCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getDoubleCounter("foobar");
fail("Expected getDoubleCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getHistogram("foobar");
fail("Expected getHistogram to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.hasBroadcastVariable("foobar");
fail("Expected hasBroadcastVariable to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getBroadcastVariable("foobar");
fail("Expected getBroadcastVariable to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getBroadcastVariableWithInitializer("foobar", new BroadcastVariableInitializer<Object, Object>() {
@Override
public Object initializeBroadcastVariable(Iterable<Object> data) {
return null;
}
});
fail("Expected getBroadcastVariableWithInitializer to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
}
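With a JUnit 4.13+ dependency, the repeated try/fail/catch blocks above could be written more compactly with Assert.assertThrows. A hedged sketch for a few of the calls, assuming a static import of org.junit.Assert.assertThrows:
assertThrows(UnsupportedOperationException.class, () -> runtimeContext.getDistributedCache());
assertThrows(UnsupportedOperationException.class, () -> runtimeContext.getState(new ValueStateDescriptor<>("foobar", Integer.class, 42)));
assertThrows(UnsupportedOperationException.class, () -> runtimeContext.getIntCounter("foobar"));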
use of org.apache.flink.metrics.groups.OperatorMetricGroup in project flink by apache.
the class CepRuntimeContextTest method testCepRuntimeContext.
@Test
public void testCepRuntimeContext() {
final String taskName = "foobarTask";
final OperatorMetricGroup metricGroup = UnregisteredMetricsGroup.createOperatorMetricGroup();
final int numberOfParallelSubtasks = 42;
final int indexOfSubtask = 43;
final int attemptNumber = 1337;
final String taskNameWithSubtask = "barfoo";
final ExecutionConfig executionConfig = mock(ExecutionConfig.class);
final ClassLoader userCodeClassLoader = mock(ClassLoader.class);
final DistributedCache distributedCache = mock(DistributedCache.class);
RuntimeContext mockedRuntimeContext = mock(RuntimeContext.class);
when(mockedRuntimeContext.getTaskName()).thenReturn(taskName);
when(mockedRuntimeContext.getMetricGroup()).thenReturn(metricGroup);
when(mockedRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(numberOfParallelSubtasks);
when(mockedRuntimeContext.getIndexOfThisSubtask()).thenReturn(indexOfSubtask);
when(mockedRuntimeContext.getAttemptNumber()).thenReturn(attemptNumber);
when(mockedRuntimeContext.getTaskNameWithSubtasks()).thenReturn(taskNameWithSubtask);
when(mockedRuntimeContext.getExecutionConfig()).thenReturn(executionConfig);
when(mockedRuntimeContext.getUserCodeClassLoader()).thenReturn(userCodeClassLoader);
when(mockedRuntimeContext.getDistributedCache()).thenReturn(distributedCache);
RuntimeContext runtimeContext = new CepRuntimeContext(mockedRuntimeContext);
assertEquals(taskName, runtimeContext.getTaskName());
assertEquals(metricGroup, runtimeContext.getMetricGroup());
assertEquals(numberOfParallelSubtasks, runtimeContext.getNumberOfParallelSubtasks());
assertEquals(indexOfSubtask, runtimeContext.getIndexOfThisSubtask());
assertEquals(attemptNumber, runtimeContext.getAttemptNumber());
assertEquals(taskNameWithSubtask, runtimeContext.getTaskNameWithSubtasks());
assertEquals(executionConfig, runtimeContext.getExecutionConfig());
assertEquals(userCodeClassLoader, runtimeContext.getUserCodeClassLoader());
assertEquals(distributedCache, runtimeContext.getDistributedCache());
try {
runtimeContext.getState(new ValueStateDescriptor<>("foobar", Integer.class, 42));
fail("Expected getState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getListState(new ListStateDescriptor<>("foobar", Integer.class));
fail("Expected getListState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getReducingState(new ReducingStateDescriptor<>("foobar", mock(ReduceFunction.class), Integer.class));
fail("Expected getReducingState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getAggregatingState(new AggregatingStateDescriptor<>("foobar", mock(AggregateFunction.class), Integer.class));
fail("Expected getAggregatingState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getMapState(new MapStateDescriptor<>("foobar", Integer.class, String.class));
fail("Expected getMapState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.addAccumulator("foobar", mock(Accumulator.class));
fail("Expected addAccumulator to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getAccumulator("foobar");
fail("Expected getAccumulator to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getIntCounter("foobar");
fail("Expected getIntCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getLongCounter("foobar");
fail("Expected getLongCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getDoubleCounter("foobar");
fail("Expected getDoubleCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getHistogram("foobar");
fail("Expected getHistogram to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.hasBroadcastVariable("foobar");
fail("Expected hasBroadcastVariable to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getBroadcastVariable("foobar");
fail("Expected getBroadcastVariable to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getBroadcastVariableWithInitializer("foobar", mock(BroadcastVariableInitializer.class));
fail("Expected getBroadcastVariableWithInitializer to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
}
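The assertions above show that CepRuntimeContext forwards read-only information (task name, metric group, parallelism, distributed cache, and so on) to the wrapped RuntimeContext while rejecting state, accumulator, and broadcast-variable access. A minimal, hypothetical sketch of that delegation pattern, not the actual Flink class:
class RestrictedRuntimeContextSketch {
    private final RuntimeContext delegate;

    RestrictedRuntimeContextSketch(RuntimeContext delegate) {
        this.delegate = delegate;
    }

    // read-only getters are forwarded to the wrapped context
    public String getTaskName() {
        return delegate.getTaskName();
    }

    public OperatorMetricGroup getMetricGroup() {
        return delegate.getMetricGroup();
    }

    // state access is rejected, matching the expectations asserted in the test above
    public <T> ValueState<T> getState(ValueStateDescriptor<T> stateProperties) {
        throw new UnsupportedOperationException("State is not supported.");
    }
}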