Usage of org.apache.flink.metrics.groups.OperatorMetricGroup in the Apache Flink project.
From the class KafkaPartitionSplitReaderTest, method testNumBytesInCounter:
@Test
public void testNumBytesInCounter() throws Exception {
    // Unregistered operator group is sufficient here: we only need a live
    // numBytesIn counter, not a reporting backend.
    final OperatorMetricGroup metricGroup =
            UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup();
    final Counter bytesInCounter = metricGroup.getIOMetricGroup().getNumBytesInCounter();

    final KafkaPartitionSplitReader splitReader =
            createReader(new Properties(), InternalSourceReaderMetricGroup.wrap(metricGroup));

    // Assign the first split and consume from it.
    splitReader.handleSplitsChanges(
            new SplitsAddition<>(
                    Collections.singletonList(
                            new KafkaPartitionSplit(new TopicPartition(TOPIC1, 0), 0L))));
    splitReader.fetch();

    // The exact byte count is not predictable, so only assert that some
    // bytes were accounted for.
    final long bytesAfterFirstFetch = bytesInCounter.getCount();
    assertThat(bytesAfterFirstFetch, Matchers.greaterThan(0L));

    // Assign a second split and fetch again; the counter must keep growing.
    splitReader.handleSplitsChanges(
            new SplitsAddition<>(
                    Collections.singletonList(
                            new KafkaPartitionSplit(new TopicPartition(TOPIC2, 0), 0L))));
    splitReader.fetch();
    assertThat(bytesInCounter.getCount(), Matchers.greaterThan(bytesAfterFirstFetch));
}
Usage of org.apache.flink.metrics.groups.OperatorMetricGroup in the Apache Flink project.
From the class ChainedOperatorsMetricTest, method testOperatorIOMetricReuse:
@Test
public void testOperatorIOMetricReuse() throws Exception {
    // Set up the test environment with a real (but unreported) metric group
    // hierarchy so operator groups are actually registered on the task group.
    initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
    this.mockEnv =
            new MockEnvironmentBuilder()
                    .setTaskName(HEAD_OPERATOR_NAME)
                    .setManagedMemorySize(MEMORY_MANAGER_SIZE)
                    .setInputSplitProvider(this.inputSplitProvider)
                    .setBufferSize(NETWORK_BUFFER_SIZE)
                    .setMetricGroup(
                            TaskManagerMetricGroup.createTaskManagerMetricGroup(
                                            NoOpMetricRegistry.INSTANCE,
                                            "host",
                                            ResourceID.generate())
                                    .addJob(new JobID(), "jobName")
                                    .addTask(
                                            new JobVertexID(),
                                            new ExecutionAttemptID(),
                                            "task",
                                            0,
                                            0))
                    .build();

    final int keyCnt = 100;
    final int valCnt = 20;
    final int numRecords = keyCnt * valCnt;
    addInput(new UniformRecordGenerator(keyCnt, valCnt, false), 0);
    addOutput(this.outList);

    // Attach the chained operator behind the head operator.
    addChainedOperator();

    // Create the head operator and assemble the chain, then run the task.
    registerTask(FlatMapDriver.class, DuplicatingFlatMapFunction.class);
    final BatchTask<FlatMapFunction<Record, Record>, Record> testTask =
            new BatchTask<>(this.mockEnv);
    testTask.invoke();

    // Each of the two chained duplicating operators doubles the record count.
    Assert.assertEquals(numRecords * 2 * 2, this.outList.size());

    final TaskMetricGroup taskMetricGroup = mockEnv.getMetricGroup();

    // Verify task-level metrics: the task sees the raw input once and the
    // fully duplicated output.
    {
        final TaskIOMetricGroup ioMetricGroup = taskMetricGroup.getIOMetricGroup();
        final Counter numRecordsInCounter = ioMetricGroup.getNumRecordsInCounter();
        final Counter numRecordsOutCounter = ioMetricGroup.getNumRecordsOutCounter();
        Assert.assertEquals(numRecords, numRecordsInCounter.getCount());
        Assert.assertEquals(numRecords * 2 * 2, numRecordsOutCounter.getCount());
    }

    // Verify head operator metrics. Declared against the public
    // OperatorMetricGroup/OperatorIOMetricGroup interfaces (program to
    // interfaces), consistently with the chained-operator block below.
    {
        // getOrAddOperator only returns the existing group here; it doesn't
        // create a new one.
        final OperatorMetricGroup headOperatorGroup =
                taskMetricGroup.getOrAddOperator(HEAD_OPERATOR_NAME);
        final OperatorIOMetricGroup ioMetricGroup = headOperatorGroup.getIOMetricGroup();
        final Counter numRecordsInCounter = ioMetricGroup.getNumRecordsInCounter();
        final Counter numRecordsOutCounter = ioMetricGroup.getNumRecordsOutCounter();
        Assert.assertEquals(numRecords, numRecordsInCounter.getCount());
        Assert.assertEquals(numRecords * 2, numRecordsOutCounter.getCount());
    }

    // Verify chained operator metrics: its input is the head operator's
    // doubled output, and it doubles once more.
    {
        // getOrAddOperator only returns the existing group here; it doesn't
        // create a new one.
        final OperatorMetricGroup chainedOperatorGroup =
                taskMetricGroup.getOrAddOperator(CHAINED_OPERATOR_NAME);
        final OperatorIOMetricGroup ioMetricGroup = chainedOperatorGroup.getIOMetricGroup();
        final Counter numRecordsInCounter = ioMetricGroup.getNumRecordsInCounter();
        final Counter numRecordsOutCounter = ioMetricGroup.getNumRecordsOutCounter();
        Assert.assertEquals(numRecords * 2, numRecordsInCounter.getCount());
        Assert.assertEquals(numRecords * 2 * 2, numRecordsOutCounter.getCount());
    }
}
Usage of org.apache.flink.metrics.groups.OperatorMetricGroup in the Apache Flink project.
From the class SinkMetricsITCase, method assertSinkMetrics:
/**
 * Verifies the I/O and sink-specific metrics of every "MetricTestSink" operator
 * subtask of the given job. Only subtasks that actually received a split (i.e.
 * whose record counter moved) are checked in detail; the number of such active
 * subtasks must equal {@code numSplits}.
 */
private void assertSinkMetrics(
        JobID jobId, long processedRecordsPerSubtask, int parallelism, int numSplits) {
    final List<OperatorMetricGroup> operatorGroups =
            reporter.findOperatorMetricGroups(jobId, "MetricTestSink");
    assertThat(operatorGroups, hasSize(parallelism));

    int activeSubtasks = 0;
    for (final OperatorMetricGroup operatorGroup : operatorGroups) {
        final Map<String, Metric> metrics = reporter.getMetricsByGroup(operatorGroup);
        final OperatorIOMetricGroup ioGroup = operatorGroup.getIOMetricGroup();

        // Only numSplits splits are assigned, so the remaining subtasks never
        // update their metrics and are skipped here.
        if (ioGroup.getNumRecordsOutCounter().getCount() == 0) {
            continue;
        }
        activeSubtasks++;

        // Standard I/O metrics.
        assertThat(
                ioGroup.getNumRecordsOutCounter(),
                isCounter(equalTo(processedRecordsPerSubtask)));
        assertThat(
                ioGroup.getNumBytesOutCounter(),
                isCounter(
                        equalTo(
                                processedRecordsPerSubtask
                                        * MetricWriter.RECORD_SIZE_IN_BYTES)));

        // MetricWriter increments the error counter on every even record.
        assertThat(
                metrics.get(MetricNames.NUM_RECORDS_OUT_ERRORS),
                isCounter(equalTo((processedRecordsPerSubtask + 1) / 2)));

        // The gauge must reflect the send time of the last record.
        assertThat(
                metrics.get(MetricNames.CURRENT_SEND_TIME),
                isGauge(
                        equalTo(
                                (processedRecordsPerSubtask - 1)
                                        * MetricWriter.BASE_SEND_TIME)));
    }
    assertThat(activeSubtasks, equalTo(numSplits));
}
Aggregations