Use of org.apache.flink.metrics.SimpleCounter in project flink by apache.
The class MetricDumpSerializerTest, method testSerialization:
@Test
public void testSerialization() throws IOException {
    MetricDumpSerialization.MetricDumpSerializer serializer = new MetricDumpSerialization.MetricDumpSerializer();
    MetricDumpSerialization.MetricDumpDeserializer deserializer = new MetricDumpSerialization.MetricDumpDeserializer();
    Map<Counter, Tuple2<QueryScopeInfo, String>> counters = new HashMap<>();
    Map<Gauge<?>, Tuple2<QueryScopeInfo, String>> gauges = new HashMap<>();
    Map<Histogram, Tuple2<QueryScopeInfo, String>> histograms = new HashMap<>();
    Map<Meter, Tuple2<QueryScopeInfo, String>> meters = new HashMap<>();
    SimpleCounter c1 = new SimpleCounter();
    SimpleCounter c2 = new SimpleCounter();
    c1.inc(1);
    c2.inc(2);
    Gauge<Integer> g1 = new Gauge<Integer>() {

        @Override
        public Integer getValue() {
            return 4;
        }
    };
    Histogram h1 = new TestHistogram();
    Meter m1 = new Meter() {

        @Override
        public void markEvent() {
        }

        @Override
        public void markEvent(long n) {
        }

        @Override
        public double getRate() {
            return 5;
        }

        @Override
        public long getCount() {
            return 10;
        }
    };
    counters.put(c1, new Tuple2<QueryScopeInfo, String>(new QueryScopeInfo.JobManagerQueryScopeInfo("A"), "c1"));
    counters.put(c2, new Tuple2<QueryScopeInfo, String>(new QueryScopeInfo.TaskManagerQueryScopeInfo("tmid", "B"), "c2"));
    meters.put(m1, new Tuple2<QueryScopeInfo, String>(new QueryScopeInfo.JobQueryScopeInfo("jid", "C"), "c3"));
    gauges.put(g1, new Tuple2<QueryScopeInfo, String>(new QueryScopeInfo.TaskQueryScopeInfo("jid", "vid", 2, "D"), "g1"));
    histograms.put(h1, new Tuple2<QueryScopeInfo, String>(new QueryScopeInfo.OperatorQueryScopeInfo("jid", "vid", 2, "opname", "E"), "h1"));
    MetricDumpSerialization.MetricSerializationResult serialized = serializer.serialize(counters, gauges, histograms, meters);
    List<MetricDump> deserialized = deserializer.deserialize(serialized);
    // ===== Counters ==============================================================================================
    assertEquals(5, deserialized.size()); // 2 counters + 1 gauge + 1 histogram + 1 meter
    for (MetricDump metric : deserialized) {
        switch (metric.getCategory()) {
            case METRIC_CATEGORY_COUNTER:
                MetricDump.CounterDump counterDump = (MetricDump.CounterDump) metric;
                switch ((byte) counterDump.count) {
                    case 1:
                        assertTrue(counterDump.scopeInfo instanceof QueryScopeInfo.JobManagerQueryScopeInfo);
                        assertEquals("A", counterDump.scopeInfo.scope);
                        assertEquals("c1", counterDump.name);
                        counters.remove(c1);
                        break;
                    case 2:
                        assertTrue(counterDump.scopeInfo instanceof QueryScopeInfo.TaskManagerQueryScopeInfo);
                        assertEquals("B", counterDump.scopeInfo.scope);
                        assertEquals("c2", counterDump.name);
                        assertEquals("tmid", ((QueryScopeInfo.TaskManagerQueryScopeInfo) counterDump.scopeInfo).taskManagerID);
                        counters.remove(c2);
                        break;
                    default:
                        fail();
                }
                break;
            case METRIC_CATEGORY_GAUGE:
                MetricDump.GaugeDump gaugeDump = (MetricDump.GaugeDump) metric;
                assertEquals("4", gaugeDump.value);
                assertEquals("g1", gaugeDump.name);
                assertTrue(gaugeDump.scopeInfo instanceof QueryScopeInfo.TaskQueryScopeInfo);
                QueryScopeInfo.TaskQueryScopeInfo taskInfo = (QueryScopeInfo.TaskQueryScopeInfo) gaugeDump.scopeInfo;
                assertEquals("D", taskInfo.scope);
                assertEquals("jid", taskInfo.jobID);
                assertEquals("vid", taskInfo.vertexID);
                assertEquals(2, taskInfo.subtaskIndex);
                gauges.remove(g1);
                break;
            case METRIC_CATEGORY_HISTOGRAM:
                MetricDump.HistogramDump histogramDump = (MetricDump.HistogramDump) metric;
                assertEquals("h1", histogramDump.name);
                assertEquals(0.5, histogramDump.median, 0.1);
                assertEquals(0.75, histogramDump.p75, 0.1);
                assertEquals(0.90, histogramDump.p90, 0.1);
                assertEquals(0.95, histogramDump.p95, 0.1);
                assertEquals(0.98, histogramDump.p98, 0.1);
                assertEquals(0.99, histogramDump.p99, 0.1);
                assertEquals(0.999, histogramDump.p999, 0.1);
                assertEquals(4, histogramDump.mean, 0.1);
                assertEquals(5, histogramDump.stddev, 0.1);
                assertEquals(6, histogramDump.max);
                assertEquals(7, histogramDump.min);
                assertTrue(histogramDump.scopeInfo instanceof QueryScopeInfo.OperatorQueryScopeInfo);
                QueryScopeInfo.OperatorQueryScopeInfo opInfo = (QueryScopeInfo.OperatorQueryScopeInfo) histogramDump.scopeInfo;
                assertEquals("E", opInfo.scope);
                assertEquals("jid", opInfo.jobID);
                assertEquals("vid", opInfo.vertexID);
                assertEquals(2, opInfo.subtaskIndex);
                assertEquals("opname", opInfo.operatorName);
                histograms.remove(h1);
                break;
            case METRIC_CATEGORY_METER:
                MetricDump.MeterDump meterDump = (MetricDump.MeterDump) metric;
                assertEquals(5.0, meterDump.rate, 0.1);
                assertTrue(meterDump.scopeInfo instanceof QueryScopeInfo.JobQueryScopeInfo);
                assertEquals("C", meterDump.scopeInfo.scope);
                assertEquals("c3", meterDump.name);
                assertEquals("jid", ((QueryScopeInfo.JobQueryScopeInfo) meterDump.scopeInfo).jobID);
                break;
            default:
                fail();
        }
    }
    assertTrue(counters.isEmpty());
    assertTrue(gauges.isEmpty());
    assertTrue(histograms.isEmpty());
}
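For orientation, SimpleCounter is the straightforward Counter implementation from org.apache.flink.metrics that these tests rely on; the snippets above only use inc(long) and getCount(). A minimal standalone sketch of the full Counter API (the class name SimpleCounterSketch is purely illustrative):

import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.SimpleCounter;

class SimpleCounterSketch {

    static long demo() {
        Counter counter = new SimpleCounter();
        // the count starts at 0
        counter.inc();       // count is now 1
        counter.inc(5L);     // count is now 6
        counter.dec(2L);     // count is now 4
        return counter.getCount(); // returns 4
    }
}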
Use of org.apache.flink.metrics.SimpleCounter in project flink by apache.
The class TaskIOMetricGroupTest, method testNumBytesProducedOfPartitionsMetrics:
@Test
public void testNumBytesProducedOfPartitionsMetrics() {
    TaskMetricGroup task = UnregisteredMetricGroups.createUnregisteredTaskMetricGroup();
    TaskIOMetricGroup taskIO = task.getIOMetricGroup();
    Counter c1 = new SimpleCounter();
    c1.inc(32L);
    Counter c2 = new SimpleCounter();
    c2.inc(64L);
    IntermediateResultPartitionID resultPartitionID1 = new IntermediateResultPartitionID();
    IntermediateResultPartitionID resultPartitionID2 = new IntermediateResultPartitionID();
    taskIO.registerNumBytesProducedCounterForPartition(resultPartitionID1, c1);
    taskIO.registerNumBytesProducedCounterForPartition(resultPartitionID2, c2);
    Map<IntermediateResultPartitionID, Long> numBytesProducedOfPartitions = taskIO.createSnapshot().getNumBytesProducedOfPartitions();
    assertEquals(2, numBytesProducedOfPartitions.size());
    assertEquals(32L, numBytesProducedOfPartitions.get(resultPartitionID1).longValue());
    assertEquals(64L, numBytesProducedOfPartitions.get(resultPartitionID2).longValue());
}
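The same registration pattern generalizes to any number of result partitions. A hedged sketch under that assumption, using only the API exercised in the test above (the helper name registerPartitionCounters is hypothetical):

static Map<IntermediateResultPartitionID, Counter> registerPartitionCounters(
        TaskIOMetricGroup taskIO, int numPartitions) {
    Map<IntermediateResultPartitionID, Counter> bytesProducedPerPartition = new HashMap<>();
    for (int i = 0; i < numPartitions; i++) {
        IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
        Counter bytesProduced = new SimpleCounter();
        // one bytes-produced counter per result partition, as in the test above
        taskIO.registerNumBytesProducedCounterForPartition(partitionId, bytesProduced);
        bytesProducedPerPartition.put(partitionId, bytesProduced);
    }
    // the per-partition totals later surface via taskIO.createSnapshot().getNumBytesProducedOfPartitions()
    return bytesProducedPerPartition;
}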
Use of org.apache.flink.metrics.SimpleCounter in project flink by apache.
The class TaskIOMetricGroupTest, method testTaskIOMetricGroup:
@Test
public void testTaskIOMetricGroup() throws InterruptedException {
    TaskMetricGroup task = UnregisteredMetricGroups.createUnregisteredTaskMetricGroup();
    TaskIOMetricGroup taskIO = task.getIOMetricGroup();
    // test counter forwarding
    assertNotNull(taskIO.getNumRecordsInCounter());
    assertNotNull(taskIO.getNumRecordsOutCounter());
    Counter c1 = new SimpleCounter();
    c1.inc(32L);
    Counter c2 = new SimpleCounter();
    c2.inc(64L);
    taskIO.reuseRecordsInputCounter(c1);
    taskIO.reuseRecordsOutputCounter(c2);
    assertEquals(32L, taskIO.getNumRecordsInCounter().getCount());
    assertEquals(64L, taskIO.getNumRecordsOutCounter().getCount());
    // test IOMetrics instantiation
    taskIO.getNumBytesInCounter().inc(100L);
    taskIO.getNumBytesOutCounter().inc(250L);
    taskIO.getNumBuffersOutCounter().inc(3L);
    taskIO.getIdleTimeMsPerSecond().markStart();
    taskIO.getSoftBackPressuredTimePerSecond().markStart();
    long softSleepTime = 2L;
    Thread.sleep(softSleepTime);
    taskIO.getIdleTimeMsPerSecond().markEnd();
    taskIO.getSoftBackPressuredTimePerSecond().markEnd();
    long hardSleepTime = 4L;
    taskIO.getHardBackPressuredTimePerSecond().markStart();
    Thread.sleep(hardSleepTime);
    taskIO.getHardBackPressuredTimePerSecond().markEnd();
    IOMetrics io = taskIO.createSnapshot();
    assertEquals(32L, io.getNumRecordsIn());
    assertEquals(64L, io.getNumRecordsOut());
    assertEquals(100L, io.getNumBytesIn());
    assertEquals(250L, io.getNumBytesOut());
    assertEquals(3L, taskIO.getNumBuffersOutCounter().getCount());
    assertThat(taskIO.getIdleTimeMsPerSecond().getCount(), greaterThanOrEqualTo(softSleepTime));
    assertThat(taskIO.getSoftBackPressuredTimePerSecond().getCount(), greaterThanOrEqualTo(softSleepTime));
    assertThat(taskIO.getHardBackPressuredTimePerSecond().getCount(), greaterThanOrEqualTo(hardSleepTime));
}
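The test brackets the measured intervals with bare markStart()/markEnd() calls. Outside a test, a common precaution is to close the timer in a finally block so an exception in the blocked section cannot leave it running; a sketch of that variant, reusing the taskIO group from the test (the sleep merely stands in for a blocking wait):

taskIO.getIdleTimeMsPerSecond().markStart();
try {
    Thread.sleep(10L); // stand-in for the wait whose duration should count as idle time
} finally {
    taskIO.getIdleTimeMsPerSecond().markEnd();
}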
Use of org.apache.flink.metrics.SimpleCounter in project flink by apache.
The class MetricQueryServiceTest, method testHandleOversizedMetricMessage:
@Test
public void testHandleOversizedMetricMessage() throws Exception {
    final long sizeLimit = 200L;
    MetricQueryService queryService = MetricQueryService.createMetricQueryService(rpcService, ResourceID.generate(), sizeLimit);
    queryService.start();
    final TaskManagerMetricGroup tm = UnregisteredMetricGroups.createUnregisteredTaskManagerMetricGroup();
    final String gaugeValue = "Hello";
    final long requiredGaugesToExceedLimit = sizeLimit / gaugeValue.length() + 1;
    List<Tuple2<String, Gauge<String>>> gauges = LongStream.range(0, requiredGaugesToExceedLimit).mapToObj(x -> Tuple2.of("gauge" + x, (Gauge<String>) () -> "Hello" + x)).collect(Collectors.toList());
    gauges.forEach(gauge -> queryService.addMetric(gauge.f0, gauge.f1, tm));
    queryService.addMetric("counter", new SimpleCounter(), tm);
    queryService.addMetric("histogram", new TestHistogram(), tm);
    queryService.addMetric("meter", new TestMeter(), tm);
    MetricDumpSerialization.MetricSerializationResult dump = queryService.queryMetrics(TIMEOUT).get();
    assertTrue(dump.serializedCounters.length > 0);
    assertEquals(1, dump.numCounters);
    assertTrue(dump.serializedMeters.length > 0);
    assertEquals(1, dump.numMeters);
    // gauges exceeded the size limit and will be excluded
    assertEquals(0, dump.serializedGauges.length);
    assertEquals(0, dump.numGauges);
    assertTrue(dump.serializedHistograms.length > 0);
    assertEquals(1, dump.numHistograms);
    // unregister all but one gauge to ensure gauges are reported again if the remaining fit
    for (int x = 1; x < gauges.size(); x++) {
        queryService.removeMetric(gauges.get(x).f1);
    }
    MetricDumpSerialization.MetricSerializationResult recoveredDump = queryService.queryMetrics(TIMEOUT).get();
    assertTrue(recoveredDump.serializedCounters.length > 0);
    assertEquals(1, recoveredDump.numCounters);
    assertTrue(recoveredDump.serializedMeters.length > 0);
    assertEquals(1, recoveredDump.numMeters);
    assertTrue(recoveredDump.serializedGauges.length > 0);
    assertEquals(1, recoveredDump.numGauges);
    assertTrue(recoveredDump.serializedHistograms.length > 0);
    assertEquals(1, recoveredDump.numHistograms);
}
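The number of gauges in this test falls directly out of the size limit; a short worked version of the arithmetic (values copied from the test):

long sizeLimit = 200L;
String gaugeValue = "Hello"; // 5 characters
// 200 / 5 + 1 = 41 gauges are registered, so the serialized gauges alone
// exceed the 200-byte limit and are dropped, while the single counter,
// meter, and histogram still fit underneath it.
long requiredGaugesToExceedLimit = sizeLimit / gaugeValue.length() + 1; // 41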
Use of org.apache.flink.metrics.SimpleCounter in project flink by apache.
The class MetricRegistryImplTest, method testMetricQueryServiceSetup:
@Test
public void testMetricQueryServiceSetup() throws Exception {
    MetricRegistryImpl metricRegistry = new MetricRegistryImpl(MetricRegistryTestUtils.defaultMetricRegistryConfiguration());
    Assert.assertNull(metricRegistry.getMetricQueryServiceGatewayRpcAddress());
    metricRegistry.startQueryService(new TestingRpcService(), new ResourceID("mqs"));
    MetricQueryServiceGateway metricQueryServiceGateway = metricRegistry.getMetricQueryServiceGateway();
    Assert.assertNotNull(metricQueryServiceGateway);
    metricRegistry.register(new SimpleCounter(), "counter", UnregisteredMetricGroups.createUnregisteredTaskManagerMetricGroup());
    boolean metricsSuccessfullyQueried = false;
    for (int x = 0; x < 10; x++) {
        MetricDumpSerialization.MetricSerializationResult metricSerializationResult = metricQueryServiceGateway.queryMetrics(Time.seconds(5)).get(5, TimeUnit.SECONDS);
        if (metricSerializationResult.numCounters == 1) {
            metricsSuccessfullyQueried = true;
        } else {
            Thread.sleep(50);
        }
    }
    Assert.assertTrue("metrics query did not return expected result", metricsSuccessfullyQueried);
}
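The polling loop above keeps issuing queries for all ten rounds even after a successful result. If that matters, the same API supports an early exit; a sketch of that variant (behavior otherwise unchanged):

boolean metricsSuccessfullyQueried = false;
for (int attempt = 0; attempt < 10 && !metricsSuccessfullyQueried; attempt++) {
    MetricDumpSerialization.MetricSerializationResult result =
            metricQueryServiceGateway.queryMetrics(Time.seconds(5)).get(5, TimeUnit.SECONDS);
    if (result.numCounters == 1) {
        metricsSuccessfullyQueried = true;
    } else {
        Thread.sleep(50);
    }
}
Assert.assertTrue("metrics query did not return expected result", metricsSuccessfullyQueried);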