Use of org.apache.hadoop.metrics2.MetricsSource in the Apache Hadoop project:
class TestMetricsAnnotations, method testFields.
@Test
public void testFields() {
    // Touch every annotated metric field exactly once, then confirm that the
    // MetricsSource generated from the annotations reports each metric with
    // the expected info (name/description) and value.
    MyMetrics annotated = new MyMetrics();
    MetricsSource source = MetricsAnnotations.makeSource(annotated);

    annotated.c1.incr();
    annotated.c2.incr();
    annotated.g1.incr();
    annotated.g2.incr();
    annotated.g3.incr();
    annotated.r1.add(1);
    annotated.s1.add(1);
    annotated.rs1.add("rs1", 1);

    MetricsRecordBuilder builder = getMetrics(source);

    // Plain counters and gauges, each reported under its annotated name.
    verify(builder).addCounter(info("C1", "C1"), 1);
    verify(builder).addCounter(info("Counter2", "Counter2 desc"), 1L);
    verify(builder).addGauge(info("G1", "G1"), 1);
    verify(builder).addGauge(info("G2", "G2"), 1);
    verify(builder).addGauge(info("G3", "g3 desc"), 1L);

    // Rate-style metrics (r1, s1, rs1) each surface as a NumOps counter
    // plus an AvgTime gauge.
    verify(builder).addCounter(info("R1NumOps", "Number of ops for r1"), 1L);
    verify(builder).addGauge(info("R1AvgTime", "Average time for r1"), 1.0);
    verify(builder).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
    verify(builder).addGauge(info("S1AvgTime", "Average time for s1"), 1.0);
    verify(builder).addCounter(info("Rs1NumOps", "Number of ops for rs1"), 1L);
    verify(builder).addGauge(info("Rs1AvgTime", "Average time for rs1"), 1.0);
}
Use of org.apache.hadoop.metrics2.MetricsSource in the Apache HBase project:
class TestReplicationEndpoint, method testMetricsSourceBaseSourcePassThrough.
@Test
public void testMetricsSourceBaseSourcePassThrough() {
/*
* The replication MetricsSource wraps a MetricsReplicationTableSourceImpl,
* MetricsReplicationSourceSourceImpl and a MetricsReplicationGlobalSourceSource,
* so that metrics get written to both namespaces. Both of those classes wrap a
* MetricsReplicationSourceImpl that implements BaseSource, which allows
* for custom JMX metrics. This test checks to make sure the BaseSource decorator logic on
* MetricsSource actually calls down through the two layers of wrapping to the actual
* BaseSource.
*/
String id = "id";
// Both per-source and global wrappers share one mocked registry; only the
// method pass-through (not registry content) is verified below.
DynamicMetricsRegistry mockRegistry = mock(DynamicMetricsRegistry.class);
MetricsReplicationSourceImpl singleRms = mock(MetricsReplicationSourceImpl.class);
when(singleRms.getMetricsRegistry()).thenReturn(mockRegistry);
MetricsReplicationSourceImpl globalRms = mock(MetricsReplicationSourceImpl.class);
when(globalRms.getMetricsRegistry()).thenReturn(mockRegistry);
// Real (non-mock) wrapper instances around the mocked BaseSources, so the
// wrapping/delegation code under test actually executes.
MetricsReplicationSourceSource singleSourceSource = new MetricsReplicationSourceSourceImpl(singleRms, id);
MetricsReplicationGlobalSourceSource globalSourceSource = new MetricsReplicationGlobalSourceSourceImpl(globalRms);
// Spy the global wrapper only to verify incrFailedRecoveryQueue() is routed
// to it (stubbed to a no-op so no real metric work happens).
MetricsReplicationGlobalSourceSource spyglobalSourceSource = spy(globalSourceSource);
doNothing().when(spyglobalSourceSource).incrFailedRecoveryQueue();
Map<String, MetricsReplicationTableSource> singleSourceSourceByTable = new HashMap<>();
MetricsSource source = new MetricsSource(id, singleSourceSource, spyglobalSourceSource, singleSourceSourceByTable);
// Expected metric names after each wrapper prepends its namespace prefix:
// per-source metrics get "source.<id>.", global metrics get "source.".
String gaugeName = "gauge";
String singleGaugeName = "source.id." + gaugeName;
String globalGaugeName = "source." + gaugeName;
long delta = 1;
String counterName = "counter";
String singleCounterName = "source.id." + counterName;
String globalCounterName = "source." + counterName;
long count = 2;
// Exercise every BaseSource decorator method once.
source.decGauge(gaugeName, delta);
source.getMetricsContext();
source.getMetricsDescription();
source.getMetricsJmxContext();
source.getMetricsName();
source.incCounters(counterName, count);
source.incGauge(gaugeName, delta);
source.init();
source.removeMetric(gaugeName);
source.setGauge(gaugeName, delta);
source.updateHistogram(counterName, count);
source.incrFailedRecoveryQueue();
// Verify each call reached the underlying BaseSource mocks with the
// properly prefixed names. Note the getters/init are verified only on the
// global mock, while gauges/counters/histograms fan out to both.
verify(singleRms).decGauge(singleGaugeName, delta);
verify(globalRms).decGauge(globalGaugeName, delta);
verify(globalRms).getMetricsContext();
verify(globalRms).getMetricsJmxContext();
verify(globalRms).getMetricsName();
verify(singleRms).incCounters(singleCounterName, count);
verify(globalRms).incCounters(globalCounterName, count);
verify(singleRms).incGauge(singleGaugeName, delta);
verify(globalRms).incGauge(globalGaugeName, delta);
verify(globalRms).init();
verify(singleRms).removeMetric(singleGaugeName);
verify(globalRms).removeMetric(globalGaugeName);
verify(singleRms).setGauge(singleGaugeName, delta);
verify(globalRms).setGauge(globalGaugeName, delta);
verify(singleRms).updateHistogram(singleCounterName, count);
verify(globalRms).updateHistogram(globalCounterName, count);
verify(spyglobalSourceSource).incrFailedRecoveryQueue();
// check singleSourceSourceByTable metrics.
// singleSourceSourceByTable map entry will be created only
// after calling #setAgeOfLastShippedOpByTable
boolean containsRandomNewTable = source.getSingleSourceSourceByTable().containsKey("RandomNewTable");
Assert.assertEquals(false, containsRandomNewTable);
// updateTableLevelMetrics lazily creates the per-table source entry.
source.updateTableLevelMetrics(createWALEntriesWithSize("RandomNewTable"));
containsRandomNewTable = source.getSingleSourceSourceByTable().containsKey("RandomNewTable");
Assert.assertEquals(true, containsRandomNewTable);
MetricsReplicationTableSource msr = source.getSingleSourceSourceByTable().get("RandomNewTable");
// age should be greater than zero we created the entry with time in the past
Assert.assertTrue(msr.getLastShippedAge() > 0);
Assert.assertTrue(msr.getShippedBytes() > 0);
}
Use of org.apache.hadoop.metrics2.MetricsSource in the Apache Hadoop project:
class TestFSQueueMetrics, method checkSchedulingPolicy.
/**
 * Collects the metrics of the named queue's MetricsSource and asserts that
 * its first record carries the expected "SchedulingPolicy" tag value.
 */
private void checkSchedulingPolicy(String queueName, String policy) {
    MetricsSource source = TestQueueMetrics.queueSource(ms, queueName);
    MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl();
    source.getMetrics(metricsCollector, true);
    MetricsRecords.assertTag(
        metricsCollector.getRecords().get(0), "SchedulingPolicy", policy);
}
Use of org.apache.hadoop.metrics2.MetricsSource in the Apache Hadoop project:
class TestQueueMetrics, method testNodeTypeMetrics.
@Test
public void testNodeTypeMetrics() {
    // Verify that node-locality aggregations roll up from the leaf queue to
    // its parent and to the matching user sources on both levels.
    String parentName = "root";
    String leafName = "root.leaf";
    String userName = "alice";

    QueueMetrics parentQueueMetrics =
        QueueMetrics.forQueue(ms, parentName, null, true, conf);
    Queue parent =
        make(stub(Queue.class).returning(parentQueueMetrics).from.getMetrics());
    QueueMetrics leafMetrics =
        QueueMetrics.forQueue(ms, leafName, parent, true, conf);

    MetricsSource parentSource = queueSource(ms, parentName);
    MetricsSource leafSource = queueSource(ms, leafName);
    // Submitting an app registers the user so the user sources exist below.
    leafMetrics.submitApp(userName);
    MetricsSource leafUserSource = userSource(ms, leafName, userName);
    MetricsSource parentUserSource = userSource(ms, parentName, userName);

    // One NODE_LOCAL allocation shows up on all four sources.
    leafMetrics.incrNodeTypeAggregations(userName, NodeType.NODE_LOCAL);
    checkAggregatedNodeTypes(leafSource, 1L, 0L, 0L);
    checkAggregatedNodeTypes(parentSource, 1L, 0L, 0L);
    checkAggregatedNodeTypes(leafUserSource, 1L, 0L, 0L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 0L, 0L);

    // One RACK_LOCAL allocation increments only the rack-local column.
    leafMetrics.incrNodeTypeAggregations(userName, NodeType.RACK_LOCAL);
    checkAggregatedNodeTypes(leafSource, 1L, 1L, 0L);
    checkAggregatedNodeTypes(parentSource, 1L, 1L, 0L);
    checkAggregatedNodeTypes(leafUserSource, 1L, 1L, 0L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 1L, 0L);

    // Two OFF_SWITCH allocations accumulate in the off-switch column.
    leafMetrics.incrNodeTypeAggregations(userName, NodeType.OFF_SWITCH);
    checkAggregatedNodeTypes(leafSource, 1L, 1L, 1L);
    checkAggregatedNodeTypes(parentSource, 1L, 1L, 1L);
    checkAggregatedNodeTypes(leafUserSource, 1L, 1L, 1L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 1L, 1L);

    leafMetrics.incrNodeTypeAggregations(userName, NodeType.OFF_SWITCH);
    checkAggregatedNodeTypes(leafSource, 1L, 1L, 2L);
    checkAggregatedNodeTypes(parentSource, 1L, 1L, 2L);
    checkAggregatedNodeTypes(leafUserSource, 1L, 1L, 2L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 1L, 2L);
}
Use of org.apache.hadoop.metrics2.MetricsSource in the Apache Hadoop project:
class TestQueueMetrics, method checkAggregatedNodeTypes.
/**
 * Asserts the three aggregate container-allocation counters (node-local,
 * rack-local, off-switch) currently reported by the given metrics source.
 */
public static void checkAggregatedNodeTypes(MetricsSource source, long nodeLocal, long rackLocal, long offSwitch) {
    MetricsRecordBuilder recordBuilder = getMetrics(source);
    assertCounter("AggregateNodeLocalContainersAllocated", nodeLocal, recordBuilder);
    assertCounter("AggregateRackLocalContainersAllocated", rackLocal, recordBuilder);
    assertCounter("AggregateOffSwitchContainersAllocated", offSwitch, recordBuilder);
}
Aggregations