Example usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache hadoop project:
class TestQueueMetrics, method testTwoLevelWithUserMetrics.
@Test
public void testTwoLevelWithUserMetrics() {
    // Exercises a two-level queue hierarchy (root -> root.leaf) with per-user
    // metrics enabled: every update applied to the leaf queue should also be
    // reflected by the parent queue source and by both per-user sources.
    String rootName = "root";
    String leafName = "root.leaf";
    String userName = "alice";
    QueueMetrics rootMetrics = QueueMetrics.forQueue(ms, rootName, null, true, conf);
    Queue rootQueue = make(stub(Queue.class).returning(rootMetrics).from.getMetrics());
    QueueMetrics leafMetrics = QueueMetrics.forQueue(ms, leafName, rootQueue, true, conf);
    MetricsSource rootSource = queueSource(ms, rootName);
    MetricsSource leafSource = queueSource(ms, leafName);
    AppSchedulingInfo appInfo = mockApp(userName);
    leafMetrics.submitApp(userName);
    MetricsSource leafUserSource = userSource(ms, leafName, userName);
    MetricsSource rootUserSource = userSource(ms, rootName, userName);
    // One submitted app must be visible from all four sources.
    checkApps(leafSource, 1, 0, 0, 0, 0, 0, true);
    checkApps(rootSource, 1, 0, 0, 0, 0, 0, true);
    checkApps(leafUserSource, 1, 0, 0, 0, 0, 0, true);
    checkApps(rootUserSource, 1, 0, 0, 0, 0, 0, true);
    leafMetrics.submitAppAttempt(userName);
    checkApps(leafSource, 1, 1, 0, 0, 0, 0, true);
    checkApps(rootSource, 1, 1, 0, 0, 0, 0, true);
    checkApps(leafUserSource, 1, 1, 0, 0, 0, 0, true);
    checkApps(rootUserSource, 1, 1, 0, 0, 0, 0, true);
    rootMetrics.setAvailableResourcesToQueue(Resources.createResource(100 * GB, 100));
    leafMetrics.setAvailableResourcesToQueue(Resources.createResource(100 * GB, 100));
    rootMetrics.setAvailableResourcesToUser(userName, Resources.createResource(10 * GB, 10));
    leafMetrics.setAvailableResourcesToUser(userName, Resources.createResource(10 * GB, 10));
    leafMetrics.incrPendingResources(userName, 5, Resources.createResource(3 * GB, 3));
    checkResources(leafSource, 0, 0, 0, 0, 0, 100 * GB, 100, 15 * GB, 15, 5, 0, 0, 0);
    checkResources(rootSource, 0, 0, 0, 0, 0, 100 * GB, 100, 15 * GB, 15, 5, 0, 0, 0);
    checkResources(leafUserSource, 0, 0, 0, 0, 0, 10 * GB, 10, 15 * GB, 15, 5, 0, 0, 0);
    checkResources(rootUserSource, 0, 0, 0, 0, 0, 10 * GB, 10, 15 * GB, 15, 5, 0, 0, 0);
    leafMetrics.runAppAttempt(appInfo.getApplicationId(), userName);
    checkApps(leafSource, 1, 0, 1, 0, 0, 0, true);
    checkApps(leafUserSource, 1, 0, 1, 0, 0, 0, true);
    leafMetrics.allocateResources(userName, 3, Resources.createResource(2 * GB, 2), true);
    leafMetrics.reserveResource(userName, Resources.createResource(3 * GB, 3));
    // Available resources are set externally because they depend on dynamically
    // configurable cluster/queue resources.
    checkResources(leafSource, 6 * GB, 6, 3, 3, 0, 100 * GB, 100, 9 * GB, 9, 2, 3 * GB, 3, 1);
    checkResources(rootSource, 6 * GB, 6, 3, 3, 0, 100 * GB, 100, 9 * GB, 9, 2, 3 * GB, 3, 1);
    checkResources(leafUserSource, 6 * GB, 6, 3, 3, 0, 10 * GB, 10, 9 * GB, 9, 2, 3 * GB, 3, 1);
    checkResources(rootUserSource, 6 * GB, 6, 3, 3, 0, 10 * GB, 10, 9 * GB, 9, 2, 3 * GB, 3, 1);
    leafMetrics.releaseResources(userName, 1, Resources.createResource(2 * GB, 2));
    leafMetrics.unreserveResource(userName, Resources.createResource(3 * GB, 3));
    checkResources(leafSource, 4 * GB, 4, 2, 3, 1, 100 * GB, 100, 9 * GB, 9, 2, 0, 0, 0);
    checkResources(rootSource, 4 * GB, 4, 2, 3, 1, 100 * GB, 100, 9 * GB, 9, 2, 0, 0, 0);
    checkResources(leafUserSource, 4 * GB, 4, 2, 3, 1, 10 * GB, 10, 9 * GB, 9, 2, 0, 0, 0);
    checkResources(rootUserSource, 4 * GB, 4, 2, 3, 1, 10 * GB, 10, 9 * GB, 9, 2, 0, 0, 0);
    leafMetrics.finishAppAttempt(appInfo.getApplicationId(), appInfo.isPending(), appInfo.getUser());
    checkApps(leafSource, 1, 0, 0, 0, 0, 0, true);
    checkApps(rootSource, 1, 0, 0, 0, 0, 0, true);
    checkApps(leafUserSource, 1, 0, 0, 0, 0, 0, true);
    checkApps(rootUserSource, 1, 0, 0, 0, 0, 0, true);
    leafMetrics.finishApp(userName, RMAppState.FINISHED);
    checkApps(leafSource, 1, 0, 0, 1, 0, 0, true);
    checkApps(rootSource, 1, 0, 0, 1, 0, 0, true);
    checkApps(leafUserSource, 1, 0, 0, 1, 0, 0, true);
    checkApps(rootUserSource, 1, 0, 0, 1, 0, 0, true);
}
Example usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache hbase project:
class TestReplicationEndpoint, method testMetricsSourceBaseSourcePassthrough.
@Test
public void testMetricsSourceBaseSourcePassthrough() {
    /*
     * The replication MetricsSource wraps a MetricsReplicationSourceSourceImpl
     * and a MetricsReplicationGlobalSourceSource so that metrics are written to
     * both namespaces. Each of those classes wraps a MetricsReplicationSourceImpl
     * that implements BaseSource, which allows for custom JMX metrics.
     *
     * This test verifies that the BaseSource decorator logic on MetricsSource
     * really delegates through both layers of wrapping down to the actual
     * BaseSource instances.
     */
    String id = "id";
    DynamicMetricsRegistry registry = mock(DynamicMetricsRegistry.class);
    MetricsReplicationSourceImpl singleImpl = mock(MetricsReplicationSourceImpl.class);
    when(singleImpl.getMetricsRegistry()).thenReturn(registry);
    MetricsReplicationSourceImpl globalImpl = mock(MetricsReplicationSourceImpl.class);
    when(globalImpl.getMetricsRegistry()).thenReturn(registry);
    MetricsReplicationSourceSource singleSource = new MetricsReplicationSourceSourceImpl(singleImpl, id);
    MetricsReplicationSourceSource globalSource = new MetricsReplicationGlobalSourceSource(globalImpl);
    MetricsSource source = new MetricsSource(id, singleSource, globalSource);
    String gaugeName = "gauge";
    String scopedGaugeName = "source.id." + gaugeName;
    long delta = 1;
    String counterName = "counter";
    String scopedCounterName = "source.id." + counterName;
    long count = 2;
    // Drive each BaseSource pass-through method once...
    source.decGauge(gaugeName, delta);
    source.getMetricsContext();
    source.getMetricsDescription();
    source.getMetricsJmxContext();
    source.getMetricsName();
    source.incCounters(counterName, count);
    source.incGauge(gaugeName, delta);
    source.init();
    source.removeMetric(gaugeName);
    source.setGauge(gaugeName, delta);
    source.updateHistogram(counterName, count);
    // ...and confirm each call reached both wrapped BaseSource instances
    // (the single source with a scoped name, the global source unscoped).
    verify(singleImpl).decGauge(scopedGaugeName, delta);
    verify(globalImpl).decGauge(gaugeName, delta);
    verify(globalImpl).getMetricsContext();
    verify(globalImpl).getMetricsJmxContext();
    verify(globalImpl).getMetricsName();
    verify(singleImpl).incCounters(scopedCounterName, count);
    verify(globalImpl).incCounters(counterName, count);
    verify(singleImpl).incGauge(scopedGaugeName, delta);
    verify(globalImpl).incGauge(gaugeName, delta);
    verify(globalImpl).init();
    verify(singleImpl).removeMetric(scopedGaugeName);
    verify(globalImpl).removeMetric(gaugeName);
    verify(singleImpl).setGauge(scopedGaugeName, delta);
    verify(globalImpl).setGauge(gaugeName, delta);
    verify(singleImpl).updateHistogram(scopedCounterName, count);
    verify(globalImpl).updateHistogram(counterName, count);
}
Example usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache hive project:
class LlapDaemonExecutorMetrics, method create.
/**
 * Builds and registers an executor-metrics source with the LLAP metrics
 * system, wiring in JVM metrics for the same session.
 */
public static LlapDaemonExecutorMetrics create(String displayName, String sessionId, int numExecutors, final int[] intervals) {
    MetricsSystem metricsSystem = LlapMetricsSystem.instance();
    JvmMetrics jvmMetrics = JvmMetrics.create(MetricsUtils.METRICS_PROCESS_NAME, sessionId, metricsSystem);
    LlapDaemonExecutorMetrics executorMetrics =
        new LlapDaemonExecutorMetrics(displayName, jvmMetrics, sessionId, numExecutors, intervals);
    return metricsSystem.register(displayName, "LlapDaemon Executor Metrics", executorMetrics);
}
Example usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache hadoop project:
class TestQueueMetrics, method testNodeTypeMetrics.
@Test
public void testNodeTypeMetrics() {
    // Verifies that node-locality aggregation counters (node-local, rack-local,
    // off-switch) recorded on a leaf queue propagate to the parent queue source
    // and to the per-user sources at both levels.
    String parentQueueName = "root";
    String leafQueueName = "root.leaf";
    String user = "alice";
    QueueMetrics parentMetrics = QueueMetrics.forQueue(ms, parentQueueName, null, true, conf);
    Queue parentQueue = make(stub(Queue.class).returning(parentMetrics).from.getMetrics());
    QueueMetrics metrics = QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, conf);
    MetricsSource parentQueueSource = queueSource(ms, parentQueueName);
    MetricsSource queueSource = queueSource(ms, leafQueueName);
    metrics.submitApp(user);
    MetricsSource userSource = userSource(ms, leafQueueName, user);
    MetricsSource parentUserSource = userSource(ms, parentQueueName, user);
    // checkAggregatedNodeTypes arguments: (source, nodeLocal, rackLocal, offSwitch).
    metrics.incrNodeTypeAggregations(user, NodeType.NODE_LOCAL);
    checkAggregatedNodeTypes(queueSource, 1L, 0L, 0L);
    checkAggregatedNodeTypes(parentQueueSource, 1L, 0L, 0L);
    checkAggregatedNodeTypes(userSource, 1L, 0L, 0L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 0L, 0L);
    metrics.incrNodeTypeAggregations(user, NodeType.RACK_LOCAL);
    checkAggregatedNodeTypes(queueSource, 1L, 1L, 0L);
    checkAggregatedNodeTypes(parentQueueSource, 1L, 1L, 0L);
    checkAggregatedNodeTypes(userSource, 1L, 1L, 0L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 1L, 0L);
    metrics.incrNodeTypeAggregations(user, NodeType.OFF_SWITCH);
    checkAggregatedNodeTypes(queueSource, 1L, 1L, 1L);
    checkAggregatedNodeTypes(parentQueueSource, 1L, 1L, 1L);
    checkAggregatedNodeTypes(userSource, 1L, 1L, 1L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 1L, 1L);
    // A second OFF_SWITCH increment must accumulate, not overwrite.
    metrics.incrNodeTypeAggregations(user, NodeType.OFF_SWITCH);
    checkAggregatedNodeTypes(queueSource, 1L, 1L, 2L);
    checkAggregatedNodeTypes(parentQueueSource, 1L, 1L, 2L);
    checkAggregatedNodeTypes(userSource, 1L, 1L, 2L);
    checkAggregatedNodeTypes(parentUserSource, 1L, 1L, 2L);
}
Example usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache hadoop project:
class TestEntityGroupFSTimelineStore, method testSummaryRead.
@Test
public void testSummaryRead() throws Exception {
    // Parses the summary logs for a completed application, then verifies both
    // the entities that were read and the store's summary-read metrics counter.
    // Load data
    EntityGroupFSTimelineStore.AppLogs appLogs = store.new AppLogs(mainTestAppId, mainTestAppDirPath, AppState.COMPLETED);
    MutableCounterLong summaryLogEntityRead = store.metrics.getGetEntityToSummaryOps();
    // Snapshot the counter so the final check is robust to reads done earlier.
    long numEntityReadBefore = summaryLogEntityRead.value();
    TimelineDataManager tdm = PluginStoreTestUtils.getTdmWithStore(config, store);
    appLogs.scanForLogs();
    appLogs.parseSummaryLogs(tdm);
    // Verify single entity read
    PluginStoreTestUtils.verifyTestEntities(tdm);
    // Verify multiple entities read
    TimelineEntities entities = tdm.getEntities("type_1", null, null, null, null, null, null, null, EnumSet.allOf(TimelineReader.Field.class), UserGroupInformation.getLoginUser());
    // Fixed: JUnit's assertEquals takes (expected, actual); the original had the
    // arguments reversed, which produced misleading failure messages.
    assertEquals(1, entities.getEntities().size());
    for (TimelineEntity entity : entities.getEntities()) {
        assertEquals((Long) 123L, entity.getStartTime());
    }
    // Verify metrics: parsing the summary logs should account for 5 entity reads.
    assertEquals(numEntityReadBefore + 5L, summaryLogEntityRead.value());
}
Aggregations