Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestFairScheduler, method testPerfMetricsInited.
@Test
public void testPerfMetricsInited() {
  scheduler.init(conf);
  scheduler.start();
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  scheduler.fsOpDurations.getMetrics(collector, true);
  assertEquals("Incorrect number of perf metrics", 1, collector.getRecords().size());
}
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class ClientSCMMetrics, method create.
static ClientSCMMetrics create() {
  MetricsSystem ms = DefaultMetricsSystem.instance();
  ClientSCMMetrics metrics = new ClientSCMMetrics();
  ms.register("clientRequests", null, metrics);
  return metrics;
}
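The snippets on this page register and snapshot metrics sources but never show the @Metrics annotation itself. Below is a minimal sketch of what such an annotated source might look like; the class name, field names, and descriptions are illustrative, not the actual ClientSCMMetrics members.
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Hypothetical source class: @Metrics marks it as a metrics source, and the
// @Metric field is discovered and instantiated by the metrics system on register().
@Metrics(about = "Example client metrics", context = "yarn")
public class ExampleClientMetrics {

  @Metric("Number of client requests served")
  MutableCounterLong clientRequests;

  static ExampleClientMetrics create() {
    // register() returns the registered source, mirroring ClientSCMMetrics.create() above.
    return DefaultMetricsSystem.instance()
        .register("exampleClientRequests", "Example client metrics", new ExampleClientMetrics());
  }

  void incrClientRequests() {
    clientRequests.incr();
  }
}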
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hbase by apache.
The class MetricsRegionAggregateSourceImpl, method getMetrics.
/**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the collector.
*
* @param collector the collector
* @param all get all the metrics regardless of when they last changed.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  MetricsRecordBuilder mrb = collector.addRecord(metricsName);
  if (regionSources != null) {
    for (MetricsRegionSource regionMetricSource : regionSources) {
      if (regionMetricSource instanceof MetricsRegionSourceImpl) {
        ((MetricsRegionSourceImpl) regionMetricSource).snapshot(mrb, all);
      }
    }
    mrb.addGauge(Interns.info(NUM_REGIONS, NUMBER_OF_REGIONS_DESC), regionSources.size());
    metricsRegistry.snapshot(mrb, all);
  }
}
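As the Javadoc above explains, getMetrics pushes records into the supplied collector instead of returning them. The following sketch shows the caller's side of that contract; the dumpSnapshot helper is illustrative and works for any MetricsSource instance, such as the aggregate source above.
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;

// Pull a one-off snapshot from a MetricsSource and print its records.
static void dumpSnapshot(MetricsSource source) {
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  // 'true' asks the source to emit all metrics, not only those that changed.
  source.getMetrics(collector, true);
  for (MetricsRecord record : collector.getRecords()) {
    System.out.println(record.name() + " -> " + record.metrics());
  }
}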
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hbase by apache.
The class MetricsRegionServerSourceImpl, method getMetrics.
/**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the collector.
*
* @param metricsCollector Collector to accept metrics
* @param all push all or only changed?
*/
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);
  // rsWrap can be null because this function is called inside of init.
  if (rsWrap != null) {
    mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions())
        .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores())
        .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles())
        .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize())
        .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles())
        .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize())
        .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize())
        .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), rsWrap.getMaxStoreFileAge())
        .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), rsWrap.getMinStoreFileAge())
        .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), rsWrap.getAvgStoreFileAge())
        .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), rsWrap.getNumReferenceFiles())
        .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode())
        .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
        .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), rsWrap.getTotalRequestCount())
        .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), rsWrap.getReadRequestsCount())
        .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC), rsWrap.getFilteredReadRequestsCount())
        .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), rsWrap.getWriteRequestsCount())
        .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), rsWrap.getRpcGetRequestsCount())
        .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), rsWrap.getRpcScanRequestsCount())
        .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), rsWrap.getRpcMultiRequestsCount())
        .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), rsWrap.getRpcMutateRequestsCount())
        .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), rsWrap.getCheckAndMutateChecksFailed())
        .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), rsWrap.getCheckAndMutateChecksPassed())
        .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), rsWrap.getStoreFileIndexSize())
        .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), rsWrap.getTotalStaticIndexSize())
        .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), rsWrap.getTotalStaticBloomSize())
        .addGauge(Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), rsWrap.getNumMutationsWithoutWAL())
        .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), rsWrap.getDataInMemoryWithoutWAL())
        .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), rsWrap.getPercentFileLocal())
        .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), rsWrap.getPercentFileLocalSecondaryRegions())
        .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), rsWrap.getSplitQueueSize())
        .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getCompactionQueueSize())
        .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, SMALL_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getSmallCompactionQueueSize())
        .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, LARGE_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getLargeCompactionQueueSize())
        .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getCompactionQueueSize())
        .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), rsWrap.getFlushQueueSize())
        .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), rsWrap.getBlockCacheFreeSize())
        .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), rsWrap.getBlockCacheCount())
        .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), rsWrap.getBlockCacheSize())
        .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), rsWrap.getBlockCacheHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), rsWrap.getBlockCachePrimaryHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), rsWrap.getBlockCacheMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), rsWrap.getBlockCachePrimaryMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), rsWrap.getBlockCacheEvictedCount())
        .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), rsWrap.getBlockCachePrimaryEvictedCount())
        .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitPercent())
        .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent())
        .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), rsWrap.getBlockCacheFailedInsertions())
        .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), rsWrap.getDataMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), rsWrap.getLeafIndexMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), rsWrap.getBloomChunkMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), rsWrap.getMetaMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), rsWrap.getRootIndexMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), rsWrap.getIntermediateIndexMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), rsWrap.getFileInfoMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), rsWrap.getGeneralBloomMetaMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), rsWrap.getDeleteFamilyBloomMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), rsWrap.getTrailerMissCount())
        .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), rsWrap.getDataHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), rsWrap.getLeafIndexHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), rsWrap.getBloomChunkHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), rsWrap.getMetaHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), rsWrap.getRootIndexHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), rsWrap.getIntermediateIndexHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), rsWrap.getFileInfoHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), rsWrap.getGeneralBloomMetaHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), rsWrap.getDeleteFamilyBloomHitCount())
        .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), rsWrap.getTrailerHitCount())
        .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), rsWrap.getUpdatesBlockedTime())
        .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), rsWrap.getFlushedCellsCount())
        .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), rsWrap.getCompactedCellsCount())
        .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), rsWrap.getMajorCompactedCellsCount())
        .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), rsWrap.getFlushedCellsSize())
        .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), rsWrap.getCompactedCellsSize())
        .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), rsWrap.getMajorCompactedCellsSize())
        .addCounter(Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, CELLS_COUNT_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsCountCompactedFromMob())
        .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, CELLS_COUNT_COMPACTED_TO_MOB_DESC), rsWrap.getCellsCountCompactedToMob())
        .addCounter(Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, CELLS_SIZE_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsSizeCompactedFromMob())
        .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, CELLS_SIZE_COMPACTED_TO_MOB_DESC), rsWrap.getCellsSizeCompactedToMob())
        .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), rsWrap.getMobFlushCount())
        .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), rsWrap.getMobFlushedCellsCount())
        .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), rsWrap.getMobFlushedCellsSize())
        .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), rsWrap.getMobScanCellsCount())
        .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), rsWrap.getMobScanCellsSize())
        .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), rsWrap.getMobFileCacheCount())
        .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, MOB_FILE_CACHE_ACCESS_COUNT_DESC), rsWrap.getMobFileCacheAccessCount())
        .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), rsWrap.getMobFileCacheMissCount())
        .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, MOB_FILE_CACHE_EVICTED_COUNT_DESC), rsWrap.getMobFileCacheEvictedCount())
        .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), rsWrap.getMobFileCacheHitPercent())
        .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps())
        .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), rsWrap.getHedgedReadWins())
        .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), rsWrap.getBlockedRequestsCount())
        .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), rsWrap.getZookeeperQuorum())
        .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName())
        .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId());
  }
  metricsRegistry.snapshot(mrb, all);
  // source is registered in supers constructor, sometimes called before the whole initialization.
  if (metricsAdapter != null) {
    // snapshot MetricRegistry as well
    metricsAdapter.snapshotAllMetrics(registry, mrb);
  }
}
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestEntityGroupFSTimelineStore, method testPluginRead.
@Test
public void testPluginRead() throws Exception {
  // Verify precondition
  assertEquals(EntityGroupPlugInForTest.class.getName(),
      store.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_CLASSES));
  List<TimelineEntityGroupPlugin> currPlugins = store.getPlugins();
  for (TimelineEntityGroupPlugin plugin : currPlugins) {
    ClassLoader pluginClassLoader = plugin.getClass().getClassLoader();
    assertTrue("Should set up ApplicationClassLoader",
        pluginClassLoader instanceof ApplicationClassLoader);
    URL[] paths = ((URLClassLoader) pluginClassLoader).getURLs();
    boolean foundJAR = false;
    for (URL path : paths) {
      if (path.toString().contains(testJar.getAbsolutePath())) {
        foundJAR = true;
      }
    }
    assertTrue("Not found path " + testJar.getAbsolutePath() + " for plugin "
        + plugin.getClass().getName(), foundJAR);
  }
  // Load data and cache item, prepare timeline store by making a cache item
  EntityGroupFSTimelineStore.AppLogs appLogs =
      store.new AppLogs(mainTestAppId, mainTestAppDirPath, AppState.COMPLETED);
  EntityCacheItem cacheItem = new EntityCacheItem(
      EntityGroupPlugInForTest.getStandardTimelineGroupId(mainTestAppId), config);
  cacheItem.setAppLogs(appLogs);
  store.setCachedLogs(EntityGroupPlugInForTest.getStandardTimelineGroupId(mainTestAppId), cacheItem);
  MutableCounterLong detailLogEntityRead = store.metrics.getGetEntityToDetailOps();
  MutableStat cacheRefresh = store.metrics.getCacheRefresh();
  long numEntityReadBefore = detailLogEntityRead.value();
  long cacheRefreshBefore = cacheRefresh.lastStat().numSamples();
  // Generate TDM
  TimelineDataManager tdm = PluginStoreTestUtils.getTdmWithStore(config, store);
  // Verify single entity read
  TimelineEntity entity3 = tdm.getEntity("type_3", mainTestAppId.toString(),
      EnumSet.allOf(TimelineReader.Field.class), UserGroupInformation.getLoginUser());
  assertNotNull(entity3);
  assertEquals(entityNew.getStartTime(), entity3.getStartTime());
  // Verify multiple entities read
  NameValuePair primaryFilter = new NameValuePair(
      EntityGroupPlugInForTest.APP_ID_FILTER_NAME, mainTestAppId.toString());
  TimelineEntities entities = tdm.getEntities("type_3", primaryFilter, null, null, null, null,
      null, null, EnumSet.allOf(TimelineReader.Field.class), UserGroupInformation.getLoginUser());
  assertEquals(1, entities.getEntities().size());
  for (TimelineEntity entity : entities.getEntities()) {
    assertEquals(entityNew.getStartTime(), entity.getStartTime());
  }
  // Verify metrics
  assertEquals(numEntityReadBefore + 2L, detailLogEntityRead.value());
  assertEquals(cacheRefreshBefore + 1L, cacheRefresh.lastStat().numSamples());
}
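The metric assertions at the end of this test follow a before/after pattern: read the mutable metric, trigger the operation, then assert on the delta. A minimal, self-contained illustration of that pattern with a standalone counter follows; the test name, registry name, and counter name are made up for the example.
import static org.junit.Assert.assertEquals;

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.junit.Test;

@Test
public void testCounterDelta() {
  // Illustrative only: a counter created directly on a registry, no scheduler or store involved.
  MetricsRegistry registry = new MetricsRegistry("example");
  MutableCounterLong reads = registry.newCounter("exampleReads", "Example read counter", 0L);
  long before = reads.value();
  reads.incr(); // stands in for the operation being measured
  assertEquals(before + 1L, reads.value());
}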