use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
the class DataNodeMetricHelper method getMetrics.
/**
 * Metrics helper: provides a helper function that lets an implementation
 * of the metrics2 interface act as a metrics source.
 *
 * @param collector the MetricsCollector passed in by the metrics system
 * @param beanClass the class that currently implements the metric functions
 * @param context a string that identifies the context
 *
 * @throws IOException if beanClass is null
 */
public static void getMetrics(MetricsCollector collector,
    FSDatasetMBean beanClass, String context) throws IOException {
  if (beanClass == null) {
    throw new IOException("beanClass cannot be null");
  }
  String className = beanClass.getClass().getName();
  collector.addRecord(className).setContext(context)
      .addGauge(Interns.info("Capacity", "Total storage capacity"),
          beanClass.getCapacity())
      .addGauge(Interns.info("DfsUsed", "Total bytes used by dfs datanode"),
          beanClass.getDfsUsed())
      .addGauge(Interns.info("Remaining", "Total bytes of free storage"),
          beanClass.getRemaining())
      .add(new MetricsTag(Interns.info("StorageInfo", "Storage ID"),
          beanClass.getStorageInfo()))
      .addGauge(Interns.info("NumFailedVolumes",
          "Number of failed Volumes in the data Node"),
          beanClass.getNumFailedVolumes())
      .addGauge(Interns.info("LastVolumeFailureDate",
          "Last Volume failure in milliseconds from epoch"),
          beanClass.getLastVolumeFailureDate())
      .addGauge(Interns.info("EstimatedCapacityLostTotal",
          "Total capacity lost due to volume failure"),
          beanClass.getEstimatedCapacityLostTotal())
      .addGauge(Interns.info("CacheUsed", "Datanode cache used in bytes"),
          beanClass.getCacheUsed())
      .addGauge(Interns.info("CacheCapacity", "Datanode cache capacity"),
          beanClass.getCacheCapacity())
      .addGauge(Interns.info("NumBlocksCached",
          "Datanode number of blocks cached"),
          beanClass.getNumBlocksCached())
      .addGauge(Interns.info("NumBlocksFailedToCache",
          "Datanode number of blocks failed to cache"),
          beanClass.getNumBlocksFailedToCache())
      .addGauge(Interns.info("NumBlocksFailedToUnCache",
          "Datanode number of blocks failed in cache eviction"),
          beanClass.getNumBlocksFailedToUncache());
}
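For context, a minimal sketch of how such a helper can be consumed: a dataset implementation that already exposes FSDatasetMBean can satisfy the MetricsSource interface by delegating to the helper. The class name SimpleDatasetSource, the constructor wiring, and the "FSDatasetState" context string are illustrative assumptions, not part of the Hadoop source.
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.DataNodeMetricHelper;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;

// Hypothetical adapter: exposes an existing FSDatasetMBean as a metrics2
// source by delegating record construction to the helper above.
public class SimpleDatasetSource implements MetricsSource {
  private final FSDatasetMBean dataset;

  public SimpleDatasetSource(FSDatasetMBean dataset) {
    this.dataset = dataset;
  }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    try {
      DataNodeMetricHelper.getMetrics(collector, dataset, "FSDatasetState");
    } catch (IOException e) {
      // The helper only throws when the bean is null.
      throw new IllegalStateException(e);
    }
  }
}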
use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
the class DataNodePeerMetrics method dumpSendPacketDownstreamAvgInfoAsJson.
/**
* Dump SendPacketDownstreamRollingAvgTime metrics as JSON.
*/
public String dumpSendPacketDownstreamAvgInfoAsJson() {
  final MetricsJsonBuilder builder = new MetricsJsonBuilder(null);
  sendPacketDownstreamRollingAvgerages.snapshot(builder, true);
  return builder.toString();
}
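A usage sketch; the peerMetrics variable is an assumption (in Hadoop, a DataNodePeerMetrics instance is reachable from the owning DataNode on branches that include this method):
// Minimal sketch: dump the current rolling averages for ad-hoc inspection.
String json = peerMetrics.dumpSendPacketDownstreamAvgInfoAsJson();
System.out.println("SendPacketDownstream rolling averages: " + json);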
use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
the class TestJournalNode method testJournal.
@Test(timeout = 100000)
public void testJournal() throws Exception {
  MetricsRecordBuilder metrics =
      MetricsAsserts.getMetrics(journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
  MetricsAsserts.assertGauge("LastJournalTimestamp", 0L, metrics);
  long beginTimestamp = System.currentTimeMillis();
  IPCLoggerChannel ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId,
      jn.getBoundIpcAddress());
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get();
  metrics = MetricsAsserts.getMetrics(journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
  long lastJournalTimestamp =
      MetricsAsserts.getLongGauge("LastJournalTimestamp", metrics);
  assertTrue(lastJournalTimestamp > beginTimestamp);
  beginTimestamp = lastJournalTimestamp;
  // Claim a committed txid far ahead of what has actually been written, so
  // the next batch is written while lagging:
  // CurrentLagTxns = 100 (committed) - 2 (highest written txn) = 98.
  ch.setCommittedTxId(100L);
  ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get();
  metrics = MetricsAsserts.getMetrics(journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics);
  lastJournalTimestamp =
      MetricsAsserts.getLongGauge("LastJournalTimestamp", metrics);
  assertTrue(lastJournalTimestamp > beginTimestamp);
}
use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
the class TestBlockManager method testAsyncIBR.
// spam the block manager with IBRs to verify queuing is occurring.
@Test
public void testAsyncIBR() throws Exception {
  Logger.getRootLogger().setLevel(Level.WARN);
  // will create files with many small blocks.
  final int blkSize = 4 * 1024;
  final int fileSize = blkSize * 100;
  final byte[] buf = new byte[2 * blkSize];
  final int numWriters = 4;
  final int repl = 3;
  final CyclicBarrier barrier = new CyclicBarrier(numWriters);
  final CountDownLatch writeLatch = new CountDownLatch(numWriters);
  final AtomicBoolean failure = new AtomicBoolean();
  final Configuration conf = new HdfsConfiguration();
  // lower the minimum block size so the small test blocks are accepted.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, blkSize);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
  try {
    cluster.waitActive();
    // create multiple writer threads to create a file with many blocks.
    // will test that concurrent writing causes IBR batching in the NN.
    Thread[] writers = new Thread[numWriters];
    for (int i = 0; i < writers.length; i++) {
      final Path p = new Path("/writer" + i);
      writers[i] = new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            FileSystem fs = cluster.getFileSystem();
            FSDataOutputStream os =
                fs.create(p, true, buf.length, (short) repl, blkSize);
            // align writers for maximum chance of IBR batching.
            barrier.await();
            int remaining = fileSize;
            while (remaining > 0) {
              os.write(buf);
              remaining -= buf.length;
            }
            os.close();
          } catch (Exception e) {
            e.printStackTrace();
            failure.set(true);
          }
          // let main thread know we are done.
          writeLatch.countDown();
        }
      });
      writers[i].start();
    }
    // when and how many IBRs are queued is indeterminate, so just watch
    // the metrics and verify something was queued at some point during
    // execution.
    boolean sawQueued = false;
    while (!writeLatch.await(10, TimeUnit.MILLISECONDS)) {
      assertFalse(failure.get());
      MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
      long queued = MetricsAsserts.getIntGauge("BlockOpsQueued", rb);
      sawQueued |= (queued > 0);
    }
    assertFalse(failure.get());
    assertTrue(sawQueued);
    // verify that batching of the IBRs occurred.
    MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
    long batched = MetricsAsserts.getLongCounter("BlockOpsBatched", rb);
    assertTrue(batched > 0);
  } finally {
    cluster.shutdown();
  }
}
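The polling loop above is a reusable pattern: sample a gauge every few milliseconds until the worker threads finish, remembering whether it was ever positive. A hedged distillation follows; the helper name and parameters are hypothetical, reusing the MetricsAsserts, MetricsRecordBuilder, CountDownLatch, and TimeUnit imports from the test.
// Hypothetical helper: poll a named int gauge from a metrics source every
// 10 ms until the latch releases; report whether it ever exceeded zero.
static boolean gaugeWentPositive(String source, String gauge,
    CountDownLatch done) throws Exception {
  boolean seen = false;
  while (!done.await(10, TimeUnit.MILLISECONDS)) {
    MetricsRecordBuilder rb = MetricsAsserts.getMetrics(source);
    seen |= MetricsAsserts.getIntGauge(gauge, rb) > 0;
  }
  return seen;
}
In the test, the loop would then read: boolean sawQueued = gaugeWentPositive("NameNodeActivity", "BlockOpsQueued", writeLatch);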
use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
the class TestStartupProgressMetrics method testRunningState.
@Test
public void testRunningState() {
  setStartupProgressForRunningState(startupProgress);
  MetricsRecordBuilder builder = getMetrics(metrics, true);
  assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);
  assertGauge("PercentComplete", 0.375f, builder);
  assertCounter("LoadingFsImageCount", 100L, builder);
  assertTrue(getLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
  assertCounter("LoadingFsImageTotal", 100L, builder);
  assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
  assertCounter("LoadingEditsCount", 100L, builder);
  assertTrue(getLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
  assertCounter("LoadingEditsTotal", 200L, builder);
  assertGauge("LoadingEditsPercentComplete", 0.5f, builder);
  assertCounter("SavingCheckpointCount", 0L, builder);
  assertCounter("SavingCheckpointElapsedTime", 0L, builder);
  assertCounter("SavingCheckpointTotal", 0L, builder);
  assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
  assertCounter("SafeModeCount", 0L, builder);
  assertCounter("SafeModeElapsedTime", 0L, builder);
  assertCounter("SafeModeTotal", 0L, builder);
  assertGauge("SafeModePercentComplete", 0.0f, builder);
}
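Note that the expected overall PercentComplete of 0.375 is consistent with a uniform average of the four per-phase percentages asserted above, assuming each phase is weighted equally:
(1.0 + 0.5 + 0.0 + 0.0) / 4 = 0.375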