Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
The class RollingAverages, method snapshot().
@Override
public void snapshot(MetricsRecordBuilder builder, boolean all) {
  if (all || changed()) {
    for (final Entry<String, LinkedBlockingDeque<SumAndCount>> entry
        : averages.entrySet()) {
      final String name = entry.getKey();
      final MetricsInfo avgInfo = info(
          String.format(avgInfoNameTemplate, StringUtils.capitalize(name)),
          String.format(avgInfoDescTemplate, StringUtils.uncapitalize(name)));
      double totalSum = 0;
      long totalCount = 0;
      for (final SumAndCount sumAndCount : entry.getValue()) {
        totalCount += sumAndCount.getCount();
        totalSum += sumAndCount.getSum();
      }
      if (totalCount != 0) {
        builder.addGauge(avgInfo, totalSum / totalCount);
      }
    }
    if (changed()) {
      clearChanged();
    }
  }
}
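A minimal sketch of the same pattern in a standalone MetricsSource, assuming the sum and count are maintained elsewhere; the class name AvgLatencySource and the metric name AvgLatencyTime are made up for illustration and are not part of RollingAverages:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.Interns;

class AvgLatencySource implements MetricsSource {
  private volatile double totalSum;  // accumulated latency, updated elsewhere
  private volatile long totalCount;  // number of samples, updated elsewhere

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    MetricsRecordBuilder rb = collector.addRecord("AvgLatency");
    // Same guard as RollingAverages.snapshot(): only emit the gauge when
    // there is at least one sample, avoiding a divide-by-zero.
    if (totalCount != 0) {
      rb.addGauge(Interns.info("AvgLatencyTime", "Average latency"),
          totalSum / totalCount);
    }
  }
}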
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
The class DecayRpcScheduler, method addTopNCallerSummary().
// Key: Caller(xyz).Volume and Caller(xyz).Priority
private void addTopNCallerSummary(MetricsRecordBuilder rb) {
  TopN topNCallers = getTopCallers(topUsersCount);
  Map<Object, Integer> decisions = scheduleCacheRef.get();
  final int actualCallerCount = topNCallers.size();
  for (int i = 0; i < actualCallerCount; i++) {
    NameValuePair entry = topNCallers.poll();
    String topCaller = "Caller(" + entry.getName() + ")";
    String topCallerVolume = topCaller + ".Volume";
    String topCallerPriority = topCaller + ".Priority";
    rb.addCounter(Interns.info(topCallerVolume, topCallerVolume),
        entry.getValue());
    Integer priority = decisions.get(entry.getName());
    if (priority != null) {
      rb.addCounter(Interns.info(topCallerPriority, topCallerPriority),
          priority);
    }
  }
}
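Because the per-caller metric names are assembled at collection time, Interns.info is used so the MetricsInfo for a repeated name is cached rather than rebuilt on every snapshot. A hedged sketch of the same dynamic-counter pattern, with a hypothetical per-user request map:

import java.util.Map;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;

class PerUserCounters {
  // Hypothetical helper: one counter per user, named User(<name>).Requests.
  static void addPerUserCounters(MetricsRecordBuilder rb,
      Map<String, Long> requestsByUser) {
    for (Map.Entry<String, Long> e : requestsByUser.entrySet()) {
      String name = "User(" + e.getKey() + ").Requests";
      // Interns.info caches name/description pairs, so the same metric
      // identity is reused across collection cycles.
      rb.addCounter(Interns.info(name, name), e.getValue());
    }
  }
}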
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
The class DecayRpcScheduler, method getMetrics().
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  // Metrics2 interface to act as a Metric source
  try {
    MetricsRecordBuilder rb = collector.addRecord(getClass().getName())
        .setContext(namespace);
    addDecayedCallVolume(rb);
    addUniqueIdentityCount(rb);
    addTopNCallerSummary(rb);
    addAvgResponseTimePerPriority(rb);
    addCallVolumePerPriority(rb);
    addRawCallVolume(rb);
  } catch (Exception e) {
    LOG.warn("Exception thrown while metric collection. Exception : "
        + e.getMessage());
  }
}
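getMetrics is only invoked once the object is registered as a source with the metrics system; DecayRpcScheduler wires this up through an internal metrics proxy, so the snippet below is a hedged sketch of the general registration call rather than the scheduler's exact code, and the name and description strings are illustrative:

import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

class SchedulerMetricsRegistration {
  // Register a MetricsSource so the metrics system periodically calls
  // its getMetrics(collector, all) to snapshot a record.
  static void register(MetricsSource source) {
    DefaultMetricsSystem.instance().register(
        "ExampleSchedulerMetrics", "Example scheduler metrics source", source);
  }
}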
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
The class MetricsSystemImpl, method getMetrics().
@Override
public synchronized void getMetrics(MetricsCollector builder, boolean all) {
  MetricsRecordBuilder rb = builder.addRecord(MS_NAME)
      .addGauge(MsInfo.NumActiveSources, sources.size())
      .addGauge(MsInfo.NumAllSources, allSources.size())
      .addGauge(MsInfo.NumActiveSinks, sinks.size())
      .addGauge(MsInfo.NumAllSinks, allSinks.size());
  for (MetricsSinkAdapter sa : sinks.values()) {
    sa.snapshot(rb, all);
  }
  registry.snapshot(rb, all);
}
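addGauge can take the MsInfo constants directly because MsInfo is an enum that implements MetricsInfo: the enum constant's built-in name() satisfies MetricsInfo.name(), so only description() needs an implementation. A minimal sketch of that pattern with hypothetical metric names:

import org.apache.hadoop.metrics2.MetricsInfo;

enum MyInfo implements MetricsInfo {
  NumWorkers("Number of worker threads"),
  QueueLength("Length of the pending work queue");

  private final String desc;

  MyInfo(String desc) {
    this.desc = desc;
  }

  @Override
  public String description() {
    return desc;
  }
}

A record builder can then be called as rb.addGauge(MyInfo.NumWorkers, workerCount), just as MetricsSystemImpl does with MsInfo.NumActiveSources above.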
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
The class TestCheckpoint, method testCheckpoint().
/**
 * Tests checkpoint in HDFS.
 */
@Test
public void testCheckpoint() throws IOException {
  Path tmpDir = new Path("/tmp_tmp");
  Path file1 = new Path("checkpoint.dat");
  Path file2 = new Path("checkpoint2.dat");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    //
    // verify that 'format' really blew away all pre-existing files
    //
    assertTrue(!fileSys.exists(file1));
    assertTrue(!fileSys.exists(file2));
    //
    // Create file1
    //
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
        replication, seed);
    checkFile(fileSys, file1, replication);
    for (int i = 0; i < 1000; i++) {
      fileSys.mkdirs(tmpDir);
      fileSys.delete(tmpDir, true);
    }
    //
    // Take a checkpoint
    //
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
    assertCounterGt("GetImageNumOps", 0, rb);
    assertCounterGt("GetEditNumOps", 0, rb);
    assertCounterGt("PutImageNumOps", 0, rb);
    assertGaugeGt("GetImageAvgTime", 0.0, rb);
    assertGaugeGt("GetEditAvgTime", 0.0, rb);
    assertGaugeGt("PutImageAvgTime", 0.0, rb);
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
  //
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    // check that file1 still exists
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    // create new file file2
    DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
        replication, seed);
    checkFile(fileSys, file2, replication);
    //
    // Take a checkpoint
    //
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    FSDirectory secondaryFsDir = secondary.getFSNamesystem().dir;
    INode rootInMap = secondaryFsDir.getInode(secondaryFsDir.rootDir.getId());
    assertSame(rootInMap, secondaryFsDir.rootDir);
    fileSys.delete(tmpDir, true);
    fileSys.mkdirs(tmpDir);
    secondary.doCheckpoint();
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
  //
  // Restart cluster and verify that file2 exists and
  // file1 does not exist.
  //
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes).format(false).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  assertTrue(!fileSys.exists(file1));
  assertTrue(fileSys.exists(tmpDir));
  try {
    // verify that file2 exists
    checkFile(fileSys, file2, replication);
  } finally {
    fileSys.close();
    cluster.shutdown();
    cluster = null;
  }
}
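The metric assertions in the first checkpoint phase come from the static helpers in org.apache.hadoop.test.MetricsAsserts: getMetrics(name) snapshots the registered source with that name into a MetricsRecordBuilder stub, and assertCounterGt / assertGaugeGt then inspect it. A hedged, self-contained sketch of the same pattern; the source name "MySource" and counter "RequestsCompleted" are illustrative, unlike NN_METRICS, which is defined by the test class:

import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;

public class ExampleMetricsAssertTest {
  @Test
  public void testRequestCounter() {
    // Snapshot the source registered under "MySource" and assert that the
    // RequestsCompleted counter has moved past zero.
    MetricsRecordBuilder rb = getMetrics("MySource");
    assertCounterGt("RequestsCompleted", 0, rb);
  }
}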