Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
From the class TestDataNodeMetrics, method testTimeoutMetric.
@Test(timeout = 60000)
public void testTimeoutMetric() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final Path path = new Path("/test");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final List<FSDataOutputStream> streams = Lists.newArrayList();
  try {
    final FSDataOutputStream out = cluster.getFileSystem().create(path, (short) 2);
    final DataNodeFaultInjector injector = Mockito.mock(DataNodeFaultInjector.class);
    Mockito.doThrow(new IOException("mock IOException")).when(injector).writeBlockAfterFlush();
    DataNodeFaultInjector.instance = injector;
    streams.add(out);
    out.writeBytes("old gs data\n");
    out.hflush();
    /* Test the metric. */
    final MetricsRecordBuilder dnMetrics = getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
    assertCounter("DatanodeNetworkErrors", 1L, dnMetrics);
    /* Test JMX datanode network counts. */
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    final ObjectName mxbeanName = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    final Object dnc = mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
    final String allDnc = dnc.toString();
    assertTrue("expected to see loopback address", allDnc.indexOf("127.0.0.1") >= 0);
    assertTrue("expected to see networkErrors", allDnc.indexOf("networkErrors") >= 0);
  } finally {
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    if (cluster != null) {
      cluster.shutdown();
    }
    DataNodeFaultInjector.instance = new DataNodeFaultInjector();
  }
}
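The getMetrics and assertCounter calls in these tests are static imports from Hadoop's test helper org.apache.hadoop.test.MetricsAsserts, which snapshots a registered metrics source into a mocked MetricsRecordBuilder. A minimal sketch of reading the same counter with those helpers; the class and method names below are illustrative, and sourceName is what datanode.getMetrics().name() returns:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;

public class MetricsReadSketch {
  // Hypothetical helper, not part of the Hadoop test suite.
  static long readNetworkErrors(String sourceName) {
    // Snapshot the named metrics source into a mock MetricsRecordBuilder.
    MetricsRecordBuilder rb = getMetrics(sourceName);
    // Fail fast if the counter is not at the expected value...
    assertCounter("DatanodeNetworkErrors", 1L, rb);
    // ...or pull the raw value out for custom checks.
    return getLongCounter("DatanodeNetworkErrors", rb);
  }
}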
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
From the class TestDataNodeMetrics, method testSendDataPacketMetrics.
@Test
public void testSendDataPacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Create and read a 1 byte file
    Path tmpfile = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, tmpfile, (long) 1, (short) 1, 1L);
    DFSTestUtil.readFile(fs, tmpfile);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    // Expect 2 packets: 1 for the 1 byte read, 1 for the empty packet
    // signaling the end of the block
    assertCounter("SendDataPacketTransferNanosNumOps", (long) 2, rb);
    assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long) 2, rb);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check that the sendPacket percentiles rolled to non-zero values
    String sec = interval + "s";
    assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
    assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
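The (interval + 1) second sleep matters because percentile metrics in metrics2 are backed by MutableQuantiles, which compute and publish a snapshot only when a rollover interval elapses; before the first rollover the quantile gauges hold no samples. A minimal sketch of that pattern, assuming a standalone MetricsRegistry and illustrative names rather than the actual DataNodeMetrics wiring:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

public class QuantilesSketch {
  private final MetricsRegistry registry = new MetricsRegistry("ExampleSource");
  // Rolls over every 1 second, matching dfs.metrics.percentiles.intervals = 1.
  private final MutableQuantiles transferNanos = registry.newQuantiles(
      "SendDataPacketTransferNanos", "Packet transfer time", "ops", "latency", 1);

  void recordTransfer(long elapsedNanos) {
    transferNanos.add(elapsedNanos); // samples accumulate until the next rollover
  }
}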
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.
From the class TestDataNodeMetrics, method testReceivePacketMetrics.
@Test
public void testReceivePacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path testFile = new Path("/testFlushNanosMetric.txt");
    FSDataOutputStream fout = fs.create(testFile);
    fout.write(new byte[1]);
    fout.hsync();
    fout.close();
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
    // Expect two flushes: one after writing, one when the data and
    // metadata files are closed.
    assertCounter("FlushNanosNumOps", 2L, dnMetrics);
    // Expect two syncs: one from the hsync, one on close.
    assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the receivePacket percentiles that should be non-zero
    String sec = interval + "s";
    assertQuantileGauges("FlushNanos" + sec, dnMetrics);
    assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
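The NumOps counters asserted here follow the metrics2 naming convention for rate metrics: snapshotting a MutableRate named FlushNanos emits a FlushNanosNumOps counter plus a FlushNanosAvgTime gauge. A minimal sketch under that assumption, with illustrative names (not the actual DataNodeMetrics source):

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;

public class RateSketch {
  private final MetricsRegistry registry = new MetricsRegistry("ExampleSource");
  private final MutableRate flushNanos = registry.newRate("FlushNanos", "Flush latency");

  void onFlush(long elapsedNanos) {
    flushNanos.add(elapsedNanos); // increments NumOps and folds into AvgTime
  }
}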
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project accumulo by apache.
From the class Metrics2ThriftMetrics, method getMetrics.
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  MetricsRecordBuilder builder = collector.addRecord(record).setContext(CONTEXT);
  registry.snapshot(builder, all);
}
Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project accumulo by apache.
From the class Metrics2ReplicationMetrics, method getMetrics.
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  MetricsRecordBuilder builder = collector.addRecord(RECORD).setContext(CONTEXT);
  snapshot();
  registry.snapshot(builder, all);
  replicationQueueTimeQuantiles.snapshot(builder, all);
  replicationQueueTimeStat.snapshot(builder, all);
}
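Both Accumulo implementations follow the same MetricsSource pattern: each call to getMetrics adds one record to the collector, tags it with a context, and lets the registered metrics objects snapshot themselves into the builder. A self-contained sketch of that pattern; the record name, context, and counter below are illustrative, not Accumulo's actual values:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;

public class ExampleMetrics implements MetricsSource {
  private final MetricsRegistry registry = new MetricsRegistry("example");
  private volatile long requestCount;

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    MetricsRecordBuilder builder = collector.addRecord("example").setContext("examplecontext");
    // Values can be written to the builder directly...
    builder.addCounter(Interns.info("Requests", "Requests served"), requestCount);
    // ...and any metrics registered with the registry are snapshotted in bulk.
    registry.snapshot(builder, all);
  }
}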