Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestDataNodeVolumeMetrics, method verifyDataNodeVolumeMetrics.
private void verifyDataNodeVolumeMetrics(final FileSystem fs,
    final MiniDFSCluster cluster, final Path fileName) throws IOException {
  List<DataNode> datanodes = cluster.getDataNodes();
  DataNode datanode = datanodes.get(0);
  final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
  final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
  DataNodeVolumeMetrics metrics = volume.getMetrics();
  MetricsRecordBuilder rb = getMetrics(volume.getMetrics().name());
  assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
  LOG.info("TotalMetadataOperations : " + metrics.getTotalMetadataOperations());
  LOG.info("TotalDataFileIos : " + metrics.getTotalDataFileIos());
  LOG.info("TotalFileIoErrors : " + metrics.getTotalFileIoErrors());
  LOG.info("MetadataOperationSampleCount : " + metrics.getMetadataOperationSampleCount());
  LOG.info("MetadataOperationMean : " + metrics.getMetadataOperationMean());
  LOG.info("MetadataOperationStdDev : " + metrics.getMetadataOperationStdDev());
  LOG.info("DataFileIoSampleCount : " + metrics.getDataFileIoSampleCount());
  LOG.info("DataFileIoMean : " + metrics.getDataFileIoMean());
  LOG.info("DataFileIoStdDev : " + metrics.getDataFileIoStdDev());
  LOG.info("flushIoSampleCount : " + metrics.getFlushIoSampleCount());
  LOG.info("flushIoMean : " + metrics.getFlushIoMean());
  LOG.info("flushIoStdDev : " + metrics.getFlushIoStdDev());
  LOG.info("syncIoSampleCount : " + metrics.getSyncIoSampleCount());
  LOG.info("syncIoMean : " + metrics.getSyncIoMean());
  LOG.info("syncIoStdDev : " + metrics.getSyncIoStdDev());
  LOG.info("readIoSampleCount : " + metrics.getReadIoSampleCount());
  LOG.info("readIoMean : " + metrics.getReadIoMean());
  LOG.info("readIoStdDev : " + metrics.getReadIoStdDev());
  LOG.info("writeIoSampleCount : " + metrics.getWriteIoSampleCount());
  LOG.info("writeIoMean : " + metrics.getWriteIoMean());
  LOG.info("writeIoStdDev : " + metrics.getWriteIoStdDev());
  LOG.info("fileIoErrorSampleCount : " + metrics.getFileIoErrorSampleCount());
  LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
  LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
}
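For context, the test above looks up a registered metrics source by name through MetricsAsserts.getMetrics. A minimal sketch of how a source is typically declared with the @Metrics annotation and registered so it can be resolved that way; the class, source, and metric names here are illustrative assumptions, not taken from the Hadoop sources.

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class MetricsSourceSketch {
  // Hypothetical source; by default the exported metric name is the
  // capitalized field name, i.e. "Requests" for the field below.
  @Metrics(name = "ExampleSource", about = "Example metrics source", context = "test")
  static class ExampleSource {
    @Metric("Number of requests served")
    MutableCounterLong requests;
  }

  public static void main(String[] args) {
    // The metrics system must be initialized before sources are registered.
    DefaultMetricsSystem.initialize("test");
    ExampleSource source = DefaultMetricsSystem.instance()
        .register("ExampleSource", "Example metrics source", new ExampleSource());
    source.requests.incr();
    // A test could now resolve the source by name, as the snippets here do:
    //   MetricsRecordBuilder rb = MetricsAsserts.getMetrics("ExampleSource");
    //   MetricsAsserts.assertCounter("Requests", 1L, rb);
  }
}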
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestDataNodeMetrics, method testRoundTripAckMetric.
/**
* Tests that round-trip acks in a datanode write pipeline are correctly
* measured.
*/
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline",
        headNode);
    // Close the file and wait for the metrics to rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
    assertTrue("Expected non-zero number of acks",
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s",
        dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
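The quantile name asserted above is built from the metric name plus the configured percentiles interval in seconds. A hedged sketch of how such a per-interval quantile metric is typically created from a MetricsRegistry; the class and metric names are illustrative, not copied from DataNodeMetrics.

import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

@Metrics(name = "ExamplePipelineMetrics", about = "Pipeline ack latency", context = "dfs")
class ExamplePipelineMetrics {
  final MetricsRegistry registry = new MetricsRegistry("ExamplePipelineMetrics");
  MutableQuantiles ackRoundTripQuantiles;

  ExamplePipelineMetrics(int intervalSecs) {
    // The interval is appended to the metric name, and the exported record
    // typically capitalizes it, which is why the test above asserts gauges
    // under "PacketAckRoundTripTimeNanos" + interval + "s".
    ackRoundTripQuantiles = registry.newQuantiles(
        "packetAckRoundTripTimeNanos" + intervalSecs + "s",
        "Round-trip ack latency in ns", "ops", "latency", intervalSecs);
  }
}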
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestDataNodeMetrics, method testDataNodeTimeSpend.
/**
 * Tests that writing causes TotalWriteTime to increment and reading causes
 * TotalReadTime to increment.
 * @throws Exception
 */
@Test(timeout = 120000)
public void testDataNodeTimeSpend() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    final FileSystem fs = cluster.getFileSystem();
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    final DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    final long LONG_FILE_LEN = 1024 * 1024 * 10;
    final long startWriteValue = getLongCounter("TotalWriteTime", rb);
    final long startReadValue = getLongCounter("TotalReadTime", rb);
    final AtomicInteger x = new AtomicInteger(0);
    // Let the metrics system pick up the latest values
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        x.getAndIncrement();
        try {
          DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()),
              LONG_FILE_LEN, (short) 1, Time.monotonicNow());
          DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
          fs.delete(new Path("/time.txt." + x.get()), true);
        } catch (IOException ioe) {
          LOG.error("Caught IOException while ingesting DN metrics", ioe);
          return false;
        }
        MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
        final long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
        final long endReadValue = getLongCounter("TotalReadTime", rbNew);
        return endWriteValue > startWriteValue && endReadValue > startReadValue;
      }
    }, 30, 60000);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
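The explicit start/end comparison in the polling loop can also be expressed with the helper assertions in org.apache.hadoop.test.MetricsAsserts. A minimal sketch, assuming the same datanode, startWriteValue, and startReadValue as in the test and static imports from MetricsAsserts:

// Re-fetch the metrics record and assert the counters grew past their
// starting values; assertCounterGt fails unless the named counter is
// strictly greater than the given baseline.
MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
assertCounterGt("TotalWriteTime", startWriteValue, rbNew);
assertCounterGt("TotalReadTime", startReadValue, rbNew);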
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestShuffleHandler, method testShuffleMetrics.
/**
* Validate shuffle connection and input/output metrics.
*
* @throws Exception exception
*/
@Test(timeout = 10000)
public void testShuffleMetrics() throws Exception {
  MetricsSystem ms = new MetricsSystemImpl();
  ShuffleHandler sh = new ShuffleHandler(ms);
  ChannelFuture cf = make(stub(ChannelFuture.class)
      .returning(true, false).from.isSuccess());
  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(1 * MiB);
  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(2 * MiB);
  checkShuffleMetrics(ms, 3 * MiB, 0, 0, 2);
  sh.metrics.operationComplete(cf);
  sh.metrics.operationComplete(cf);
  checkShuffleMetrics(ms, 3 * MiB, 1, 1, 0);
}
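checkShuffleMetrics is a private helper of the test class and is not shown in this snippet. One plausible implementation, sketched with the MetricsAsserts utilities; the metric names follow the default capitalized-field convention and are assumptions, not copied from ShuffleHandler.

// Hypothetical helper: resolve the shuffle metrics source registered on the
// given MetricsSystem, snapshot its record, and check the counters and the
// active-connections gauge.
private static void checkShuffleMetrics(MetricsSystem ms, long bytes,
    int failed, int succeeded, int connections) {
  MetricsSource source = ms.getSource("ShuffleMetrics");
  MetricsRecordBuilder rb = getMetrics(source);
  assertCounter("ShuffleOutputBytes", bytes, rb);
  assertCounter("ShuffleOutputsFailed", failed, rb);
  assertCounter("ShuffleOutputsOK", succeeded, rb);
  assertGauge("ShuffleConnections", connections, rb);
}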
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestContainerMetrics, method testContainerMetricsFlow.
@Test
public void testContainerMetricsFlow() throws InterruptedException {
  final String ERR = "Error in number of records";
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  ContainerId containerId = mock(ContainerId.class);
  ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, 100, 1);
  metrics.recordMemoryUsage(1024);
  metrics.getMetrics(collector, true);
  assertEquals(ERR, 0, collector.getRecords().size());
  Thread.sleep(110);
  metrics.getMetrics(collector, true);
  assertEquals(ERR, 1, collector.getRecords().size());
  collector.clear();
  Thread.sleep(110);
  metrics.getMetrics(collector, true);
  assertEquals(ERR, 1, collector.getRecords().size());
  collector.clear();
  metrics.finished();
  metrics.getMetrics(collector, true);
  assertEquals(ERR, 1, collector.getRecords().size());
  collector.clear();
  metrics.getMetrics(collector, true);
  assertEquals(ERR, 1, collector.getRecords().size());
  collector.clear();
  Thread.sleep(110);
  metrics.getMetrics(collector, true);
  assertEquals(ERR, 1, collector.getRecords().size());
}