Usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache Hadoop project:
the tearDown method of the TestNameNodeMetrics class.
@After
public void tearDown() throws Exception {
    // Verify the UGI quantile metrics before they are cleaned up. This check
    // can only run once per test because teardown clears the UGI metrics.
    MetricsSource ugiSource =
        DefaultMetricsSystem.instance().getSource("UgiMetrics");

    if (ugiSource != null) {
        // Run only once since the UGI metrics is cleaned up during teardown
        assertQuantileGauges("GetGroups1s", getMetrics(ugiSource));
    }

    // Tear down the mini-cluster if one was started for this test.
    if (cluster == null) {
        return;
    }
    cluster.shutdown();
    cluster = null;
}
Usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache Hadoop project:
the testFlushThread method of the TestRollingFileSystemSinkWithHdfs class.
/**
 * This test specifically checks whether the flusher thread is automatically
 * flushing the files. It unfortunately can only test with the alternative
 * flushing schedule (because of test timing), but it's better than nothing.
 *
 * @throws Exception thrown if something breaks
 */
@Test
public void testFlushThread() throws Exception {
    // Cause the sink's flush thread to be run immediately after the second
    // metrics log is written
    RollingFileSystemSink.forceFlush = true;

    String path = "hdfs://" + cluster.getNameNode().getHostAndPort() + "/tmp";
    MetricsSystem ms = initMetricsSystem(path, true, false, false);

    new MyMetrics1().registerWith(ms);

    // Publish the metrics
    ms.publishMetricsNow();
    // Pubish again because the first write seems to get properly flushed
    // regardless.
    ms.publishMetricsNow();

    int count = 0;

    try {
        // Poll until the flusher thread reports it has flushed; the sleep is
        // here to make sure this test isn't flakey. 1000 * 10ms = 10s cap.
        while (!RollingFileSystemSink.hasFlushed) {
            Thread.sleep(10L);

            if (++count > 1000) {
                fail("Flush thread did not run within 10 seconds");
            }
        }

        Calendar now = Calendar.getInstance();
        Path currentDir = new Path(path, DATE_FORMAT.format(now.getTime()) + "00");

        // FileSystem.newInstance() returns a non-cached FileSystem that the
        // caller owns; close it to avoid leaking the connection. (Original
        // code never closed it.)
        try (FileSystem fs = FileSystem.newInstance(new URI(path), new Configuration())) {
            Path currentFile =
                findMostRecentLogFile(fs, new Path(currentDir, getLogFilename()));
            FileStatus status = fs.getFileStatus(currentFile);

            // Each metrics record is 118+ bytes, depending on hostname
            assertTrue("The flusher thread didn't flush the log contents. Expected "
                + "at least 236 bytes in the log file, but got " + status.getLen(),
                status.getLen() >= 236);
        }
    } finally {
        RollingFileSystemSink.forceFlush = false;

        try {
            ms.stop();
        } finally {
            ms.shutdown();
        }
    }
}
Usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache Hadoop project:
the testSilentFailedWrite method of the TestRollingFileSystemSinkWithHdfs class.
/**
 * Test that writing to HDFS fails silently when HDFS is unavailable.
 *
 * @throws IOException thrown when reading or writing log files
 * @throws java.lang.InterruptedException thrown if interrupted
 */
@Test
public void testSilentFailedWrite() throws IOException, InterruptedException {
    final String path =
        "hdfs://" + cluster.getNameNode().getHostAndPort() + "/tmp";
    // ignoreErrors=true: the sink should swallow write failures silently.
    final MetricsSystem metricsSystem = initMetricsSystem(path, true, false);

    new MyMetrics1().registerWith(metricsSystem);

    // Take HDFS down before publishing so the write is guaranteed to fail.
    shutdownHdfs();
    MockSink.errored = false;

    // publish the metrics
    metricsSystem.publishMetricsNow();

    assertFalse("An exception was generated writing metrics "
        + "while HDFS was unavailable, even though the sink is set to "
        + "ignore errors",
        MockSink.errored);

    try {
        metricsSystem.stop();
    } finally {
        metricsSystem.shutdown();
    }
}
Usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache Hadoop project:
the testSilentFailedClose method of the TestRollingFileSystemSinkWithHdfs class.
/**
 * Test that closing a file in HDFS silently fails when HDFS is unavailable.
 *
 * @throws IOException thrown when reading or writing log files
 */
@Test
public void testSilentFailedClose() throws IOException {
    final String path =
        "hdfs://" + cluster.getNameNode().getHostAndPort() + "/tmp";
    // ignoreErrors=true: close failures should be swallowed silently.
    final MetricsSystem metricsSystem = initMetricsSystem(path, true, false);

    new MyMetrics1().registerWith(metricsSystem);

    // publish the metrics
    metricsSystem.publishMetricsNow();

    // Take HDFS down after the write so only the close() fails.
    shutdownHdfs();
    MockSink.errored = false;

    try {
        metricsSystem.stop();

        assertFalse("An exception was generated stopping sink "
            + "while HDFS was unavailable, even though the sink is set to "
            + "ignore errors",
            MockSink.errored);
    } finally {
        metricsSystem.shutdown();
    }
}
Usage of org.apache.hadoop.metrics2.annotation.Metrics in the Apache Hadoop project:
the testFailedClose method of the TestRollingFileSystemSinkWithHdfs class.
/**
 * Test that closing a file in HDFS fails when HDFS is unavailable.
 *
 * @throws IOException thrown when reading or writing log files
 */
@Test
public void testFailedClose() throws IOException {
    final String path =
        "hdfs://" + cluster.getNameNode().getHostAndPort() + "/tmp";
    // ignoreErrors=false: the sink is expected to surface close failures.
    final MetricsSystem metricsSystem = initMetricsSystem(path, false, false);

    new MyMetrics1().registerWith(metricsSystem);

    // publish the metrics
    metricsSystem.publishMetricsNow();

    // Take HDFS down after the write so only the close() fails.
    shutdownHdfs();
    MockSink.errored = false;

    try {
        metricsSystem.stop();

        // Either the sink records the error in MockSink.errored, or stop()
        // propagates a MetricsException (caught below) — both are acceptable.
        assertTrue("No exception was generated while stopping sink "
            + "even though HDFS was unavailable",
            MockSink.errored);
    } catch (MetricsException ex) {
        // Expected
    } finally {
        metricsSystem.shutdown();
    }
}
Aggregations