Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class TestReadWriteDiskValidator, method testCheckFailures.
@Test
public void testCheckFailures() throws Throwable {
  ReadWriteDiskValidator readWriteDiskValidator =
      (ReadWriteDiskValidator) DiskValidatorFactory.getInstance(ReadWriteDiskValidator.NAME);
  // Create a temporary test directory under the system test directory.
  File testDir = Files.createTempDirectory(
      Paths.get(System.getProperty("test.build.data")), "test").toFile();
  try {
    Shell.execCommand(Shell.getSetPermissionCommand("000", false, testDir.getAbsolutePath()));
  } catch (Exception e) {
    testDir.delete();
    throw e;
  }
  try {
    readWriteDiskValidator.checkStatus(testDir);
    fail("Disk check should fail.");
  } catch (DiskErrorException e) {
    assertTrue(e.getMessage().equals("Disk Check failed!"));
  }
  // ms is the test class's MetricsSystem field, initialized outside this excerpt.
  MetricsSource source =
      ms.getSource(ReadWriteDiskValidatorMetrics.sourceName(testDir.toString()));
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  source.getMetrics(collector, true);
  try {
    readWriteDiskValidator.checkStatus(testDir);
    fail("Disk check should fail.");
  } catch (DiskErrorException e) {
    assertTrue(e.getMessage().equals("Disk Check failed!"));
  }
  source.getMetrics(collector, true);
  // Verify the first metrics record.
  MetricsRecords.assertMetric(collector.getRecords().get(0), "FailureCount", 1);
  Long lastFailureTime1 = (Long) MetricsRecords.getMetricValueByName(
      collector.getRecords().get(0), "LastFailureTime");
  // Verify the second metrics record.
  MetricsRecords.assertMetric(collector.getRecords().get(1), "FailureCount", 2);
  Long lastFailureTime2 = (Long) MetricsRecords.getMetricValueByName(
      collector.getRecords().get(1), "LastFailureTime");
  assertTrue("The first failure time should be less than the second one",
      lastFailureTime1 < lastFailureTime2);
  testDir.delete();
}
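The test above relies on a MetricsSystem field (ms) set up elsewhere in the test class. As a minimal, self-contained sketch of the validator API it exercises (it assumes only the public DiskValidatorFactory / ReadWriteDiskValidator classes shown above; the main-method harness and temp-directory name are illustrative), a healthy directory can be checked like this:

import java.io.File;
import java.nio.file.Files;

import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskValidator;
import org.apache.hadoop.util.DiskValidatorFactory;
import org.apache.hadoop.util.ReadWriteDiskValidator;

public class DiskValidatorSketch {
  public static void main(String[] args) throws Exception {
    // Obtain the read-write validator by its registered name.
    DiskValidator validator =
        DiskValidatorFactory.getInstance(ReadWriteDiskValidator.NAME);
    File dir = Files.createTempDirectory("disk-check").toFile();
    try {
      // On a readable and writable directory this is expected to return silently.
      validator.checkStatus(dir);
      System.out.println("Disk check passed for " + dir);
    } catch (DiskErrorException e) {
      System.err.println("Disk check failed: " + e.getMessage());
    } finally {
      dir.delete();
    }
  }
}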
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class TestCheckpoint, method testMultipleSecondaryNNsAgainstSameNN.
/**
* Test case where two secondary namenodes are checkpointing the same
* NameNode. This differs from {@link #testMultipleSecondaryNamenodes()}
* since that test runs against two distinct NNs.
*
* This case tests the following interleaving:
* - 2NN A downloads image (up to txid 2)
* - 2NN A about to save its own checkpoint
* - 2NN B downloads image (up to txid 4)
* - 2NN B uploads checkpoint (txid 4)
* - 2NN A uploads checkpoint (txid 2)
*
* It verifies that this works even though the earlier-txid checkpoint gets
* uploaded after the later-txid checkpoint.
*/
@Test
public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary1 = null, secondary2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    // Start 2NNs
    secondary1 = startSecondaryNameNode(conf, 1);
    secondary2 = startSecondaryNameNode(conf, 2);
    // Make the first 2NN's checkpoint process delayable - we can pause it
    // right before it saves its checkpoint image.
    CheckpointStorage spyImage1 = spyOnSecondaryImage(secondary1);
    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spyImage1)
        .saveFSImageInAllDirs(Mockito.<FSNamesystem>any(), Mockito.anyLong());
    // Set up a thread to do a checkpoint from the first 2NN
    DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1);
    checkpointThread.start();
    // Wait for the first checkpointer to get to where it should save its image.
    delayer.waitForCall();
    // Now make the second checkpointer run an entire checkpoint
    secondary2.doCheckpoint();
    // Let the first one finish
    delayer.proceed();
    // It should have succeeded even though another checkpoint raced with it.
    checkpointThread.join();
    checkpointThread.propagateExceptions();
    // The primary should record "last checkpoint" as the higher txid (even though
    // a checkpoint with a lower txid finished most recently).
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    assertEquals(4, storage.getMostRecentCheckpointTxId());
    // Should have accepted both checkpoints
    assertNNHasCheckpoints(cluster, ImmutableList.of(2, 4));
    // Now have the second one checkpoint one more time just to make sure that
    // the NN isn't left in a broken state
    secondary2.doCheckpoint();
    // NN should have received the new checkpoint
    assertEquals(6, storage.getMostRecentCheckpointTxId());
    // Validate the invariant that files with the same name have the same contents.
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
    // NN should have removed the checkpoint at txid 2 at this point, but still
    // has one at txid 6.
    assertNNHasCheckpoints(cluster, ImmutableList.of(4, 6));
  } finally {
    cleanup(secondary1);
    secondary1 = null;
    cleanup(secondary2);
    secondary2 = null;
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }
}
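The DelayAnswer used above is a Hadoop test helper that pauses a spied call until the test allows it to proceed. Below is a hedged sketch of the same pause-then-race-then-proceed pattern rebuilt with plain Mockito and CountDownLatch; the Checkpointer interface is a hypothetical stand-in for CheckpointStorage, not Hadoop's API.

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.util.concurrent.CountDownLatch;

public class DelayedSpySketch {
  // Hypothetical stand-in for the spied CheckpointStorage in the test above.
  interface Checkpointer {
    void saveImage();
  }

  static class PlainCheckpointer implements Checkpointer {
    public void saveImage() { System.out.println("image saved"); }
  }

  public static void main(String[] args) throws Exception {
    CountDownLatch called = new CountDownLatch(1);     // fired when the spy is entered
    CountDownLatch mayProceed = new CountDownLatch(1); // released by the "test"

    Checkpointer spyCp = spy(new PlainCheckpointer());
    doAnswer(invocation -> {
      called.countDown();       // analogous to delayer.waitForCall() unblocking
      mayProceed.await();       // block until the "proceed" signal
      return invocation.callRealMethod();
    }).when(spyCp).saveImage();

    Thread checkpointThread = new Thread(spyCp::saveImage);
    checkpointThread.start();

    called.await();             // checkpoint A is now paused mid-save
    System.out.println("a racing checkpoint B could run to completion here");
    mayProceed.countDown();     // analogous to delayer.proceed()
    checkpointThread.join();
  }
}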
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class TestRollingFileSystemSinkWithHdfs, method testFlushThread.
/**
* This test specifically checks whether the flusher thread is automatically
* flushing the files. It unfortunately can only test with the alternative
* flushing schedule (because of test timing), but it's better than nothing.
*
* @throws Exception thrown if something breaks
*/
@Test
public void testFlushThread() throws Exception {
  // Cause the sink's flush thread to run immediately after the second
  // metrics log is written.
  RollingFileSystemSink.forceFlush = true;
  String path = "hdfs://" + cluster.getNameNode().getHostAndPort() + "/tmp";
  MetricsSystem ms = initMetricsSystem(path, true, false, false);
  new MyMetrics1().registerWith(ms);
  // Publish the metrics
  ms.publishMetricsNow();
  // Publish again because the first write seems to get flushed properly
  // regardless.
  ms.publishMetricsNow();
  int count = 0;
  try {
    // Poll until the flush thread has run; the counter bounds the wait so
    // this test isn't flaky.
    while (!RollingFileSystemSink.hasFlushed) {
      Thread.sleep(10L);
      if (++count > 1000) {
        fail("Flush thread did not run within 10 seconds");
      }
    }
    Calendar now = Calendar.getInstance();
    Path currentDir = new Path(path, DATE_FORMAT.format(now.getTime()) + "00");
    FileSystem fs = FileSystem.newInstance(new URI(path), new Configuration());
    Path currentFile = findMostRecentLogFile(fs, new Path(currentDir, getLogFilename()));
    FileStatus status = fs.getFileStatus(currentFile);
    // Each metrics record is 118+ bytes, depending on the hostname.
    assertTrue("The flusher thread didn't flush the log contents. Expected "
        + "at least 236 bytes in the log file, but got " + status.getLen(),
        status.getLen() >= 236);
  } finally {
    RollingFileSystemSink.forceFlush = false;
    try {
      ms.stop();
    } finally {
      ms.shutdown();
    }
  }
}
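The size check at the end is plain Hadoop FileSystem API. A hedged sketch of that part in isolation (the file:///tmp/metrics-logs path and the 236-byte threshold mirror the test but are illustrative; initMetricsSystem, MyMetrics1, and findMostRecentLogFile are test helpers not shown here):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LogSizeCheckSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative local path; the test uses an hdfs:// URI from MiniDFSCluster.
    String path = "file:///tmp/metrics-logs";
    FileSystem fs = FileSystem.newInstance(new URI(path), new Configuration());
    try {
      for (FileStatus status : fs.listStatus(new Path(path))) {
        // The test expects at least two ~118-byte records per flushed file.
        boolean flushed = status.getLen() >= 236;
        System.out.println(status.getPath() + " len=" + status.getLen()
            + " flushed=" + flushed);
      }
    } finally {
      fs.close();
    }
  }
}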
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class TestKafkaMetrics, method recordToJson.
StringBuilder recordToJson(MetricsRecord record) {
  // Build a JSON string from a metrics record.
  StringBuilder jsonLines = new StringBuilder();
  Long timestamp = record.timestamp();
  Date currDate = new Date(timestamp);
  SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
  String date = dateFormat.format(currDate);
  SimpleDateFormat timeFormat = new SimpleDateFormat("hh:mm:ss");
  String time = timeFormat.format(currDate);
  String hostname = "null";
  try {
    hostname = InetAddress.getLocalHost().getHostName();
  } catch (Exception e) {
    LOG.warn("Error getting hostname, going to continue");
  }
  jsonLines.append("{\"hostname\": \"" + hostname);
  jsonLines.append("\", \"timestamp\": " + timestamp);
  jsonLines.append(", \"date\": \"" + date);
  jsonLines.append("\",\"time\": \"" + time);
  jsonLines.append("\",\"name\": \"" + record.name() + "\" ");
  for (MetricsTag tag : record.tags()) {
    jsonLines.append(", \"" + tag.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
    jsonLines.append(" \"" + tag.value().toString() + "\"");
  }
  for (AbstractMetric m : record.metrics()) {
    jsonLines.append(", \"" + m.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
    jsonLines.append(" \"" + m.value().toString() + "\"");
  }
  jsonLines.append("}");
  return jsonLines;
}
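For reference, this is roughly the JSON line recordToJson emits. A hedged illustration with made-up values (the hostname, date/time, tag and metric names are illustrative; the real method fills them from the local host and the MetricsRecord, using the local timezone):

public class RecordJsonShape {
  public static void main(String[] args) {
    String sample =
        "{\"hostname\": \"host1\""
        + ", \"timestamp\": 1500000000000"
        + ", \"date\": \"2017-07-14\""
        + ",\"time\": \"02:40:00\""
        + ",\"name\": \"Example record\" "
        + ", \"SomeTag\":  \"some_value\""    // one tag; note the double space
        + ", \"SomeCounter\":  \"123\""       // metric values are emitted as strings
        + "}";
    System.out.println(sample);
  }
}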
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class TestKafkaMetrics, method testPutMetrics.
@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testPutMetrics() throws Exception {
  // Create a record by mocking the MetricsRecord class.
  MetricsRecord record = mock(MetricsRecord.class);
  when(record.tags()).thenReturn(
      Lists.newArrayList(new MetricsTag(KafkaMetricsInfo.KafkaTag, "test_tag")));
  when(record.timestamp()).thenReturn(System.currentTimeMillis());
  // Create a metric using the AbstractMetric class.
  AbstractMetric metric = new AbstractMetric(KafkaMetricsInfo.KafkaCounter) {

    @Override
    public Number value() {
      return new Integer(123);
    }

    @Override
    public MetricType type() {
      return null;
    }

    @Override
    public void visit(MetricsVisitor visitor) {
    }
  };
  // Create a list of metrics.
  Iterable<AbstractMetric> metrics = Lists.newArrayList(metric);
  when(record.name()).thenReturn("Kafka record name");
  when(record.metrics()).thenReturn(metrics);
  SubsetConfiguration conf = mock(SubsetConfiguration.class);
  when(conf.getString(KafkaSink.BROKER_LIST)).thenReturn("localhost:9092");
  String topic = "myTestKafkaTopic";
  when(conf.getString(KafkaSink.TOPIC)).thenReturn(topic);
  // Create the KafkaSink object and initialize it.
  kafkaSink = new KafkaSink();
  kafkaSink.init(conf);
  // Set a mock KafkaProducer as the producer for KafkaSink.
  Producer<Integer, byte[]> mockProducer = mock(KafkaProducer.class);
  kafkaSink.setProducer(mockProducer);
  // Create the JSON string from the record.
  StringBuilder jsonLines = recordToJson(record);
  if (LOG.isDebugEnabled()) {
    LOG.debug("kafka message: " + jsonLines.toString());
  }
  // Send the record; the mocked producer returns a mock Future.
  Future<RecordMetadata> f = mock(Future.class);
  when(mockProducer.send((ProducerRecord) anyObject())).thenReturn(f);
  kafkaSink.putMetrics(record);
  // Capture the argument and verify it.
  ArgumentCaptor<ProducerRecord> argument = ArgumentCaptor.forClass(ProducerRecord.class);
  verify(mockProducer).send(argument.capture());
  // Compare the received data with the original.
  ProducerRecord<Integer, byte[]> data = argument.getValue();
  String jsonResult = new String(data.value());
  if (LOG.isDebugEnabled()) {
    LOG.debug("kafka result: " + jsonResult);
  }
  assertEquals(jsonLines.toString(), jsonResult);
}
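Once KafkaSink is wired to a real producer instead of the mock, the send boils down to standard Kafka producer calls. A hedged sketch of that path with a real KafkaProducer (the broker address and topic mirror the mocked configuration above, and the JSON payload is illustrative):

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class KafkaSendSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("key.serializer",
        "org.apache.kafka.common.serialization.IntegerSerializer");
    props.put("value.serializer",
        "org.apache.kafka.common.serialization.ByteArraySerializer");

    // The metrics line would come from recordToJson in the sink.
    String json = "{\"hostname\": \"host1\", \"name\": \"Kafka record name\"}";
    try (Producer<Integer, byte[]> producer = new KafkaProducer<>(props)) {
      ProducerRecord<Integer, byte[]> record = new ProducerRecord<>(
          "myTestKafkaTopic", json.getBytes(StandardCharsets.UTF_8));
      Future<RecordMetadata> result = producer.send(record);
      result.get();  // block until the broker acknowledges the record
    }
  }
}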