Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
The class TestFSNamesystemMBean, method testWithFSNamesystemWriteLock.
// The test makes sure a JMX request can be processed even if the
// namesystem's writeLock is owned by another thread.
@Test
public void testWithFSNamesystemWriteLock() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  FSNamesystem fsn = null;
  int jmxCachePeriod = 1;
  new ConfigBuilder().add("namenode.period", jmxCachePeriod)
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-namenode"));
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fsn = cluster.getNameNode().namesystem;
    fsn.writeLock();
    Thread.sleep(jmxCachePeriod * 1000);
    MBeanClient client = new MBeanClient();
    client.start();
    client.join(20000);
    assertTrue("JMX calls are blocked when FSNamesystem's writeLock "
        + "is owned by another thread", client.succeeded);
    client.interrupt();
  } finally {
    if (fsn != null && fsn.hasWriteLock()) {
      fsn.writeUnlock();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
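The MBeanClient helper used above is defined elsewhere in TestFSNamesystemMBean and is not shown in this excerpt. A minimal sketch of such a helper, assuming the standard Hadoop:service=NameNode,name=FSNamesystemState MBean name and an illustrative attribute, might look like:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Sketch: read an attribute of the NameNode's FSNamesystemState MBean from
// a separate thread, so the test can assert that the JMX read completes
// even while another thread holds the namesystem write lock.
class MBeanClient extends Thread {
  volatile boolean succeeded = false;

  @Override
  public void run() {
    try {
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName name =
          new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
      // Any attribute read exercises the code path under test; the
      // attribute chosen here is illustrative.
      mbs.getAttribute(name, "NumLiveDataNodes");
      succeeded = true;
    } catch (Exception e) {
      // succeeded stays false and the test's assertTrue will fail.
    }
  }
}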
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
The class TestGangliaMetrics, method testGangliaMetrics2.
@Test
public void testGangliaMetrics2() throws Exception {
  // Set a long interval to avoid periodic publishing; metrics are
  // published manually via MetricsSystem#publishMetricsNow below.
  ConfigBuilder cb = new ConfigBuilder()
      .add("*.period", 120)
      // filter both sinks down to the "test" context only
      .add(testNamePrefix + ".sink.gsink30.context", testNamePrefix)
      .add(testNamePrefix + ".sink.gsink31.context", testNamePrefix)
      .save(TestMetricsConfig.getTestFilename(
          "hadoop-metrics2-" + testNamePrefix));
  MetricsSystemImpl ms = new MetricsSystemImpl(testNamePrefix);
  ms.start();
  TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
  s1.c1.incr();
  s1.xxx.incr();
  s1.g1.set(2);
  s1.yyy.incr(2);
  s1.s1.add(0);
  final int expectedCountFromGanglia30 = expectedMetrics.length;
  final int expectedCountFromGanglia31 = 2 * expectedMetrics.length;
  // Set up the GangliaSink30 under test
  AbstractGangliaSink gsink30 = new GangliaSink30();
  gsink30.init(cb.subset(testNamePrefix));
  MockDatagramSocket mockds30 = new MockDatagramSocket();
  GangliaMetricsTestHelper.setDatagramSocket(gsink30, mockds30);
  // Set up the GangliaSink31 under test
  AbstractGangliaSink gsink31 = new GangliaSink31();
  gsink31.init(cb.subset(testNamePrefix));
  MockDatagramSocket mockds31 = new MockDatagramSocket();
  GangliaMetricsTestHelper.setDatagramSocket(gsink31, mockds31);
  // register the sinks
  ms.register("gsink30", "gsink30 desc", gsink30);
  ms.register("gsink31", "gsink31 desc", gsink31);
  // publish the metrics
  ms.publishMetricsNow();
  ms.stop();
  // check GangliaSink30 data
  checkMetrics(mockds30.getCapturedSend(), expectedCountFromGanglia30);
  // check GangliaSink31 data
  checkMetrics(mockds31.getCapturedSend(), expectedCountFromGanglia31);
}
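MockDatagramSocket and GangliaMetricsTestHelper are test-local helpers that do not appear in this excerpt. Judging from the usage above, MockDatagramSocket is a DatagramSocket that captures outgoing packets rather than sending them; a minimal sketch under that assumption:

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.List;

// Sketch: capture every packet the Ganglia sink would send to gmond,
// so the test can count and inspect the emitted metrics.
class MockDatagramSocket extends DatagramSocket {
  private final List<DatagramPacket> captured =
      new ArrayList<DatagramPacket>();

  MockDatagramSocket() throws SocketException {
    super();
  }

  @Override
  public synchronized void send(DatagramPacket p) {
    captured.add(p); // record instead of transmitting
  }

  List<DatagramPacket> getCapturedSend() {
    return captured;
  }
}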
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
The class TestGangliaMetrics, method testTagsForPrefix.
@Test
public void testTagsForPrefix() throws Exception {
  ConfigBuilder cb = new ConfigBuilder()
      .add(testNamePrefix + ".sink.ganglia.tagsForPrefix.all", "*")
      .add(testNamePrefix + ".sink.ganglia.tagsForPrefix.some",
          "NumActiveSinks, " + "NumActiveSources")
      .add(testNamePrefix + ".sink.ganglia.tagsForPrefix.none", "");
  GangliaSink30 sink = new GangliaSink30();
  sink.init(cb.subset(testNamePrefix + ".sink.ganglia"));
  List<MetricsTag> tags = new ArrayList<MetricsTag>();
  tags.add(new MetricsTag(MsInfo.Context, "all"));
  tags.add(new MetricsTag(MsInfo.NumActiveSources, "foo"));
  tags.add(new MetricsTag(MsInfo.NumActiveSinks, "bar"));
  tags.add(new MetricsTag(MsInfo.NumAllSinks, "haa"));
  tags.add(new MetricsTag(MsInfo.Hostname, "host"));
  Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
  MetricsRecord record =
      new MetricsRecordImpl(MsInfo.Context, (long) 1, tags, metrics);
  StringBuilder sb = new StringBuilder();
  sink.appendPrefix(record, sb);
  assertEquals(".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa",
      sb.toString());
  tags.set(0, new MetricsTag(MsInfo.Context, "some"));
  sb = new StringBuilder();
  sink.appendPrefix(record, sb);
  assertEquals(".NumActiveSources=foo.NumActiveSinks=bar", sb.toString());
  tags.set(0, new MetricsTag(MsInfo.Context, "none"));
  sb = new StringBuilder();
  sink.appendPrefix(record, sb);
  assertEquals("", sb.toString());
  tags.set(0, new MetricsTag(MsInfo.Context, "nada"));
  sb = new StringBuilder();
  sink.appendPrefix(record, sb);
  assertEquals("", sb.toString());
}
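Both Ganglia tests hand the sink a cb.subset(...) view of the configuration. The subset strips the prefix, so the sink reads short keys such as tagsForPrefix.all. A self-contained sketch of that behavior (the SubsetConfiguration package is an assumption that varies by Hadoop release: older versions use org.apache.commons.configuration, newer ones org.apache.commons.configuration2):

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.impl.ConfigBuilder;

public class SubsetDemo {
  public static void main(String[] args) {
    SubsetConfiguration conf = new ConfigBuilder()
        .add("test.sink.ganglia.tagsForPrefix.all", "*")
        .subset("test.sink.ganglia");
    // The prefix is stripped, so the sink looks up the short key:
    System.out.println(conf.getString("tagsForPrefix.all")); // prints "*"
  }
}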
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
The class TestMetricsSystemImpl, method testInitFirstVerifyCallBacks.
@Test
public void testInitFirstVerifyCallBacks() throws Exception {
  DefaultMetricsSystem.shutdown();
  new ConfigBuilder()
      .add("*.period", 8)
      .add("test.sink.test.class", TestSink.class.getName())
      .add("test.*.source.filter.exclude", "s0")
      .add("test.source.s1.metric.filter.exclude", "X*")
      .add("test.sink.sink1.metric.filter.exclude", "Y*")
      .add("test.sink.sink2.metric.filter.exclude", "Y*")
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  ms.register("s0", "s0 desc", new TestSource("s0rec"));
  TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
  s1.c1.incr();
  s1.xxx.incr();
  s1.g1.set(2);
  s1.yyy.incr(2);
  s1.s1.add(0);
  MetricsSink sink1 = mock(MetricsSink.class);
  MetricsSink sink2 = mock(MetricsSink.class);
  ms.registerSink("sink1", "sink1 desc", sink1);
  ms.registerSink("sink2", "sink2 desc", sink2);
  // publish the metrics
  ms.publishMetricsNow();
  try {
    verify(sink1, timeout(200).times(2)).putMetrics(r1.capture());
    verify(sink2, timeout(200).times(2)).putMetrics(r2.capture());
  } finally {
    ms.stop();
    ms.shutdown();
  }
  // When we call stop, at most two sources will be consumed by each
  // sink thread.
  List<MetricsRecord> mr1 = r1.getAllValues();
  List<MetricsRecord> mr2 = r2.getAllValues();
  checkMetricsRecords(mr1);
  assertEquals("output", mr1, mr2);
}
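The captors r1 and r2 are not declared in this excerpt. Given the capture() and getAllValues() calls, they are Mockito ArgumentCaptors for MetricsRecord, roughly declared as follows (a sketch, not the verbatim fields from TestMetricsSystemImpl):

import org.mockito.ArgumentCaptor;
import org.mockito.Captor;

// Sketch: captors that accumulate every MetricsRecord handed to putMetrics;
// they are filled in by the verify(...).putMetrics(r1.capture()) calls.
// With @Captor, the test class would run under MockitoJUnitRunner (or call
// MockitoAnnotations.initMocks) so these fields get initialized.
@Captor private ArgumentCaptor<MetricsRecord> r1;
@Captor private ArgumentCaptor<MetricsRecord> r2;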
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
The class TestMetricsSystemImpl, method testQSize.
@Test
public void testQSize() throws Exception {
  new ConfigBuilder()
      .add("*.period", 8)
      .add("*.queue.capacity", 2)
      .add("test.sink.test.class", TestSink.class.getName())
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  final CountDownLatch proceedSignal = new CountDownLatch(1);
  final CountDownLatch reachedPutMetricSignal = new CountDownLatch(1);
  ms.start();
  try {
    MetricsSink slowSink = mock(MetricsSink.class);
    MetricsSink dataSink = mock(MetricsSink.class);
    ms.registerSink("slowSink",
        "The sink that will wait on putMetric", slowSink);
    ms.registerSink("dataSink",
        "The sink I'll use to get info about slowSink", dataSink);
    doAnswer(new Answer() {
      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        reachedPutMetricSignal.countDown();
        proceedSignal.await();
        return null;
      }
    }).when(slowSink).putMetrics(any(MetricsRecord.class));
    // trigger metric collection for the first time
    ms.onTimerEvent();
    assertTrue(reachedPutMetricSignal.await(1, TimeUnit.SECONDS));
    // Because the slow sink is still processing the first batch, its
    // queue length should be 1 during the second collection.
    ms.onTimerEvent();
    verify(dataSink, timeout(500).times(2)).putMetrics(r1.capture());
    List<MetricsRecord> mr = r1.getAllValues();
    Number qSize = Iterables.find(mr.get(1).metrics(),
        new Predicate<AbstractMetric>() {
          @Override
          public boolean apply(@Nullable AbstractMetric input) {
            assert input != null;
            return input.name().equals("Sink_slowSinkQsize");
          }
        }).value();
    assertEquals(1, qSize);
  } finally {
    proceedSignal.countDown();
    ms.stop();
  }
}
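The Guava Iterables.find/Predicate pair above predates java.util.stream; on Java 8+ the same lookup can be written more compactly. A sketch with the same semantics (like Iterables.find, it throws NoSuchElementException if the metric is absent):

import java.util.NoSuchElementException;
import java.util.stream.StreamSupport;

// Find the queue-size gauge for slowSink in the second collected record.
Number qSize = StreamSupport
    .stream(mr.get(1).metrics().spliterator(), false)
    .filter(m -> "Sink_slowSinkQsize".equals(m.name()))
    .findFirst()
    .orElseThrow(NoSuchElementException::new)
    .value();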