Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
From the class TestMetricsSystemImpl, method testInitFirstVerifyStopInvokedImmediately:
@Test
public void testInitFirstVerifyStopInvokedImmediately() throws Exception {
  DefaultMetricsSystem.shutdown();
  new ConfigBuilder().add("*.period", 8)
      .add("test.sink.test.class", TestSink.class.getName())
      .add("test.*.source.filter.exclude", "s0")
      .add("test.source.s1.metric.filter.exclude", "X*")
      .add("test.sink.sink1.metric.filter.exclude", "Y*")
      .add("test.sink.sink2.metric.filter.exclude", "Y*")
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  ms.register("s0", "s0 desc", new TestSource("s0rec"));
  TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
  s1.c1.incr();
  s1.xxx.incr();
  s1.g1.set(2);
  s1.yyy.incr(2);
  s1.s1.add(0);
  MetricsSink sink1 = mock(MetricsSink.class);
  MetricsSink sink2 = mock(MetricsSink.class);
  ms.registerSink("sink1", "sink1 desc", sink1);
  ms.registerSink("sink2", "sink2 desc", sink2);
  // publish the metrics
  ms.publishMetricsNow();
  ms.stop();
  ms.shutdown();
  // When we call stop, at most two sources will be consumed by each sink thread.
  verify(sink1, atMost(2)).putMetrics(r1.capture());
  List<MetricsRecord> mr1 = r1.getAllValues();
  verify(sink2, atMost(2)).putMetrics(r2.capture());
  List<MetricsRecord> mr2 = r2.getAllValues();
  if (mr1.size() != 0 && mr2.size() != 0) {
    checkMetricsRecords(mr1);
    assertEquals("output", mr1, mr2);
  } else if (mr1.size() != 0) {
    checkMetricsRecords(mr1);
  } else if (mr2.size() != 0) {
    checkMetricsRecords(mr2);
  }
}
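Note that r1, r2, and checkMetricsRecords are referenced but not declared in the snippet; they are class-level members of the full test class. A minimal sketch of the assumed captor declarations (Mockito annotations, initialized by the Mockito JUnit runner; treat the exact setup as an assumption):

@RunWith(MockitoJUnitRunner.class)  // assumed: initializes the @Captor fields
public class TestMetricsSystemImpl {
  @Captor private ArgumentCaptor<MetricsRecord> r1;
  @Captor private ArgumentCaptor<MetricsRecord> r2;
  // ... test methods as shown above ...
}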
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
From the class TestMetricsSystemImpl, method testHangOnSinkRead:
/**
* HADOOP-11932
*/
@Test(timeout = 5000)
public void testHangOnSinkRead() throws Exception {
  new ConfigBuilder().add("*.period", 8)
      .add("test.sink.test.class", TestSink.class.getName())
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  try {
    CountDownLatch collectingLatch = new CountDownLatch(1);
    MetricsSink sink = new TestClosableSink(collectingLatch);
    ms.registerSink("closeableSink",
        "The sink will be used to test closeability", sink);
    // trigger metric collection first time
    ms.onTimerEvent();
    // Make sure that sink is collecting metrics
    assertTrue(collectingLatch.await(1, TimeUnit.SECONDS));
  } finally {
    ms.stop();
  }
}
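TestClosableSink is referenced but not shown. A minimal sketch of such a sink, assuming it implements MetricsSink and Closeable, counts down the latch on the first putMetrics call, and then blocks to simulate a hung sink (the actual helper in the Hadoop test class may differ in detail):

private static class TestClosableSink implements MetricsSink, Closeable {
  private volatile boolean closed = false;
  private final CountDownLatch collectingLatch;

  TestClosableSink(CountDownLatch collectingLatch) {
    this.collectingLatch = collectingLatch;
  }

  @Override
  public void init(SubsetConfiguration conf) {
  }

  @Override
  public void putMetrics(MetricsRecord record) {
    // Signal that collection has started, then pretend to hang on the
    // sink so the test can verify that stop() does not block forever.
    if (!closed && collectingLatch.getCount() > 0) {
      collectingLatch.countDown();
      try {
        Thread.sleep(10 * 1000);
      } catch (InterruptedException ie) {
        // allow stop() to interrupt the simulated hang
      }
    }
  }

  @Override
  public void flush() {
  }

  @Override
  public void close() throws IOException {
    closed = true;
  }
}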
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
From the class TestRollingFileSystemSink, method testGetRollInterval:
/**
* Test whether the roll interval is correctly calculated from the
* configuration settings.
*/
@Test
public void testGetRollInterval() {
  doTestGetRollInterval(1, new String[] { "m", "min", "minute", "minutes" },
      60 * 1000L);
  doTestGetRollInterval(1, new String[] { "h", "hr", "hour", "hours" },
      60 * 60 * 1000L);
  doTestGetRollInterval(1, new String[] { "d", "day", "days" },
      24 * 60 * 60 * 1000L);
  ConfigBuilder builder = new ConfigBuilder();
  SubsetConfiguration conf =
      builder.add("sink.roll-interval", "1").subset("sink");
  // We can reuse the same sink every time because we're setting the same
  // property each time.
  RollingFileSystemSink sink = new RollingFileSystemSink();
  sink.init(conf);
  // A unitless roll interval defaults to hours: "1" parses as 3600000 ms.
  assertEquals(3600000L, sink.getRollInterval());
  // Every letter except the valid units (d, h, m, s) must be rejected.
  for (char c : "abcefgijklnopqrtuvwxyz".toCharArray()) {
    builder = new ConfigBuilder();
    conf = builder.add("sink.roll-interval", "90 " + c).subset("sink");
    try {
      sink.init(conf);
      sink.getRollInterval();
      fail("Allowed flush interval with bad units: " + c);
    } catch (MetricsException ex) {
      // Expected
    }
  }
}
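The doTestGetRollInterval helper is not part of the snippet. Given its call sites above, it presumably initializes a sink once per unit string and checks the parsed interval; a hypothetical reconstruction:

// Assumed helper, reconstructed from the calls in testGetRollInterval.
private void doTestGetRollInterval(int num, String[] units, long expectedMillis) {
  RollingFileSystemSink sink = new RollingFileSystemSink();

  for (String unit : units) {
    // "1m" and "1 m" (with a space) should parse to the same interval
    sink.init(new ConfigBuilder()
        .add("sink.roll-interval", num + unit).subset("sink"));
    assertEquals(expectedMillis, sink.getRollInterval());

    sink.init(new ConfigBuilder()
        .add("sink.roll-interval", num + " " + unit).subset("sink"));
    assertEquals(expectedMillis, sink.getRollInterval());
  }
}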
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
From the class TestRollingFileSystemSink, method testInit:
@Test
public void testInit() {
  ConfigBuilder builder = new ConfigBuilder();
  SubsetConfiguration conf = builder.add("sink.roll-interval", "10m")
      .add("sink.roll-offset-interval-millis", "1")
      .add("sink.basepath", "path")
      .add("sink.ignore-error", "true")
      .add("sink.allow-append", "true")
      .add("sink.source", "src")
      .subset("sink");
  RollingFileSystemSink sink = new RollingFileSystemSink();
  sink.init(conf);
  // JUnit's assertEquals takes the expected value before the actual one.
  assertEquals("The roll interval was not set correctly",
      600000, sink.rollIntervalMillis);
  assertEquals("The roll offset interval was not set correctly",
      1, sink.rollOffsetIntervalMillis);
  assertEquals("The base path was not set correctly",
      new Path("path"), sink.basePath);
  assertEquals("ignore-error was not set correctly",
      true, sink.ignoreError);
  assertEquals("allow-append was not set correctly",
      true, sink.allowAppend);
  assertEquals("The source was not set correctly",
      "src", sink.source);
}
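For comparison, outside of a unit test these settings would normally live in hadoop-metrics2.properties rather than be built with ConfigBuilder. Assuming a hypothetical metrics prefix (namenode) and sink instance name (roll), the equivalent entries would look roughly like this; the option names come from the test above, while the prefix and instance name are illustrative only:

namenode.sink.roll.class=org.apache.hadoop.metrics2.sink.RollingFileSystemSink
namenode.sink.roll.roll-interval=10m
namenode.sink.roll.roll-offset-interval-millis=1
namenode.sink.roll.basepath=path
namenode.sink.roll.ignore-error=true
namenode.sink.roll.allow-append=true
namenode.sink.roll.source=src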
Use of org.apache.hadoop.metrics2.impl.ConfigBuilder in project hadoop by apache.
From the class TestGangliaSink, method testShouldCreateDatagramSocketByDefault:
@Test
public void testShouldCreateDatagramSocketByDefault() throws Exception {
  SubsetConfiguration conf = new ConfigBuilder().subset("test.sink.ganglia");
  GangliaSink30 gangliaSink = new GangliaSink30();
  gangliaSink.init(conf);
  DatagramSocket socket = gangliaSink.getDatagramSocket();
  assertFalse("Did not create DatagramSocket",
      socket == null || socket instanceof MulticastSocket);
}
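The inverse case, checking that a MulticastSocket is created when multicast is enabled, could look like the following sketch; the "multicast" property key is an assumption about how GangliaSink30 is configured, not confirmed by the snippet above:

@Test
public void testShouldCreateMulticastSocketWhenConfigured() throws Exception {
  SubsetConfiguration conf = new ConfigBuilder()
      .add("test.sink.ganglia.multicast", true)  // assumed property key
      .subset("test.sink.ganglia");
  GangliaSink30 gangliaSink = new GangliaSink30();
  gangliaSink.init(conf);
  DatagramSocket socket = gangliaSink.getDatagramSocket();
  assertTrue("Did not create MulticastSocket",
      socket != null && socket instanceof MulticastSocket);
}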