Example usage of org.apache.hadoop.metrics2.impl.ConfigBuilder in the Apache Hadoop project, from class TestGangliaSink, method testShouldCreateDatagramSocketIfMulticastIsDisabled.
/**
 * When multicast is explicitly disabled in the sink configuration, the
 * Ganglia sink must open a plain {@link DatagramSocket}, never a
 * {@link MulticastSocket}.
 */
@Test
public void testShouldCreateDatagramSocketIfMulticastIsDisabled() throws Exception {
    SubsetConfiguration sinkConf = new ConfigBuilder()
        .add("test.sink.ganglia.multicast", false)
        .subset("test.sink.ganglia");
    GangliaSink30 sink = new GangliaSink30();
    sink.init(sinkConf);
    DatagramSocket created = sink.getDatagramSocket();
    // Failure modes: no socket at all, or the multicast variant.
    boolean missingOrMulticast = created == null || created instanceof MulticastSocket;
    assertFalse("Did not create DatagramSocket", missingOrMulticast);
}
Example usage of org.apache.hadoop.metrics2.impl.ConfigBuilder in the Apache Hadoop project, from class TestPatternFilter, method includeShouldOverrideExclude.
/**
 * Include patterns should take precedence over exclude patterns when a
 * name or tag matches both.
 */
@Test
public void includeShouldOverrideExclude() {
    // "foo" (and tag foo:f) is listed on both sides; include must win.
    ConfigBuilder builder = new ConfigBuilder()
        .add("p.include", "foo")
        .add("p.include.tags", "foo:f")
        .add("p.exclude", "foo")
        .add("p.exclude.tags", "foo:f");
    SubsetConfiguration conf = builder.subset("p");
    shouldAccept(conf, "foo");
    shouldAccept(conf, Arrays.asList(tag("foo", "", "f")));
    shouldAccept(conf, mockMetricsRecord("foo", Arrays.asList(tag("foo", "", "f"))));
}
Example usage of org.apache.hadoop.metrics2.impl.ConfigBuilder in the Apache Hadoop project, from class TestPatternFilter, method shouldAcceptUnmatchedWhenBothAreConfigured.
/**
 * Filters should accept unmatched items when both include and exclude
 * patterns are present: matches of the include list pass, matches of the
 * exclude list are rejected, and names matching neither list fall
 * through as accepted.
 */
@Test
public void shouldAcceptUnmatchedWhenBothAreConfigured() {
    ConfigBuilder builder = new ConfigBuilder()
        .add("p.include", "foo")
        .add("p.include.tags", "foo:f")
        .add("p.exclude", "bar")
        .add("p.exclude.tags", "bar:b");
    SubsetConfiguration conf = builder.subset("p");
    // Explicitly included name/tag/record.
    shouldAccept(conf, "foo");
    shouldAccept(conf, Arrays.asList(tag("foo", "", "f")));
    shouldAccept(conf, mockMetricsRecord("foo", Arrays.asList(tag("foo", "", "f"))));
    // Explicitly excluded name/tag, and records carrying an excluded part.
    shouldReject(conf, "bar");
    shouldReject(conf, Arrays.asList(tag("bar", "", "b")));
    shouldReject(conf, mockMetricsRecord("bar", Arrays.asList(tag("foo", "", "f"))));
    shouldReject(conf, mockMetricsRecord("foo", Arrays.asList(tag("bar", "", "b"))));
    // "foobar" matches neither list, so it is accepted.
    shouldAccept(conf, "foobar");
    shouldAccept(conf, Arrays.asList(tag("foobar", "", "")));
    shouldAccept(conf, mockMetricsRecord("foobar", Arrays.asList(tag("foobar", "", ""))));
}
Example usage of org.apache.hadoop.metrics2.impl.ConfigBuilder in the Apache Hadoop project, from class TestPatternFilter, method includeOnlyShouldOnlyIncludeMatched.
/**
 * Filters should handle white-listing correctly: with only include
 * patterns configured, exactly the matching names/tags pass.
 */
@Test
public void includeOnlyShouldOnlyIncludeMatched() {
    ConfigBuilder builder = new ConfigBuilder()
        .add("p.include", "foo")
        .add("p.include.tags", "foo:f");
    SubsetConfiguration whitelist = builder.subset("p");
    shouldAccept(whitelist, "foo");
    // Mixed tag list: only the matching tag (second entry) is accepted.
    shouldAccept(whitelist, Arrays.asList(tag("bar", "", ""), tag("foo", "", "f")), new boolean[] { false, true });
    shouldAccept(whitelist, mockMetricsRecord("foo", Arrays.asList(tag("bar", "", ""), tag("foo", "", "f"))));
    // Anything not on the whitelist is rejected, including a matching
    // tag name with the wrong value.
    shouldReject(whitelist, "bar");
    shouldReject(whitelist, Arrays.asList(tag("bar", "", "")));
    shouldReject(whitelist, Arrays.asList(tag("foo", "", "boo")));
    shouldReject(whitelist, mockMetricsRecord("bar", Arrays.asList(tag("foo", "", "f"))));
    shouldReject(whitelist, mockMetricsRecord("foo", Arrays.asList(tag("bar", "", ""))));
}
Example usage of org.apache.hadoop.metrics2.impl.ConfigBuilder in the Apache Hadoop project, from class TestFSNamesystemMBean, method testWithFSEditLogLock.
/**
 * Makes sure a JMX request can still be processed even while another
 * thread holds the FSEditLog monitor.
 */
@Test
public void testWithFSEditLogLock() throws Exception {
    Configuration hdfsConf = new Configuration();
    int cachePeriodSecs = 1;
    // Use a short metrics cache period so the cached JMX values expire
    // during the sleep below, forcing a real fetch under contention.
    new ConfigBuilder()
        .add("namenode.period", cachePeriodSecs)
        .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-namenode"));
    MiniDFSCluster miniCluster = null;
    try {
        miniCluster = new MiniDFSCluster.Builder(hdfsConf).build();
        miniCluster.waitActive();
        // Hold the edit-log monitor while a separate client thread
        // issues JMX calls; the calls must not block on this lock.
        synchronized (miniCluster.getNameNode().getFSImage().getEditLog()) {
            // Let the metrics cache go stale before polling.
            Thread.sleep(cachePeriodSecs * 1000);
            MBeanClient jmxClient = new MBeanClient();
            jmxClient.start();
            jmxClient.join(20000);
            assertTrue("JMX calls are blocked when FSEditLog" + " is synchronized by another thread", jmxClient.succeeded);
            jmxClient.interrupt();
        }
    } finally {
        if (miniCluster != null) {
            miniCluster.shutdown();
        }
    }
}
Aggregations