Use of org.apache.commons.configuration2.SubsetConfiguration in project hadoop by apache.
From the class TestKafkaMetrics, method testPutMetrics.
@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testPutMetrics() throws Exception {
  // Create a record by mocking the MetricsRecord class.
  MetricsRecord record = mock(MetricsRecord.class);
  when(record.tags()).thenReturn(Lists.newArrayList(
      new MetricsTag(KafkaMetricsInfo.KafkaTag, "test_tag")));
  when(record.timestamp()).thenReturn(System.currentTimeMillis());

  // Create a metric using the AbstractMetric class.
  AbstractMetric metric = new AbstractMetric(KafkaMetricsInfo.KafkaCounter) {
    @Override
    public Number value() {
      return Integer.valueOf(123);
    }

    @Override
    public MetricType type() {
      return null;
    }

    @Override
    public void visit(MetricsVisitor visitor) {
    }
  };

  // Create a list of metrics.
  Iterable<AbstractMetric> metrics = Lists.newArrayList(metric);
  when(record.name()).thenReturn("Kafka record name");
  when(record.metrics()).thenReturn(metrics);

  SubsetConfiguration conf = mock(SubsetConfiguration.class);
  when(conf.getString(KafkaSink.BROKER_LIST)).thenReturn("localhost:9092");
  String topic = "myTestKafkaTopic";
  when(conf.getString(KafkaSink.TOPIC)).thenReturn(topic);

  // Create the KafkaSink object and initialize it.
  kafkaSink = new KafkaSink();
  kafkaSink.init(conf);

  // Create a mock KafkaProducer as the producer for KafkaSink.
  Producer<Integer, byte[]> mockProducer = mock(KafkaProducer.class);
  kafkaSink.setProducer(mockProducer);

  // Create the JSON representation of the record.
  StringBuilder jsonLines = recordToJson(record);
  if (LOG.isDebugEnabled()) {
    LOG.debug("kafka message: " + jsonLines.toString());
  }

  // Stub send() to return a mock Future, then send the record.
  Future<RecordMetadata> f = mock(Future.class);
  when(mockProducer.send((ProducerRecord) anyObject())).thenReturn(f);
  kafkaSink.putMetrics(record);

  // Capture the argument and verify it.
  ArgumentCaptor<ProducerRecord> argument = ArgumentCaptor.forClass(ProducerRecord.class);
  verify(mockProducer).send(argument.capture());

  // Compare the received data with the original.
  ProducerRecord<Integer, byte[]> data = argument.getValue();
  String jsonResult = new String(data.value());
  if (LOG.isDebugEnabled()) {
    LOG.debug("kafka result: " + jsonResult);
  }
  assertEquals(jsonLines.toString(), jsonResult);
}
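The recordToJson(record) helper called above is not part of this snippet. A minimal sketch of what such a helper might look like, assuming it hand-builds a flat JSON object from the record's name, timestamp, tags, and metrics; the exact field layout is an assumption, not copied from the Hadoop source:

private StringBuilder recordToJson(MetricsRecord record) {
  // Illustrative only: serialize the record the same way the test expects
  // KafkaSink to serialize it, so the two strings can be compared.
  StringBuilder json = new StringBuilder("{");
  json.append("\"name\": \"").append(record.name()).append("\"");
  json.append(", \"timestamp\": ").append(record.timestamp());
  for (MetricsTag tag : record.tags()) {
    json.append(", \"").append(tag.name()).append("\": \"").append(tag.value()).append("\"");
  }
  for (AbstractMetric m : record.metrics()) {
    json.append(", \"").append(m.name()).append("\": ").append(m.value());
  }
  json.append("}");
  return json;
}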
Use of org.apache.commons.configuration2.SubsetConfiguration in project hadoop by apache.
From the class TestRollingFileSystemSink, method testGetRollInterval.
/**
 * Test whether the roll interval is correctly calculated from the
 * configuration settings.
 */
@Test
public void testGetRollInterval() {
  doTestGetRollInterval(1, new String[] { "m", "min", "minute", "minutes" }, 60 * 1000L);
  doTestGetRollInterval(1, new String[] { "h", "hr", "hour", "hours" }, 60 * 60 * 1000L);
  doTestGetRollInterval(1, new String[] { "d", "day", "days" }, 24 * 60 * 60 * 1000L);

  // With no units, an interval of "1" defaults to 1 hour.
  ConfigBuilder builder = new ConfigBuilder();
  SubsetConfiguration conf = builder.add("sink.roll-interval", "1").subset("sink");
  // We can reuse the same sink every time because we're setting the same
  // property every time.
  RollingFileSystemSink sink = new RollingFileSystemSink();
  sink.init(conf);
  assertEquals(3600000L, sink.getRollInterval());

  for (char c : "abcefgijklnopqrtuvwxyz".toCharArray()) {
    builder = new ConfigBuilder();
    conf = builder.add("sink.roll-interval", "90 " + c).subset("sink");

    try {
      sink.init(conf);
      sink.getRollInterval();
      fail("Allowed a roll interval with bad units: " + c);
    } catch (MetricsException ex) {
      // Expected
    }
  }
}
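The doTestGetRollInterval helper is likewise not shown. A minimal sketch, assuming it checks that every unit spelling in the array parses to the same interval in milliseconds:

private void doTestGetRollInterval(int num, String[] units, long expectedMillis) {
  RollingFileSystemSink sink = new RollingFileSystemSink();

  for (String unit : units) {
    // e.g. "1 m", "1 min", "1 minute", and "1 minutes" should all be equivalent
    SubsetConfiguration conf =
        new ConfigBuilder().add("sink.roll-interval", num + " " + unit).subset("sink");

    sink.init(conf);
    assertEquals(expectedMillis, sink.getRollInterval());
  }
}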
Use of org.apache.commons.configuration2.SubsetConfiguration in project hadoop by apache.
From the class TestRollingFileSystemSink, method testInit.
@Test
public void testInit() {
  ConfigBuilder builder = new ConfigBuilder();
  SubsetConfiguration conf = builder
      .add("sink.roll-interval", "10m")
      .add("sink.roll-offset-interval-millis", "1")
      .add("sink.basepath", "path")
      .add("sink.ignore-error", "true")
      .add("sink.allow-append", "true")
      .add("sink.source", "src")
      .subset("sink");
  RollingFileSystemSink sink = new RollingFileSystemSink();
  sink.init(conf);
  assertEquals("The roll interval was not set correctly", 600000L, sink.rollIntervalMillis);
  assertEquals("The roll offset interval was not set correctly", 1, sink.rollOffsetIntervalMillis);
  assertEquals("The base path was not set correctly", new Path("path"), sink.basePath);
  assertEquals("ignore-error was not set correctly", true, sink.ignoreError);
  assertEquals("allow-append was not set correctly", true, sink.allowAppend);
  assertEquals("The source was not set correctly", "src", sink.source);
}
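All of these tests depend on subset() stripping the configured prefix so the sink reads bare keys such as roll-interval. A standalone sketch of that behavior using commons-configuration2 directly, with a plain MapConfiguration standing in for Hadoop's ConfigBuilder test helper:

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.configuration2.MapConfiguration;
import org.apache.commons.configuration2.SubsetConfiguration;

public class SubsetDemo {
  public static void main(String[] args) {
    Map<String, Object> props = new HashMap<>();
    props.put("sink.roll-interval", "10m");
    props.put("sink.basepath", "path");

    MapConfiguration full = new MapConfiguration(props);
    // subset("sink") returns a view keyed without the "sink." prefix
    SubsetConfiguration conf = (SubsetConfiguration) full.subset("sink");

    System.out.println(conf.getString("roll-interval")); // prints 10m
    System.out.println(conf.getString("basepath")); // prints path
  }
}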
Use of org.apache.commons.configuration2.SubsetConfiguration in project hadoop by apache.
From the class TestGangliaSink, method testShouldCreateDatagramSocketByDefault.
@Test
public void testShouldCreateDatagramSocketByDefault() throws Exception {
  SubsetConfiguration conf = new ConfigBuilder().subset("test.sink.ganglia");
  GangliaSink30 gangliaSink = new GangliaSink30();
  gangliaSink.init(conf);
  DatagramSocket socket = gangliaSink.getDatagramSocket();
  assertFalse("Did not create DatagramSocket",
      socket == null || socket instanceof MulticastSocket);
}
Use of org.apache.commons.configuration2.SubsetConfiguration in project hadoop by apache.
From the class TestGangliaSink, method testShouldCreateDatagramSocketIfMulticastIsDisabled.
@Test
public void testShouldCreateDatagramSocketIfMulticastIsDisabled() throws Exception {
  SubsetConfiguration conf = new ConfigBuilder()
      .add("test.sink.ganglia.multicast", false)
      .subset("test.sink.ganglia");
  GangliaSink30 gangliaSink = new GangliaSink30();
  gangliaSink.init(conf);
  DatagramSocket socket = gangliaSink.getDatagramSocket();
  assertFalse("Did not create DatagramSocket",
      socket == null || socket instanceof MulticastSocket);
}
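The natural companion to the two tests above is the multicast-enabled case, which should produce a MulticastSocket. A sketch along the same lines, mirroring the property names used above rather than copied from the Hadoop source:

@Test
public void testShouldCreateMulticastSocketIfMulticastIsEnabled() throws Exception {
  SubsetConfiguration conf = new ConfigBuilder()
      .add("test.sink.ganglia.multicast", true)
      .subset("test.sink.ganglia");
  GangliaSink30 gangliaSink = new GangliaSink30();
  gangliaSink.init(conf);
  DatagramSocket socket = gangliaSink.getDatagramSocket();
  // The null check is redundant with instanceof but kept for symmetry
  // with the assertFalse conditions above.
  assertTrue("Did not create MulticastSocket",
      socket != null && socket instanceof MulticastSocket);
}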