Example usage of org.apache.metron.writer.bolt.BulkMessageWriterBolt from the Apache Metron project:
class BulkMessageWriterBoltTest, method testFlushOnBatchSize.
@Test
public void testFlushOnBatchSize() throws Exception {
  // Verifies that the bolt buffers tuples until the configured batch size (5) is
  // reached, flushes exactly once at that point, and routes writer failures to the
  // error stream while still acking the tuples.
  BulkMessageWriterBolt bulkMessageWriterBolt = new BulkMessageWriterBolt("zookeeperUrl")
      .withBulkMessageWriter(bulkMessageWriter)
      .withMessageGetter(MessageGetters.JSON_FROM_FIELD.name())
      .withMessageGetterField("message");
  bulkMessageWriterBolt.setCuratorFramework(client);
  bulkMessageWriterBolt.setZKCache(cache);
  bulkMessageWriterBolt.getConfigurations()
      .updateSensorIndexingConfig(sensorType, new FileInputStream(sampleSensorIndexingConfigPath));

  // The bolt must declare an error stream carrying a single "message" field.
  bulkMessageWriterBolt.declareOutputFields(declarer);
  verify(declarer, times(1)).declareStream(eq("error"), argThat(new FieldsMatcher("message")));

  Map<String, Object> stormConf = new HashMap<>();

  // prepare() must translate a writer init failure into a RuntimeException.
  doThrow(new Exception())
      .when(bulkMessageWriter)
      .init(eq(stormConf), any(TopologyContext.class), any(WriterConfiguration.class));
  try {
    bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector);
    fail("A runtime exception should be thrown when bulkMessageWriter.init throws an exception");
  } catch (RuntimeException e) {
    // expected: prepare() wraps the checked init failure in a RuntimeException
  }

  reset(bulkMessageWriter);
  when(bulkMessageWriter.getName()).thenReturn("hdfs");
  bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector);
  verify(bulkMessageWriter, times(1))
      .init(eq(stormConf), any(TopologyContext.class), any(WriterConfiguration.class));

  // Feed 4 tuples: below the batch size of 5, so no write may happen yet.
  tupleList = new ArrayList<>();
  messageList = new ArrayList<>();
  for (int i = 0; i < 4; i++) {
    when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(i));
    tupleList.add(tuple);
    messageList.add(fullMessageList.get(i));
    bulkMessageWriterBolt.execute(tuple);
    verify(bulkMessageWriter, times(0))
        .write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList), eq(messageList));
  }

  // The 5th tuple hits the batch size and must trigger exactly one flush,
  // acking all 5 buffered tuples.
  when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(4));
  tupleList.add(tuple);
  messageList.add(fullMessageList.get(4));
  BulkWriterResponse response = new BulkWriterResponse();
  response.addAllSuccesses(tupleList);
  when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
      argThat(new MessageListMatcher(messageList)))).thenReturn(response);
  bulkMessageWriterBolt.execute(tuple);
  verify(bulkMessageWriter, times(1)).write(eq(sensorType), any(WriterConfiguration.class),
      eq(tupleList), argThat(new MessageListMatcher(messageList)));
  verify(outputCollector, times(5)).ack(tuple);

  // Now make the writer fail on every write: a full batch of 5 must still be
  // acked, with one emit to the error stream and one reported error.
  reset(outputCollector);
  doThrow(new Exception())
      .when(bulkMessageWriter)
      .write(eq(sensorType), any(WriterConfiguration.class),
          Matchers.anyListOf(Tuple.class), Matchers.anyListOf(JSONObject.class));
  when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(0));
  // Raise the log level so the expected write-failure stack trace doesn't spam the test output.
  UnitTestHelper.setLog4jLevel(BulkWriterComponent.class, Level.FATAL);
  for (int i = 0; i < 5; i++) {
    bulkMessageWriterBolt.execute(tuple);
  }
  UnitTestHelper.setLog4jLevel(BulkWriterComponent.class, Level.ERROR);
  verify(outputCollector, times(5)).ack(tuple);
  verify(outputCollector, times(1)).emit(eq(Constants.ERROR_STREAM), any(Values.class));
  verify(outputCollector, times(1)).reportError(any(Throwable.class));
}
Example usage of org.apache.metron.writer.bolt.BulkMessageWriterBolt from the Apache Metron project:
class BulkMessageWriterBoltTest, method testFlushOnTickTuple.
@Test
public void testFlushOnTickTuple() throws Exception {
  // Verifies time-based flushing: buffered tuples are written out only once a tick
  // tuple arrives after the batch timeout has elapsed (per the fake clock), not on
  // earlier ticks.
  FakeClock clock = new FakeClock();
  BulkMessageWriterBolt bulkMessageWriterBolt = new BulkMessageWriterBolt("zookeeperUrl")
      .withBulkMessageWriter(bulkMessageWriter)
      .withMessageGetter(MessageGetters.JSON_FROM_FIELD.name())
      .withMessageGetterField("message");
  bulkMessageWriterBolt.setCuratorFramework(client);
  bulkMessageWriterBolt.setZKCache(cache);
  bulkMessageWriterBolt.getConfigurations()
      .updateSensorIndexingConfig(sensorType, new FileInputStream(sampleSensorIndexingConfigPath));

  bulkMessageWriterBolt.declareOutputFields(declarer);
  verify(declarer, times(1)).declareStream(eq("error"), argThat(new FieldsMatcher("message")));

  Map<String, Object> stormConf = new HashMap<>();
  when(bulkMessageWriter.getName()).thenReturn("elasticsearch");
  // Inject the fake clock so the test controls elapsed time deterministically.
  bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector, clock);
  verify(bulkMessageWriter, times(1))
      .init(eq(stormConf), any(TopologyContext.class), any(WriterConfiguration.class));

  int batchTimeout = bulkMessageWriterBolt.getDefaultBatchTimeout();
  assertEquals(14, batchTimeout);

  // Buffer 3 tuples: below the batch size, so nothing is written yet.
  tupleList = new ArrayList<>();
  messageList = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(i));
    tupleList.add(tuple);
    messageList.add(fullMessageList.get(i));
    bulkMessageWriterBolt.execute(tuple);
    verify(bulkMessageWriter, times(0))
        .write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList), eq(messageList));
  }

  when(tuple.getValueByField("message")).thenReturn(null);
  // mark the tuple as a TickTuple, part 1 of 2
  when(tuple.getSourceComponent()).thenReturn("__system");
  // mark the tuple as a TickTuple, part 2 of 2
  when(tuple.getSourceStreamId()).thenReturn("__tick");

  BulkWriterResponse response = new BulkWriterResponse();
  response.addAllSuccesses(tupleList);
  when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
      argThat(new MessageListMatcher(messageList)))).thenReturn(response);

  // A tick at t=2s is before the timeout: no flush, only the tick itself is acked.
  clock.advanceToSeconds(2);
  bulkMessageWriterBolt.execute(tuple);
  verify(bulkMessageWriter, times(0)).write(eq(sensorType), any(WriterConfiguration.class),
      eq(tupleList), argThat(new MessageListMatcher(messageList)));
  // 1 tick
  verify(outputCollector, times(1)).ack(tuple);

  // A tick at t=9s is past the timeout: exactly one flush of the 3 buffered messages.
  clock.advanceToSeconds(9);
  bulkMessageWriterBolt.execute(tuple);
  verify(bulkMessageWriter, times(1)).write(eq(sensorType), any(WriterConfiguration.class),
      eq(tupleList), argThat(new MessageListMatcher(messageList)));
  assertEquals(3, tupleList.size());
  // 3 messages + 2nd tick
  verify(outputCollector, times(5)).ack(tuple);
}
Example usage of org.apache.metron.writer.bolt.BulkMessageWriterBolt from the Apache Metron project:
class BulkMessageWriterBoltTest, method testFlushOnBatchTimeout.
@Test
public void testFlushOnBatchTimeout() throws Exception {
  // Verifies that once the (divided) batch timeout has elapsed, the very next data
  // tuple triggers a flush even though the batch-size threshold was never reached.
  FakeClock clock = new FakeClock();
  // withBatchTimeoutDivisor(3) shrinks the default timeout so the test can elapse it quickly.
  BulkMessageWriterBolt bulkMessageWriterBolt = new BulkMessageWriterBolt("zookeeperUrl")
      .withBulkMessageWriter(bulkMessageWriter)
      .withMessageGetter(MessageGetters.JSON_FROM_FIELD.name())
      .withMessageGetterField("message")
      .withBatchTimeoutDivisor(3);
  bulkMessageWriterBolt.setCuratorFramework(client);
  bulkMessageWriterBolt.setZKCache(cache);
  bulkMessageWriterBolt.getConfigurations()
      .updateSensorIndexingConfig(sensorType, new FileInputStream(sampleSensorIndexingConfigPath));

  bulkMessageWriterBolt.declareOutputFields(declarer);
  verify(declarer, times(1)).declareStream(eq("error"), argThat(new FieldsMatcher("message")));

  Map<String, Object> stormConf = new HashMap<>();
  when(bulkMessageWriter.getName()).thenReturn("elasticsearch");
  // Inject the fake clock so the test controls elapsed time deterministically.
  bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector, clock);
  verify(bulkMessageWriter, times(1))
      .init(eq(stormConf), any(TopologyContext.class), any(WriterConfiguration.class));

  int batchTimeout = bulkMessageWriterBolt.getDefaultBatchTimeout();
  assertEquals(4, batchTimeout);

  // Buffer 3 tuples: below the batch size, so nothing is written yet.
  tupleList = new ArrayList<>();
  messageList = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(i));
    tupleList.add(tuple);
    messageList.add(fullMessageList.get(i));
    bulkMessageWriterBolt.execute(tuple);
    verify(bulkMessageWriter, times(0))
        .write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList), eq(messageList));
  }

  // Let more time pass than the 4-second timeout; the next data tuple must flush
  // all 4 buffered messages in a single write and ack all 4 tuples.
  clock.elapseSeconds(5);
  when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(3));
  tupleList.add(tuple);
  messageList.add(fullMessageList.get(3));
  BulkWriterResponse response = new BulkWriterResponse();
  response.addAllSuccesses(tupleList);
  when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
      argThat(new MessageListMatcher(messageList)))).thenReturn(response);
  bulkMessageWriterBolt.execute(tuple);
  verify(bulkMessageWriter, times(1)).write(eq(sensorType), any(WriterConfiguration.class),
      eq(tupleList), argThat(new MessageListMatcher(messageList)));
  verify(outputCollector, times(4)).ack(tuple);
}
Aggregations