Use of org.apache.storm.task.TopologyContext in project nifi by apache.
The class TestNiFiBolt, method testTickTupleWhenExceedingBatchInterval.
@Test
public void testTickTupleWhenExceedingBatchInterval() throws InterruptedException {
    final int batchInterval = 1;
    final NiFiBolt bolt = new TestableNiFiBolt(siteToSiteClientConfig, niFiDataPacketBuilder, tickFrequency)
            .withBatchInterval(batchInterval);

    // prepare the bolt
    Map conf = mock(Map.class);
    TopologyContext context = mock(TopologyContext.class);
    OutputCollector collector = mock(OutputCollector.class);
    bolt.prepare(conf, context, collector);

    // process a regular tuple
    Tuple dataTuple = MockTupleHelpers.mockTuple("nifi", "nifi");
    bolt.execute(dataTuple);

    // sleep so we pass the batch interval (the interval is in seconds, the sleep argument in milliseconds)
    Thread.sleep(batchInterval + 1000);

    // process a tick tuple
    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
    bolt.execute(tickTuple);

    // should have produced one data packet and acked it
    verify(niFiDataPacketBuilder, times(1)).createNiFiDataPacket(eq(dataTuple));
    verify(collector, times(1)).ack(eq(dataTuple));
}
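Both NiFi tests on this page fabricate tuples with MockTupleHelpers, which is not shown here. A minimal sketch of what such a helper plausibly looks like, assuming Mockito and Storm's Constants class (the real helper lives in the NiFi Storm bundle's test sources):

import org.apache.storm.Constants;
import org.apache.storm.tuple.Tuple;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MockTupleHelpers {

    // Mock a tuple carrying the given source component and stream id.
    public static Tuple mockTuple(final String component, final String streamId) {
        final Tuple tuple = mock(Tuple.class);
        when(tuple.getSourceComponent()).thenReturn(component);
        when(tuple.getSourceStreamId()).thenReturn(streamId);
        return tuple;
    }

    // Storm marks tick tuples with the system component id ("__system")
    // and the tick stream id ("__tick").
    public static Tuple mockTickTuple() {
        return mockTuple(Constants.SYSTEM_COMPONENT_ID, Constants.SYSTEM_TICK_STREAM_ID);
    }
}

The same "__system"/"__tick" convention is what the Metron test further down stubs by hand.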
Use of org.apache.storm.task.TopologyContext in project nifi by apache.
The class TestNiFiBolt, method testBatchSize.
@Test
public void testBatchSize() {
    final int batchSize = 3;
    final NiFiBolt bolt = new TestableNiFiBolt(siteToSiteClientConfig, niFiDataPacketBuilder, tickFrequency)
            .withBatchSize(batchSize);

    // prepare the bolt
    Map conf = mock(Map.class);
    TopologyContext context = mock(TopologyContext.class);
    OutputCollector collector = mock(OutputCollector.class);
    bolt.prepare(conf, context, collector);

    // process a regular tuple, haven't hit batch size yet
    Tuple dataTuple1 = MockTupleHelpers.mockTuple("nifi", "nifi");
    bolt.execute(dataTuple1);
    verifyZeroInteractions(niFiDataPacketBuilder);

    // process a regular tuple, haven't hit batch size yet
    Tuple dataTuple2 = MockTupleHelpers.mockTuple("nifi", "nifi");
    bolt.execute(dataTuple2);
    verifyZeroInteractions(niFiDataPacketBuilder);

    // process a regular tuple, triggers batch size
    Tuple dataTuple3 = MockTupleHelpers.mockTuple("nifi", "nifi");
    bolt.execute(dataTuple3);

    verify(niFiDataPacketBuilder, times(batchSize)).createNiFiDataPacket(any(Tuple.class));
    verify(collector, times(batchSize)).ack(any(Tuple.class));
}
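The assertions above only hold if the bolt queues incoming tuples and, once batchSize of them have accumulated, creates a data packet for each and acks them in one pass. A simplified skeleton of that batching pattern (illustrative only, not the actual NiFiBolt source; unlike NiFiBolt it flushes on every tick rather than checking an elapsed batch interval):

import java.util.ArrayList;
import java.util.List;
import org.apache.storm.Constants;
import org.apache.storm.tuple.Tuple;

// Hypothetical batching skeleton for a Storm bolt.
public abstract class BatchingBoltSketch {

    private final int batchSize;
    private final List<Tuple> queue = new ArrayList<>();

    protected BatchingBoltSketch(int batchSize) {
        this.batchSize = batchSize;
    }

    public void execute(Tuple tuple) {
        if (isTickTuple(tuple)) {
            // time-based flush: a tick arrived, push out whatever is queued
            flush();
        } else {
            queue.add(tuple);
            if (queue.size() >= batchSize) {
                // size-based flush: the batch is full
                flush();
            }
        }
    }

    private void flush() {
        if (!queue.isEmpty()) {
            finishBatch(new ArrayList<>(queue));
            queue.clear();
        }
    }

    private static boolean isTickTuple(Tuple tuple) {
        return Constants.SYSTEM_COMPONENT_ID.equals(tuple.getSourceComponent())
                && Constants.SYSTEM_TICK_STREAM_ID.equals(tuple.getSourceStreamId());
    }

    // Send the batch to the downstream system, then ack (or fail) every tuple in it.
    protected abstract void finishBatch(List<Tuple> batch);
}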
Use of org.apache.storm.task.TopologyContext in project nifi by apache.
The class TestNiFiBolt, method testFailure.
@Test
public void testFailure() throws IOException {
    final int batchSize = 3;
    final NiFiBolt bolt = new TestableNiFiBolt(siteToSiteClientConfig, niFiDataPacketBuilder, tickFrequency)
            .withBatchSize(batchSize);
    // make the mocked transaction blow up when the batch is completed
    when(((TestableNiFiBolt) bolt).transaction.complete())
            .thenThrow(new RuntimeException("Could not complete transaction"));

    // prepare the bolt
    Map conf = mock(Map.class);
    TopologyContext context = mock(TopologyContext.class);
    OutputCollector collector = mock(OutputCollector.class);
    bolt.prepare(conf, context, collector);

    // process a regular tuple, haven't hit batch size yet
    Tuple dataTuple1 = MockTupleHelpers.mockTuple("nifi", "nifi");
    bolt.execute(dataTuple1);
    verifyZeroInteractions(niFiDataPacketBuilder);

    // process a regular tuple, haven't hit batch size yet
    Tuple dataTuple2 = MockTupleHelpers.mockTuple("nifi", "nifi");
    bolt.execute(dataTuple2);
    verifyZeroInteractions(niFiDataPacketBuilder);

    // process a regular tuple, triggers batch size
    Tuple dataTuple3 = MockTupleHelpers.mockTuple("nifi", "nifi");
    bolt.execute(dataTuple3);

    // the transaction failed, so every tuple in the batch should be failed rather than acked
    verify(niFiDataPacketBuilder, times(batchSize)).createNiFiDataPacket(any(Tuple.class));
    verify(collector, times(batchSize)).fail(any(Tuple.class));
}
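All three tests pass a tickFrequency to the bolt's constructor. A Storm bolt opts into periodic tick tuples through its component configuration; a hedged sketch of how that wiring typically looks (the class and field names are assumptions, but Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS is Storm's real setting):

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.Config;
import org.apache.storm.topology.base.BaseRichBolt;

// Hypothetical sketch: how a bolt such as NiFiBolt can ask Storm
// for periodic tick tuples.
public abstract class TickingBoltSketch extends BaseRichBolt {

    private final int tickFrequencySeconds;

    protected TickingBoltSketch(int tickFrequencySeconds) {
        this.tickFrequencySeconds = tickFrequencySeconds;
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        final Map<String, Object> conf = new HashMap<>();
        // Storm delivers a tick tuple to this bolt every N seconds
        conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, tickFrequencySeconds);
        return conf;
    }
}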
Use of org.apache.storm.task.TopologyContext in project metron by apache.
The class BulkMessageWriterBoltTest, method testFlushOnBatchSize.
@Test
public void testFlushOnBatchSize() throws Exception {
    BulkMessageWriterBolt bulkMessageWriterBolt = new BulkMessageWriterBolt("zookeeperUrl")
            .withBulkMessageWriter(bulkMessageWriter)
            .withMessageGetter(MessageGetters.JSON_FROM_FIELD.name())
            .withMessageGetterField("message");
    bulkMessageWriterBolt.setCuratorFramework(client);
    bulkMessageWriterBolt.setZKCache(cache);
    bulkMessageWriterBolt.getConfigurations().updateSensorIndexingConfig(sensorType,
            new FileInputStream(sampleSensorIndexingConfigPath));
    bulkMessageWriterBolt.declareOutputFields(declarer);
    verify(declarer, times(1)).declareStream(eq("error"), argThat(new FieldsMatcher("message")));

    // prepare must translate an init failure into a RuntimeException
    Map stormConf = new HashMap();
    doThrow(new Exception()).when(bulkMessageWriter)
            .init(eq(stormConf), any(TopologyContext.class), any(WriterConfiguration.class));
    try {
        bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector);
        fail("A runtime exception should be thrown when bulkMessageWriter.init throws an exception");
    } catch (RuntimeException e) {
        // expected
    }
    reset(bulkMessageWriter);
    when(bulkMessageWriter.getName()).thenReturn("hdfs");
    bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector);
    verify(bulkMessageWriter, times(1)).init(eq(stormConf), any(TopologyContext.class), any(WriterConfiguration.class));

    // queue four messages; the batch size has not been reached, so nothing is written yet
    tupleList = new ArrayList<>();
    messageList = new ArrayList<>();
    for (int i = 0; i < 4; i++) {
        when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(i));
        tupleList.add(tuple);
        messageList.add(fullMessageList.get(i));
        bulkMessageWriterBolt.execute(tuple);
        verify(bulkMessageWriter, times(0)).write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList), eq(messageList));
    }

    // the fifth message fills the batch and triggers a single write, acking all five tuples
    when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(4));
    tupleList.add(tuple);
    messageList.add(fullMessageList.get(4));
    BulkWriterResponse response = new BulkWriterResponse();
    response.addAllSuccesses(tupleList);
    when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
            argThat(new MessageListMatcher(messageList)))).thenReturn(response);
    bulkMessageWriterBolt.execute(tuple);
    verify(bulkMessageWriter, times(1)).write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
            argThat(new MessageListMatcher(messageList)));
    verify(outputCollector, times(5)).ack(tuple);

    // a write failure should still ack the tuples, but also emit to the error stream and report the error
    reset(outputCollector);
    doThrow(new Exception()).when(bulkMessageWriter).write(eq(sensorType), any(WriterConfiguration.class),
            Matchers.anyListOf(Tuple.class), Matchers.anyListOf(JSONObject.class));
    when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(0));
    UnitTestHelper.setLog4jLevel(BulkWriterComponent.class, Level.FATAL);
    for (int i = 0; i < 5; i++) {
        bulkMessageWriterBolt.execute(tuple);
    }
    UnitTestHelper.setLog4jLevel(BulkWriterComponent.class, Level.ERROR);
    verify(outputCollector, times(5)).ack(tuple);
    verify(outputCollector, times(1)).emit(eq(Constants.ERROR_STREAM), any(Values.class));
    verify(outputCollector, times(1)).reportError(any(Throwable.class));
}
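The declareStream verification relies on FieldsMatcher, a custom Mockito matcher not shown on this page. A plausible reconstruction, assuming the Mockito 2+ ArgumentMatcher interface and Storm's Fields.toList():

import java.util.Arrays;
import java.util.List;
import org.apache.storm.tuple.Fields;
import org.mockito.ArgumentMatcher;

// Hypothetical matcher: accepts a Fields object whose field names
// equal the expected list, in order.
public class FieldsMatcher implements ArgumentMatcher<Fields> {

    private final List<String> expected;

    public FieldsMatcher(String... fieldNames) {
        this.expected = Arrays.asList(fieldNames);
    }

    @Override
    public boolean matches(Fields fields) {
        return fields != null && expected.equals(fields.toList());
    }
}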
Use of org.apache.storm.task.TopologyContext in project metron by apache.
The class BulkMessageWriterBoltTest, method testFlushOnTickTuple.
@Test
public void testFlushOnTickTuple() throws Exception {
    FakeClock clock = new FakeClock();
    BulkMessageWriterBolt bulkMessageWriterBolt = new BulkMessageWriterBolt("zookeeperUrl")
            .withBulkMessageWriter(bulkMessageWriter)
            .withMessageGetter(MessageGetters.JSON_FROM_FIELD.name())
            .withMessageGetterField("message");
    bulkMessageWriterBolt.setCuratorFramework(client);
    bulkMessageWriterBolt.setZKCache(cache);
    bulkMessageWriterBolt.getConfigurations().updateSensorIndexingConfig(sensorType,
            new FileInputStream(sampleSensorIndexingConfigPath));
    bulkMessageWriterBolt.declareOutputFields(declarer);
    verify(declarer, times(1)).declareStream(eq("error"), argThat(new FieldsMatcher("message")));

    Map stormConf = new HashMap();
    when(bulkMessageWriter.getName()).thenReturn("elasticsearch");
    bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector, clock);
    verify(bulkMessageWriter, times(1)).init(eq(stormConf), any(TopologyContext.class), any(WriterConfiguration.class));
    int batchTimeout = bulkMessageWriterBolt.getDefaultBatchTimeout();
    assertEquals(14, batchTimeout);

    // queue three messages; the batch size has not been reached, so nothing is written yet
    tupleList = new ArrayList<>();
    messageList = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        when(tuple.getValueByField("message")).thenReturn(fullMessageList.get(i));
        tupleList.add(tuple);
        messageList.add(fullMessageList.get(i));
        bulkMessageWriterBolt.execute(tuple);
        verify(bulkMessageWriter, times(0)).write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList), eq(messageList));
    }

    when(tuple.getValueByField("message")).thenReturn(null);
    // mark the tuple as a TickTuple, part 1 of 2
    when(tuple.getSourceComponent()).thenReturn("__system");
    // mark the tuple as a TickTuple, part 2 of 2
    when(tuple.getSourceStreamId()).thenReturn("__tick");
    BulkWriterResponse response = new BulkWriterResponse();
    response.addAllSuccesses(tupleList);
    when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
            argThat(new MessageListMatcher(messageList)))).thenReturn(response);

    // a tick arriving before the batch timeout has elapsed must not flush
    clock.advanceToSeconds(2);
    bulkMessageWriterBolt.execute(tuple);
    verify(bulkMessageWriter, times(0)).write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
            argThat(new MessageListMatcher(messageList)));
    // 1 tick
    verify(outputCollector, times(1)).ack(tuple);

    // a tick arriving after the timeout has elapsed flushes the queued messages
    clock.advanceToSeconds(9);
    bulkMessageWriterBolt.execute(tuple);
    verify(bulkMessageWriter, times(1)).write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList),
            argThat(new MessageListMatcher(messageList)));
    assertEquals(3, tupleList.size());
    // 3 messages + 2nd tick
    verify(outputCollector, times(5)).ack(tuple);
}
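This test injects a FakeClock through a four-argument prepare overload so it can advance time by hand instead of sleeping. The class is not shown on this page; a minimal sketch of such a fake, with the method name taken from the calls above:

import java.util.concurrent.TimeUnit;

// Hypothetical sketch of a manually advanced clock the bolt consults
// instead of the wall clock, making timeout tests deterministic.
public class FakeClock {

    private long nowNanos = 0L;

    // What the bolt reads in place of System.currentTimeMillis().
    public long currentTimeMillis() {
        return TimeUnit.NANOSECONDS.toMillis(nowNanos);
    }

    // Jump the clock to an absolute point in time, in seconds.
    public void advanceToSeconds(long seconds) {
        nowNanos = TimeUnit.SECONDS.toNanos(seconds);
    }
}

Advancing to 2 seconds stays inside the batch timeout, so the first tick is merely acked; advancing to 9 seconds evidently crosses the timeout configured for this sensor and forces the flush of the three queued messages.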