Use of org.apache.metron.writer.hdfs.HdfsWriter in project metron by apache.
The prepare method of the BulkMessageWriterBolt class:
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
  this.collector = collector;
  super.prepare(stormConf, context, collector);
  // Choose how messages are extracted from tuples: from a named field, or via the default strategy.
  if (messageGetField != null) {
    messageGetStrategy = MessageGetters.valueOf(messageGetStrategyType).get(messageGetField);
  } else {
    messageGetStrategy = MessageGetters.valueOf(messageGetStrategyType).get();
  }
  // Writers wrapped in WriterToBulkWriter need their configuration transformed; others pass through unchanged.
  if (bulkMessageWriter instanceof WriterToBulkWriter) {
    configurationTransformation = WriterToBulkWriter.TRANSFORMATION;
  } else {
    configurationTransformation = x -> x;
  }
  ackTuplesPolicy = new AckTuplesPolicy(collector, messageGetStrategy);
  try {
    WriterConfiguration writerconf = configurationTransformation.apply(
        getConfigurationStrategy().createWriterConfig(bulkMessageWriter, getConfigurations()));
    if (maxBatchTimeout == 0) {
      // This means getComponentConfiguration was never called to initialize maxBatchTimeout,
      // probably because we are in a unit test scenario. So calculate it here.
      BatchTimeoutHelper timeoutHelper =
          new BatchTimeoutHelper(writerconf::getAllConfiguredTimeouts, batchTimeoutDivisor);
      maxBatchTimeout = timeoutHelper.getMaxBatchTimeout();
    }
    BulkWriterComponent<JSONObject> bulkWriterComponent = new BulkWriterComponent<>(maxBatchTimeout);
    bulkWriterComponent.addFlushPolicy(ackTuplesPolicy);
    setWriterComponent(bulkWriterComponent);
    bulkMessageWriter.init(stormConf, writerconf);
    // HdfsWriter additionally needs the TopologyContext to resolve its file name format.
    if (bulkMessageWriter instanceof HdfsWriter) {
      ((HdfsWriter) bulkMessageWriter).initFileNameFormat(context);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
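
For context, below is a minimal sketch (not taken from the Metron source) of how an HdfsWriter might be handed to a BulkMessageWriterBolt when a topology is assembled in plain Java rather than via Flux. The fluent methods withFileNameFormat, withRotationPolicy and withBulkMessageWriter, the two-argument bolt constructor (ZooKeeper quorum plus configuration strategy name), and all component ids, paths and addresses are assumptions for illustration only.

import org.apache.metron.writer.bolt.BulkMessageWriterBolt;
import org.apache.metron.writer.hdfs.HdfsWriter;
import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy;
import org.apache.storm.topology.TopologyBuilder;

public class HdfsIndexingTopologySketch {
  public static void main(String[] args) {
    // Writer that lands messages in HDFS; the prepare method above will later call
    // init(...) on it and, because it is an HdfsWriter, initFileNameFormat(context).
    HdfsWriter hdfsWriter = new HdfsWriter()                       // assumed fluent setters
        .withFileNameFormat(new DefaultFileNameFormat()
            .withPath("/apps/metron/indexing/")                    // hypothetical output path
            .withExtension(".json"))
        .withRotationPolicy(new TimedRotationPolicy(30f, TimedRotationPolicy.TimeUnit.SECONDS));

    // Bolt that batches tuples and delegates flushing to the configured bulk writer.
    BulkMessageWriterBolt bolt =
        new BulkMessageWriterBolt("zookeeper:2181", "INDEXING")    // hypothetical ZK quorum and strategy
            .withBulkMessageWriter(hdfsWriter);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setBolt("hdfsIndexingBolt", bolt, 1)
        .shuffleGrouping("indexingSpout");                         // hypothetical upstream component id
    // builder.createTopology() would then be submitted via LocalCluster or StormSubmitter.
  }
}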