Use of org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy in project metron by apache.
The class SourceHandler, method initialize:
private void initialize() throws IOException {
    this.fs = FileSystem.get(new Configuration());
    this.currentFile = createOutputFile();
    // If a time-based rotation policy is configured, schedule rotation on a daemon timer.
    if (this.rotationPolicy instanceof TimedRotationPolicy) {
        long interval = ((TimedRotationPolicy) this.rotationPolicy).getInterval();
        this.rotationTimer = new Timer(true);
        TimerTask task = new TimerTask() {
            @Override
            public void run() {
                try {
                    rotateOutputFile();
                } catch (IOException e) {
                    LOG.warn("IOException during scheduled file rotation.", e);
                }
            }
        };
        this.rotationTimer.scheduleAtFixedRate(task, interval, interval);
    }
}
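The snippet above calls getInterval() on the policy but never shows how the policy itself is built. The following minimal sketch, standalone and not part of the Metron code, constructs a TimedRotationPolicy and reads its interval in milliseconds, which is the value fed to the Timer above; the 30-second interval is an arbitrary example.

import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy;

public class TimedRotationPolicySketch {
    public static void main(String[] args) {
        // A policy that asks for rotation every 30 seconds (value chosen for illustration).
        TimedRotationPolicy policy = new TimedRotationPolicy(30.0f, TimedRotationPolicy.TimeUnit.SECONDS);
        // getInterval() reports the interval in milliseconds; this is what
        // SourceHandler.initialize() passes to Timer.scheduleAtFixedRate().
        long intervalMs = policy.getInterval();
        System.out.println("rotation interval (ms): " + intervalMs); // 30000
    }
}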
Use of org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy in project storm by apache.
The class AbstractHdfsBolt, method prepare (variant taking a raw, untyped conf Map):
/**
 * Marked as final to prevent override. Subclasses should implement the doPrepare() method.
 * @param conf the topology configuration
 * @param topologyContext the context of this bolt's task
 * @param collector the collector used to emit and ack tuples
 */
public final void prepare(Map conf, TopologyContext topologyContext, OutputCollector collector) {
    this.writeLock = new Object();
    if (this.syncPolicy == null) {
        throw new IllegalStateException("SyncPolicy must be specified.");
    }
    if (this.rotationPolicy == null) {
        throw new IllegalStateException("RotationPolicy must be specified.");
    }
    if (this.fsUrl == null) {
        throw new IllegalStateException("File system URL must be specified.");
    }
    writers = new WritersMap(this.maxOpenFiles);
    this.collector = collector;
    this.fileNameFormat.prepare(conf, topologyContext);
    this.hdfsConfig = new Configuration();
    // Copy any bolt-specific overrides registered under configKey into the Hadoop Configuration.
    Map<String, Object> map = (Map<String, Object>) conf.get(this.configKey);
    if (map != null) {
        for (String key : map.keySet()) {
            this.hdfsConfig.set(key, String.valueOf(map.get(key)));
        }
    }
    try {
        HdfsSecurityUtil.login(conf, hdfsConfig);
        doPrepare(conf, topologyContext, collector);
    } catch (Exception e) {
        throw new RuntimeException("Error preparing HdfsBolt: " + e.getMessage(), e);
    }
    if (this.rotationPolicy instanceof TimedRotationPolicy) {
        startTimedRotationPolicy();
    }
}
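In prepare(), any map registered in the topology configuration under the bolt's configKey is copied into the bolt's Hadoop Configuration. The sketch below shows one way such a map might be registered when building the topology; the key name and property values are illustrative, and the bolt must be built with a matching withConfigKey(...) call.

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.Config;

public class HdfsBoltConfigSketch {
    public static void main(String[] args) {
        // Per-bolt Hadoop overrides; prepare() calls hdfsConfig.set(key, String.valueOf(value))
        // for each entry, so plain string values are the safest choice. Values are hypothetical.
        Map<String, Object> hdfsOverrides = new HashMap<>();
        hdfsOverrides.put("fs.defaultFS", "hdfs://namenode:8020");
        hdfsOverrides.put("dfs.replication", "1");

        Config topoConf = new Config();
        // Must match the key the bolt was built with, e.g. new HdfsBolt().withConfigKey("hdfs.config").
        topoConf.put("hdfs.config", hdfsOverrides);
    }
}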
Use of org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy in project storm by apache.
The class AbstractHdfsBolt, method prepare (newer variant with a typed conf Map and a collector-aware WritersMap):
/**
 * Marked as final to prevent override. Subclasses should implement the doPrepare() method.
 */
@Override
public final void prepare(Map<String, Object> conf, TopologyContext topologyContext, OutputCollector collector) {
    this.writeLock = new Object();
    if (this.syncPolicy == null) {
        throw new IllegalStateException("SyncPolicy must be specified.");
    }
    if (this.rotationPolicy == null) {
        throw new IllegalStateException("RotationPolicy must be specified.");
    }
    if (this.fsUrl == null) {
        throw new IllegalStateException("File system URL must be specified.");
    }
    writers = new WritersMap(this.maxOpenFiles, collector);
    this.collector = collector;
    this.fileNameFormat.prepare(conf, topologyContext);
    this.hdfsConfig = new Configuration();
    Map<String, Object> map = (Map<String, Object>) conf.get(this.configKey);
    if (map != null) {
        for (String key : map.keySet()) {
            this.hdfsConfig.set(key, String.valueOf(map.get(key)));
        }
    }
    try {
        HdfsSecurityUtil.login(conf, hdfsConfig);
        doPrepare(conf, topologyContext, collector);
    } catch (Exception e) {
        throw new RuntimeException("Error preparing HdfsBolt: " + e.getMessage(), e);
    }
    if (this.rotationPolicy instanceof TimedRotationPolicy) {
        startTimedRotationPolicy();
    }
}
Use of org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy in project storm by apache.
The class AbstractHdfsBolt, method startTimedRotationPolicy:
private void startTimedRotationPolicy() {
    long interval = ((TimedRotationPolicy) this.rotationPolicy).getInterval();
    this.rotationTimer = new Timer(true);
    TimerTask task = new TimerTask() {
        @Override
        public void run() {
            doRotationAndRemoveAllWriters();
        }
    };
    this.rotationTimer.scheduleAtFixedRate(task, interval, interval);
}
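startTimedRotationPolicy() schedules the first rotation one full interval after prepare() and uses a daemon Timer so the thread does not keep the worker JVM alive. Below is a self-contained sketch of the same java.util.Timer pattern outside any bolt; the one-second interval and the printed message are stand-ins for illustration only.

import java.util.Timer;
import java.util.TimerTask;

public class RotationTimerSketch {
    public static void main(String[] args) throws InterruptedException {
        long intervalMs = 1_000L; // stand-in for TimedRotationPolicy.getInterval()
        Timer rotationTimer = new Timer(true); // daemon thread, as in AbstractHdfsBolt
        TimerTask task = new TimerTask() {
            @Override
            public void run() {
                // AbstractHdfsBolt calls doRotationAndRemoveAllWriters() here.
                System.out.println("rotating output files at " + System.currentTimeMillis());
            }
        };
        // First run after one full interval, then repeatedly at that interval.
        rotationTimer.scheduleAtFixedRate(task, intervalMs, intervalMs);
        Thread.sleep(3_500); // keep the demo alive long enough for a few rotations
    }
}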
Use of org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy in project storm by apache.
The class HdfsFileTopology, method main:
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setNumWorkers(1);
    SentenceSpout spout = new SentenceSpout();
    // sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    // rotate files every minute
    FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath("/tmp/foo/").withExtension(".txt");
    // use "|" instead of "," for field delimiter
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter("|");
    // load per-bolt HDFS configuration overrides from the YAML file given as the second argument
    Yaml yaml = new Yaml();
    InputStream in = new FileInputStream(args[1]);
    Map<String, Object> yamlConf = (Map<String, Object>) yaml.load(in);
    in.close();
    config.put("hdfs.config", yamlConf);
    HdfsBolt bolt = new HdfsBolt()
            .withConfigKey("hdfs.config")
            .withFsUrl(args[0])
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .addRotationAction(new MoveFileAction().toDestination("/tmp/dest2/"));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> HdfsBolt
    builder.setBolt(BOLT_ID, bolt, 4).shuffleGrouping(SENTENCE_SPOUT_ID);
    String topoName = TOPOLOGY_NAME;
    if (args.length == 3) {
        topoName = args[2];
    } else if (args.length > 3) {
        System.out.println("Usage: HdfsFileTopology [hdfs url] [hdfs yaml config file] <topology name>");
        return;
    }
    StormSubmitter.submitTopology(topoName, config, builder.createTopology());
}
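For quick experimentation the same topology can be run in local mode instead of being submitted with StormSubmitter. The following is a hedged sketch assuming Storm 2.x, where LocalCluster is AutoCloseable and provided by the storm-server artifact; the spout/bolt wiring is elided and the sleep length is arbitrary, needing only to exceed the one-minute rotation interval.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.topology.TopologyBuilder;

public class HdfsFileTopologyLocalSketch {
    public static void main(String[] args) throws Exception {
        Config config = new Config();
        TopologyBuilder builder = new TopologyBuilder();
        // ... configure the spout and HdfsBolt exactly as in main() above ...
        try (LocalCluster cluster = new LocalCluster()) {
            cluster.submitTopology("hdfs-file-topology-local", config, builder.createTopology());
            Thread.sleep(2 * 60 * 1000); // let the 1-minute TimedRotationPolicy fire at least once
        }
    }
}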