Use of org.apache.storm.hdfs.bolt.format.DefaultSequenceFormat in project storm by apache.
From the makeSeqBolt method of the TestSequenceFileBolt class.
private SequenceFileBolt makeSeqBolt(String nameNodeAddr, int countSync, float rotationSizeMB) {
    SyncPolicy fieldsSyncPolicy = new CountSyncPolicy(countSync);
    FileRotationPolicy fieldsRotationPolicy = new FileSizeRotationPolicy(rotationSizeMB, FileSizeRotationPolicy.Units.MB);
    FileNameFormat fieldsFileNameFormat = new DefaultFileNameFormat().withPath(testRoot);
    // write the "key" and "value" tuple fields as the sequence-file key and value
    SequenceFormat seqFormat = new DefaultSequenceFormat("key", "value");
    return new SequenceFileBolt()
        .withFsUrl(nameNodeAddr)
        .withFileNameFormat(fieldsFileNameFormat)
        .withRotationPolicy(fieldsRotationPolicy)
        .withSequenceFormat(seqFormat)
        .withSyncPolicy(fieldsSyncPolicy);
}
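DefaultSequenceFormat simply maps two named tuple fields onto the Hadoop Writable key and value of the sequence file. When a different mapping is needed, the same SequenceFormat interface can be implemented directly. The sketch below is a hypothetical custom format, not part of the Storm codebase; it assumes the interface shape keyClass()/valueClass()/key(Tuple)/value(Tuple) that DefaultSequenceFormat satisfies, and the class name and field names ("id", "payload") are illustrative.

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.storm.hdfs.bolt.format.SequenceFormat;
import org.apache.storm.tuple.Tuple;

// Hypothetical custom format: writes the "id" field as the sequence-file key
// and the "payload" field as the value, mirroring what DefaultSequenceFormat
// does for its configured key/value field names.
public class IdPayloadSequenceFormat implements SequenceFormat {

    @Override
    public Class keyClass() {
        return LongWritable.class;   // key type written to the sequence file
    }

    @Override
    public Class valueClass() {
        return Text.class;           // value type written to the sequence file
    }

    @Override
    public Writable key(Tuple tuple) {
        return new LongWritable(tuple.getLongByField("id"));
    }

    @Override
    public Writable value(Tuple tuple) {
        return new Text(tuple.getStringByField("payload"));
    }
}

Such a format would be plugged in through the same withSequenceFormat(...) call shown above.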
Use of org.apache.storm.hdfs.bolt.format.DefaultSequenceFormat in project storm by apache.
From the main method of the SequenceFileTopology class.
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setNumWorkers(1);
    SentenceSpout spout = new SentenceSpout();
    // sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    // rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
        .withPath("/tmp/source/")
        .withExtension(".seq");
    // create sequence format instance.
    DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");
    Yaml yaml = new Yaml();
    InputStream in = new FileInputStream(args[1]);
    Map<String, Object> yamlConf = (Map<String, Object>) yaml.load(in);
    in.close();
    config.put("hdfs.config", yamlConf);
    SequenceFileBolt bolt = new SequenceFileBolt()
        .withFsUrl(args[0])
        .withConfigKey("hdfs.config")
        .withFileNameFormat(fileNameFormat)
        .withSequenceFormat(format)
        .withRotationPolicy(rotationPolicy)
        .withSyncPolicy(syncPolicy)
        .withCompressionType(SequenceFile.CompressionType.RECORD)
        .withCompressionCodec("deflate")
        .addRotationAction(new MoveFileAction().toDestination("/tmp/dest/"));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> MyBolt
    builder.setBolt(BOLT_ID, bolt, 4).shuffleGrouping(SENTENCE_SPOUT_ID);
    String topoName = TOPOLOGY_NAME;
    if (args.length == 3) {
        topoName = args[2];
    } else if (args.length > 3) {
        System.out.println("Usage: SequenceFileTopology [hdfs url] [hdfs yaml config file] <topology name>");
        return;
    }
    StormSubmitter.submitTopology(topoName, config, builder.createTopology());
}
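The topology above relies on SentenceSpout emitting tuples whose "timestamp" and "sentence" fields match the names passed to DefaultSequenceFormat. The spout below is a minimal hypothetical stand-in written against the Storm 2.x spout API, not the actual SentenceSpout from the Storm examples; the sentence list is invented and only illustrates the field contract the sequence format expects.

import java.util.Map;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

// Minimal stand-in spout: emits a long "timestamp" and a String "sentence",
// matching the field names DefaultSequenceFormat("timestamp", "sentence") reads.
public class SentenceSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;
    private final String[] sentences = {
        "the cow jumped over the moon",
        "an apple a day keeps the doctor away"
    };
    private int index = 0;

    @Override
    public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void nextTuple() {
        // emit values in the same order as the declared fields
        collector.emit(new Values(System.currentTimeMillis(), sentences[index]));
        index = (index + 1) % sentences.length;
        Utils.sleep(50);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("timestamp", "sentence"));
    }
}

Because DefaultSequenceFormat looks tuple fields up by name, only the field names have to agree between the spout's declaration and the format's constructor arguments.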