Use of org.apache.storm.trident.Stream in project storm by apache: the SampleOpenTsdbTridentTopology class, main method.
public static void main(String[] args) throws Exception {
    if (args.length == 0) {
        throw new IllegalArgumentException("There should be at least one argument. Run as `SampleOpenTsdbTridentTopology <tsdb-url>`");
    }
    String tsdbUrl = args[0];
    final OpenTsdbClient.Builder openTsdbClientBuilder = OpenTsdbClient.newBuilder(tsdbUrl);
    final OpenTsdbStateFactory openTsdbStateFactory =
            new OpenTsdbStateFactory(openTsdbClientBuilder, Collections.singletonList(TupleOpenTsdbDatapointMapper.DEFAULT_MAPPER));
    TridentTopology tridentTopology = new TridentTopology();
    final Stream stream = tridentTopology.newStream("metric-tsdb-stream", new MetricGenBatchSpout(10));
    // Log each tuple as it passes through, then persist the metric fields to OpenTSDB.
    stream.peek(new Consumer() {
        @Override
        public void accept(TridentTuple input) {
            LOG.info("########### Received tuple: [{}]", input);
        }
    }).partitionPersist(openTsdbStateFactory, MetricGenSpout.DEFAULT_METRIC_FIELDS, new OpenTsdbStateUpdater());
    Config conf = new Config();
    conf.setDebug(true);
    String topoName = "word-count";
    if (args.length > 1) {
        topoName = args[1];
    }
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, tridentTopology.build());
}
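Since org.apache.storm.trident.operation.Consumer declares a single accept(TridentTuple) method, the anonymous class above can normally be collapsed into a lambda. A minimal sketch, assuming Java 8+ and the same stream and state factory as above:

    stream.peek(input -> LOG.info("########### Received tuple: [{}]", input))
          .partitionPersist(openTsdbStateFactory, MetricGenSpout.DEFAULT_METRIC_FIELDS, new OpenTsdbStateUpdater());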
Use of org.apache.storm.trident.Stream in project storm by apache: the TridentKafkaTopology class, buildTopology method.
private static StormTopology buildTopology(String brokerConnectionString) {
    Fields fields = new Fields("word", "count");
    FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
            new Values("storm", "1"), new Values("trident", "1"), new Values("needs", "1"), new Values("javadoc", "1"));
    spout.setCycle(true);
    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);
    Properties props = new Properties();
    props.put("bootstrap.servers", brokerConnectionString);
    props.put("acks", "1");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
            .withProducerProperties(props)
            .withKafkaTopicSelector(new DefaultTopicSelector("test"))
            .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("word", "count"));
    stream.partitionPersist(stateFactory, fields, new TridentKafkaUpdater(), new Fields());
    return topology.build();
}
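buildTopology only assembles the StormTopology; a caller still has to submit it. A minimal driver sketch, assuming the broker connection string arrives as the first program argument; the topology name and the max-spout-pending value are hypothetical and not taken from the example above:

    public static void main(String[] args) throws Exception {
        if (args.length == 0) {
            throw new IllegalArgumentException("Run as `TridentKafkaTopology <broker-connection-string>`");
        }
        Config conf = new Config();
        conf.setMaxSpoutPending(5); // illustrative back-pressure setting
        // "trident-kafka" is a hypothetical topology name chosen for this sketch.
        StormSubmitter.submitTopology("trident-kafka", conf, buildTopology(args[0]));
    }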
Use of org.apache.storm.trident.Stream in project storm by apache: the TridentProjectRel class, tridentPlan method.
@Override
public void tridentPlan(TridentPlanCreator planCreator) throws Exception {
    // SingleRel: plan the single input RelNode first, then take its resulting stream off the stack.
    RelNode input = getInput();
    StormRelUtils.getStormRelInput(input).tridentPlan(planCreator);
    Stream inputStream = planCreator.pop().toStream();
    String stageName = StormRelUtils.getStageName(this);
    String projectionClassName = StormRelUtils.getClassName(this);
    List<String> outputFieldNames = getRowType().getFieldNames();
    int outputCount = outputFieldNames.size();
    List<RexNode> childExps = getChildExps();
    RelDataType inputRowType = getInput(0).getRowType();
    // Compile the projection expressions into an executable instance and apply it with map().
    ExecutableExpression projectionInstance = planCreator.createScalarInstance(childExps, inputRowType, projectionClassName);
    Stream finalStream = inputStream
            .map(new EvaluationFunction(projectionInstance, outputCount, planCreator.getDataContext()), new Fields(outputFieldNames))
            .name(stageName);
    planCreator.addStream(finalStream);
}
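The projection is applied through Stream.map(MapFunction, Fields), which declares the output fields of the mapped stream, followed by name() to label the stage. A standalone sketch of that same pattern with an illustrative MapFunction (EvaluationFunction itself evaluates compiled Calcite expressions, which is not reproduced here):

    // Illustrative only: project the first input field to an upper-cased output field.
    MapFunction toUpper = new MapFunction() {
        @Override
        public Values execute(TridentTuple input) {
            return new Values(input.getString(0).toUpperCase());
        }
    };
    Stream projected = inputStream
            .map(toUpper, new Fields("word_upper")) // output fields of the projected stream
            .name("project-stage");                 // stage label, as tridentPlan() does with stageName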
Use of org.apache.storm.trident.Stream in project storm by apache: the SampleDruidBoltTridentTopology class, main method.
public static void main(String[] args) throws Exception {
    if (args.length == 0) {
        throw new IllegalArgumentException("There should be at least one argument. Run as `SampleDruidBoltTridentTopology <zk-url>`");
    }
    TridentTopology tridentTopology = new TridentTopology();
    DruidBeamFactory druidBeamFactory = new SampleDruidBeamFactoryImpl(new HashMap<String, Object>());
    ITupleDruidEventMapper<Map<String, Object>> eventMapper = new TupleDruidEventMapper<>(TupleDruidEventMapper.DEFAULT_FIELD_NAME);
    final Stream stream = tridentTopology.newStream("batch-event-gen", new SimpleBatchSpout(10));
    // Log each tuple as it passes through, then persist the "event" field to Druid via the beam state.
    stream.peek(new Consumer() {
        @Override
        public void accept(TridentTuple input) {
            LOG.info("########### Received tuple: [{}]", input);
        }
    }).partitionPersist(new DruidBeamStateFactory<Map<String, Object>>(druidBeamFactory, eventMapper), new Fields("event"), new DruidBeamStateUpdater());
    Config conf = new Config();
    conf.setDebug(true);
    conf.put("druid.tranquility.zk.connect", args[0]);
    if (args.length > 1) {
        // A topology name was supplied: submit to a remote cluster.
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[1], conf, tridentTopology.build());
    } else {
        // No topology name: run in a local cluster for 30 seconds, then exit.
        conf.setMaxTaskParallelism(3);
        try (LocalCluster cluster = new LocalCluster();
             LocalTopology topo = cluster.submitTopology("druid-test", conf, tridentTopology.build())) {
            Thread.sleep(30000);
        }
        System.exit(0);
    }
}
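partitionPersist expects each tuple to carry its event under the single field named by TupleDruidEventMapper.DEFAULT_FIELD_NAME (the Fields("event") above). A hedged sketch of the kind of value SimpleBatchSpout could emit for that field; the keys are purely illustrative, since the real schema is dictated by SampleDruidBeamFactoryImpl:

    // Hypothetical event payload; the actual dimensions and metrics depend on the beam factory's schema.
    Map<String, Object> event = new HashMap<>();
    event.put("timestamp", System.currentTimeMillis());
    event.put("host", "host-1");
    event.put("value", 42);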
Use of org.apache.storm.trident.Stream in project storm by apache: the WordCountTrident class, buildTopology method.
public static StormTopology buildTopology(String hbaseRoot) {
    Fields fields = new Fields("word", "count");
    FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
            new Values("storm", 1), new Values("trident", 1), new Values("needs", 1), new Values("javadoc", 1));
    spout.setCycle(true);
    TridentHBaseMapper tridentHBaseMapper = new SimpleTridentHBaseMapper()
            .withColumnFamily("cf")
            .withColumnFields(new Fields("word"))
            .withCounterFields(new Fields("count"))
            .withRowKeyField("word");
    HBaseValueMapper rowToStormValueMapper = new WordCountValueMapper();
    HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria();
    projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("cf", "count"));
    HBaseState.Options options = new HBaseState.Options()
            .withConfigKey(hbaseRoot)
            .withDurability(Durability.SYNC_WAL)
            .withMapper(tridentHBaseMapper)
            .withProjectionCriteria(projectionCriteria)
            .withRowToStormValueMapper(rowToStormValueMapper)
            .withTableName("WordCount");
    StateFactory factory = new HBaseStateFactory(options);
    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);
    // Write each batch of (word, count) tuples to HBase.
    stream.partitionPersist(factory, fields, new HBaseUpdater(), new Fields());
    // Query the same table back as static state and print the returned column name/value pairs.
    TridentState state = topology.newStaticState(factory);
    stream = stream.stateQuery(state, new Fields("word"), new HBaseQuery(), new Fields("columnName", "columnValue"));
    stream.each(new Fields("word", "columnValue"), new PrintFunction(), new Fields());
    return topology.build();
}
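buildTopology takes a config key rather than an HBase configuration: HBaseState resolves its HBase settings from the topology config under that key, so the submitter has to register a map there. A minimal driver sketch under that assumption; the "hbase.conf" key and the topology name are hypothetical:

    public static void main(String[] args) throws Exception {
        Config conf = new Config();
        conf.setMaxSpoutPending(5); // illustrative setting
        // "hbase.conf" is a hypothetical config key; storm-hbase reads HBase settings
        // from the map registered under the key passed to withConfigKey().
        conf.put("hbase.conf", new HashMap<String, Object>());
        StormSubmitter.submitTopology("wordcount-hbase", conf, buildTopology("hbase.conf"));
    }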