Use of org.apache.storm.trident.TridentTopology in project storm by apache.
The class UserPersistanceTridentTopology, method getTopology:
@Override
public StormTopology getTopology() {
    TridentTopology topology = new TridentTopology();
    JdbcState.Options options = new JdbcState.Options()
            .withConnectionProvider(connectionProvider)
            .withMapper(this.jdbcMapper)
            .withJdbcLookupMapper(new SimpleJdbcLookupMapper(new Fields("dept_name"),
                    Lists.newArrayList(new Column("user_id", Types.INTEGER))))
            .withTableName(TABLE_NAME)
            .withSelectQuery(SELECT_QUERY);
    JdbcStateFactory jdbcStateFactory = new JdbcStateFactory(options);
    Stream stream = topology.newStream("userSpout", new UserSpout());
    TridentState state = topology.newStaticState(jdbcStateFactory);
    stream = stream.stateQuery(state, new Fields("user_id", "user_name", "create_date"),
            new JdbcQuery(), new Fields("dept_name"));
    stream.partitionPersist(jdbcStateFactory,
            new Fields("user_id", "user_name", "dept_name", "create_date"),
            new JdbcUpdater(), new Fields());
    return topology.build();
}
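The snippet assumes connectionProvider and jdbcMapper fields supplied by the enclosing class. A minimal sketch of that wiring using storm-jdbc's HikariCPConnectionProvider and SimpleJdbcMapper; the MySQL data source settings below are illustrative assumptions:

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.jdbc.common.ConnectionProvider;
import org.apache.storm.jdbc.common.HikariCPConnectionProvider;
import org.apache.storm.jdbc.mapper.JdbcMapper;
import org.apache.storm.jdbc.mapper.SimpleJdbcMapper;

Map<String, Object> hikariConfigMap = new HashMap<>();
hikariConfigMap.put("dataSourceClassName", "com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
hikariConfigMap.put("dataSource.url", "jdbc:mysql://localhost/test"); // illustrative database
hikariConfigMap.put("dataSource.user", "root");                       // illustrative credentials
hikariConfigMap.put("dataSource.password", "password");
ConnectionProvider connectionProvider = new HikariCPConnectionProvider(hikariConfigMap);
// SimpleJdbcMapper reads the target table's column metadata through the provider
JdbcMapper jdbcMapper = new SimpleJdbcMapper(TABLE_NAME, connectionProvider);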
Use of org.apache.storm.trident.TridentTopology in project storm by apache.
The class TridentKafkaConsumerTopology, method newTopology:
/**
 * @param drpc the LocalDRPC instance used to query the word counts; may be null in distributed mode
 * @param tridentSpout the Trident data source to consume sentences from
 * @return a Trident topology that consumes sentences from the Kafka topic specified using a
 *     {@link TransactionalTridentKafkaSpout}, computes the word count, and stores it in a {@link MemoryMapState}
 */
public static StormTopology newTopology(LocalDRPC drpc, ITridentDataSource tridentSpout) {
    final TridentTopology tridentTopology = new TridentTopology();
    addDRPCStream(tridentTopology, addTridentState(tridentTopology, tridentSpout), drpc);
    return tridentTopology.build();
}
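addTridentState and addDRPCStream are private helpers of TridentKafkaConsumerTopology that this snippet does not show. A plausible sketch of their shape, modeled on the word-count pipeline used throughout these examples; the stream ids, field names ("str" is the Kafka spout's default output field), and structure are assumptions, not the project's exact code:

// Sketch only: the real helpers in the storm project differ in detail.
private static TridentState addTridentState(TridentTopology topology, ITridentDataSource spout) {
    return topology.newStream("kafkaSpout", spout) // "kafkaSpout" is an illustrative stream id
            .each(new Fields("str"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));
}

private static Stream addDRPCStream(TridentTopology topology, TridentState state, LocalDRPC drpc) {
    return topology.newDRPCStream("words", drpc)
            .each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(state, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .project(new Fields("word", "count"));
}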
Use of org.apache.storm.trident.TridentTopology in project storm by apache.
The class TridentWordCount, method buildTopology:
public static StormTopology buildTopology(LocalDRPC drpc) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);
    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word")).groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")).parallelismHint(16);
    topology.newDRPCStream("words", drpc).each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word")).stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull()).project(new Fields("word", "count"));
    return topology.build();
}
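A minimal sketch of running this topology in local mode and querying it over DRPC, modeled on the usual local-mode driver for this example; the topology name, query string, and timing are illustrative:

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.LocalDRPC;

public class TridentWordCountRunner {
    public static void main(String[] args) throws Exception {
        Config conf = new Config();
        conf.setMaxSpoutPending(20); // bound the number of in-flight Trident batches
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(drpc)); // buildTopology(...) from above
        for (int i = 0; i < 10; i++) {
            // each execute() runs the "words" DRPC stream and returns the [word, count] pairs
            System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
            Thread.sleep(1000);
        }
        cluster.shutdown();
        drpc.shutdown();
    }
}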
Use of org.apache.storm.trident.TridentTopology in project storm by apache.
The class QueryPlanner, method compile:
public AbstractTridentProcessor compile(Map<String, ISqlTridentDataSource> sources, String query) throws Exception {
    TridentRel relNode = getPlan(query);
    TridentPlanCreator tridentPlanCreator = new TridentPlanCreator(sources, new RexBuilder(typeFactory));
    relNode.tridentPlan(tridentPlanCreator);
    final TridentTopology topology = tridentPlanCreator.getTopology();
    final IAggregatableStream lastStream = tridentPlanCreator.pop();
    final DataContext dc = tridentPlanCreator.getDataContext();
    final List<CompilingClassLoader> cls = tridentPlanCreator.getClassLoaders();
    return new AbstractTridentProcessor() {

        @Override
        public TridentTopology build() {
            return topology;
        }

        @Override
        public Stream outputStream() {
            return lastStream.toStream();
        }

        @Override
        public DataContext getDataContext() {
            return dc;
        }

        @Override
        public List<CompilingClassLoader> getClassLoaders() {
            return cls;
        }
    };
}
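For orientation, a hedged sketch of how a caller drives compile(); the query text is a made-up example, and the real call site is StormSqlImpl.submit in the next snippet:

Map<String, ISqlTridentDataSource> dataSources = new HashMap<>(); // filled in by CREATE TABLE handling
QueryPlanner planner = new QueryPlanner(schema); // schema: the Calcite schema the planner resolves tables against
AbstractTridentProcessor processor =
        planner.compile(dataSources, "SELECT ID FROM ORDERS WHERE UNIT_PRICE > 20");
TridentTopology tridentTopology = processor.build();   // the Trident plan created by relNode.tridentPlan(...)
StormTopology stormTopology = tridentTopology.build(); // ready for submission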
Use of org.apache.storm.trident.TridentTopology in project storm by apache.
The class StormSqlImpl, method submit:
@Override
public void submit(String name, Iterable<String> statements, Map<String, ?> stormConf, SubmitOptions opts, StormSubmitter.ProgressListener progressListener, String asUser) throws Exception {
    Map<String, ISqlTridentDataSource> dataSources = new HashMap<>();
    for (String sql : statements) {
        StormParser parser = new StormParser(sql);
        SqlNode node = parser.impl().parseSqlStmtEof();
        if (node instanceof SqlCreateTable) {
            handleCreateTableForTrident((SqlCreateTable) node, dataSources);
        } else if (node instanceof SqlCreateFunction) {
            handleCreateFunction((SqlCreateFunction) node);
        } else {
            QueryPlanner planner = new QueryPlanner(schema);
            AbstractTridentProcessor processor = planner.compile(dataSources, sql);
            TridentTopology topo = processor.build();
            Path jarPath = null;
            try {
                // QueryPlanner in Trident mode configures the topology with compiled classes,
                // so the new classes must be added to the topology jar. The topology is then
                // serialized and sent to Nimbus, and deserialized and executed in the workers.
                jarPath = Files.createTempFile("storm-sql", ".jar");
                System.setProperty("storm.jar", jarPath.toString());
                packageTopology(jarPath, processor);
                StormSubmitter.submitTopologyAs(name, stormConf, topo.build(), opts, progressListener, asUser);
            } finally {
                if (jarPath != null) {
                    Files.delete(jarPath);
                }
            }
        }
    }
}
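A hedged sketch of driving submit() end to end. The CREATE EXTERNAL TABLE statement follows the storm-sql documentation's DDL style, but the table definition, Kafka location, topology name, and null listener arguments here are illustrative assumptions:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import org.apache.storm.generated.SubmitOptions;
import org.apache.storm.generated.TopologyInitialStatus;
import org.apache.storm.sql.StormSql;

List<String> statements = Arrays.asList(
        // DDL goes through handleCreateTableForTrident(...) and registers a data source
        "CREATE EXTERNAL TABLE ORDERS (ID INT PRIMARY KEY, UNIT_PRICE INT) "
                + "LOCATION 'kafka://localhost:2181/brokers?topic=orders'",
        // any other statement is compiled by QueryPlanner and submitted as a topology
        "SELECT ID FROM ORDERS WHERE UNIT_PRICE > 20");
StormSql sql = StormSql.construct();
sql.submit("order-filtering", statements, new HashMap<>(),
        new SubmitOptions(TopologyInitialStatus.ACTIVE), null, null);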