Usage of org.apache.storm.shade.org.jgrapht.traverse.TopologicalOrderIterator in the Apache Storm project.
Example: class StreamBuilder, method build.
/**
 * Builds a new {@link StormTopology} for the computation expressed via the stream api.
 *
 * @return the storm topology
 */
public StormTopology build() {
    // Reset per-build bookkeeping so repeated calls start from a clean slate.
    nodeGroupingInfo.clear();
    windowInfo.clear();
    curGroup.clear();
    TopologyBuilder builder = new TopologyBuilder();
    // Walk the DAG in topological order so every node is seen after its parents.
    TopologicalOrderIterator<Node, Edge> nodeIter = new TopologicalOrderIterator<>(graph, queue());
    while (nodeIter.hasNext()) {
        Node current = nodeIter.next();
        if (current instanceof SpoutNode) {
            addSpout(builder, (SpoutNode) current);
        } else if (current instanceof ProcessorNode) {
            handleProcessorNode((ProcessorNode) current, builder);
        } else if (current instanceof PartitionNode) {
            // A repartition boundary: record the grouping, then close out the
            // group of processor nodes accumulated so far.
            updateNodeGroupingInfo((PartitionNode) current);
            processCurGroup(builder);
        } else if (current instanceof WindowNode) {
            // A window boundary likewise flushes the current processor group.
            updateWindowInfo((WindowNode) current);
            processCurGroup(builder);
        } else if (current instanceof SinkNode) {
            processCurGroup(builder);
            addSink(builder, (SinkNode) current);
        }
    }
    // Flush any trailing processor group that was not closed by a boundary node.
    processCurGroup(builder);
    mayBeAddTsField();
    return builder.createTopology();
}
Usage of org.apache.storm.shade.org.jgrapht.traverse.TopologicalOrderIterator in the Apache Storm project.
Example: class ProcessorBoltDelegate, method prepare.
/**
 * Prepares this delegate by wiring a {@code ProcessorContext} into every processor
 * node owned by this bolt, then propagating the timestamp field (if any) to the
 * emitting contexts and recording the input task count per initial stream.
 *
 * @param topoConf  the topology configuration
 * @param context   the topology context for this bolt instance
 * @param collector the output collector used for emitting downstream
 * @throws IllegalStateException if a non-processor node is found in this bolt's subgraph
 */
void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    this.topoConf = topoConf;
    topologyContext = context;
    outputCollector = collector;
    // Restrict the traversal to only the nodes this bolt owns.
    DirectedSubgraph<Node, Edge> ownedSubgraph = new DirectedSubgraph<>(graph, new HashSet<>(nodes), null);
    for (TopologicalOrderIterator<Node, Edge> iter = new TopologicalOrderIterator<>(ownedSubgraph); iter.hasNext(); ) {
        Node current = iter.next();
        if (!(current instanceof ProcessorNode)) {
            throw new IllegalStateException("Not a processor node " + current);
        }
        ProcessorNode procNode = (ProcessorNode) current;
        List<ProcessorNode> childNodes = StreamUtil.getChildren(ownedSubgraph, procNode);
        ProcessorContext procContext;
        if (childNodes.isEmpty()) {
            // Leaf within this bolt's subgraph: its output leaves the bolt.
            procContext = createEmittingContext(procNode);
        } else {
            // Group the in-bolt children by the parent stream that feeds them.
            Multimap<String, ProcessorNode> childrenByStream = ArrayListMultimap.create();
            for (ProcessorNode childNode : childNodes) {
                for (String parentStream : childNode.getParentStreams(procNode)) {
                    childrenByStream.put(parentStream, childNode);
                }
            }
            ForwardingProcessorContext forwarding = new ForwardingProcessorContext(procNode, childrenByStream);
            // If this node also has children outside the bolt, chain an emitting
            // context so tuples are both forwarded internally and emitted out.
            procContext = hasOutgoingChild(procNode, new HashSet<>(childNodes))
                    ? new ChainedProcessorContext(procNode, forwarding, createEmittingContext(procNode))
                    : forwarding;
        }
        procNode.initProcessorContext(procContext);
    }
    if (timestampField != null) {
        for (EmittingProcessorContext emittingCtx : emittingProcessorContexts) {
            emittingCtx.setTimestampField(timestampField);
        }
    }
    for (String inputStream : streamToInitialProcessors.keySet()) {
        streamToInputTaskCount.put(inputStream, getStreamInputTaskCount(context, inputStream));
    }
}
Usage of org.apache.storm.shade.org.jgrapht.traverse.TopologicalOrderIterator in the Apache Storm project.
Example: class SubtopologyBolt, method prepare.
@Override
public void prepare(Map<String, Object> conf, TopologyContext context, BatchOutputCollector batchCollector) {
    // First pass: create any per-task state declared by this sub-topology's nodes
    // and stash it in the topology context under the state's id.
    int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
    for (Node node : nodes) {
        if (node.stateInfo != null) {
            State state = node.stateInfo.spec.stateFactory.makeState(conf, context, context.getThisTaskIndex(), numTasks);
            context.setTaskData(node.stateInfo.id, state);
        }
    }
    DirectedSubgraph<Node, ?> ownedSubgraph = new DirectedSubgraph<>(graph, nodes, null);
    TopologicalOrderIterator<Node, ?> topoIter = new TopologicalOrderIterator<>(ownedSubgraph);
    // NOTE: stateIndex advances for EVERY node visited, processor or not,
    // so it tracks the node's position in the topological order.
    int stateIndex = 0;
    while (topoIter.hasNext()) {
        Node node = topoIter.next();
        if (node instanceof ProcessorNode) {
            ProcessorNode procNode = (ProcessorNode) node;
            String batchGroup = batchGroups.get(node);
            if (!myTopologicallyOrdered.containsKey(batchGroup)) {
                myTopologicallyOrdered.put(batchGroup, new ArrayList<>());
            }
            myTopologicallyOrdered.get(batchGroup).add(procNode.processor);
            // Collect each parent's stream id and output factory. Parents outside
            // this bolt are fed through an InitialReceiver keyed by stream id.
            List<String> parentStreams = new ArrayList<>();
            List<Factory> parentFactories = new ArrayList<>();
            for (Node parent : TridentUtils.getParents(graph, node)) {
                parentStreams.add(parent.streamId);
                if (nodes.contains(parent)) {
                    parentFactories.add(outputFactories.get(parent));
                } else {
                    if (!roots.containsKey(parent.streamId)) {
                        roots.put(parent.streamId, new InitialReceiver(parent.streamId, getSourceOutputFields(context, parent.streamId)));
                    }
                    roots.get(parent.streamId).addReceiver(procNode.processor);
                    parentFactories.add(roots.get(parent.streamId).getOutputFactory());
                }
            }
            // Wire targets: children inside this bolt receive tuples directly;
            // any child outside the bolt means output must also be bridged out
            // through the batch collector.
            List<TupleReceiver> targets = new ArrayList<>();
            boolean hasExternalChild = false;
            for (Node child : TridentUtils.getChildren(graph, node)) {
                if (nodes.contains(child)) {
                    targets.add(((ProcessorNode) child).processor);
                } else {
                    hasExternalChild = true;
                }
            }
            if (hasExternalChild) {
                targets.add(new BridgeReceiver(batchCollector));
            }
            TridentContext triContext = new TridentContext(procNode.selfOutFields, parentFactories, parentStreams, targets, procNode.streamId, stateIndex, batchCollector);
            procNode.processor.prepare(conf, context, triContext);
            outputFactories.put(node, procNode.processor.getOutputFactory());
        }
        stateIndex++;
    }
}
Aggregations