Example usage of org.apache.storm.shade.org.jgrapht.graph.DirectedSubgraph in the Apache Storm project.
From the class ProcessorBoltDelegate, method prepare:
/**
 * Wires up the processor pipeline for this bolt delegate.
 *
 * <p>Walks the subgraph induced by this bolt's nodes in topological order and
 * attaches a {@link ProcessorContext} to each {@link ProcessorNode}: leaves get
 * an emitting context, interior nodes get a forwarding context keyed by stream,
 * and nodes that additionally feed children outside the subgraph get a chained
 * context that both forwards and emits. Finally propagates the timestamp field
 * (if any) to all emitting contexts and records the input task count per
 * initial stream.
 *
 * @param topoConf  the topology configuration
 * @param context   the topology context for this bolt
 * @param collector the output collector used by emitting contexts
 */
void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    this.topoConf = topoConf;
    topologyContext = context;
    outputCollector = collector;
    DirectedSubgraph<Node, Edge> sub = new DirectedSubgraph<>(graph, new HashSet<>(nodes), null);
    TopologicalOrderIterator<Node, Edge> order = new TopologicalOrderIterator<>(sub);
    while (order.hasNext()) {
        Node current = order.next();
        if (!(current instanceof ProcessorNode)) {
            throw new IllegalStateException("Not a processor node " + current);
        }
        ProcessorNode procNode = (ProcessorNode) current;
        List<ProcessorNode> childNodes = StreamUtil.getChildren(sub, procNode);
        ProcessorContext procCtx;
        if (childNodes.isEmpty()) {
            // No downstream processors within this subgraph: emit directly.
            procCtx = createEmittingContext(procNode);
        } else {
            // Group children by the parent stream they consume from.
            Multimap<String, ProcessorNode> childrenByStream = ArrayListMultimap.create();
            for (ProcessorNode childNode : childNodes) {
                for (String parentStream : childNode.getParentStreams(procNode)) {
                    childrenByStream.put(parentStream, childNode);
                }
            }
            ForwardingProcessorContext forwarding = new ForwardingProcessorContext(procNode, childrenByStream);
            // A node with children outside this subgraph must also emit, so
            // chain the forwarding context with an emitting one.
            procCtx = hasOutgoingChild(procNode, new HashSet<>(childNodes))
                    ? new ChainedProcessorContext(procNode, forwarding, createEmittingContext(procNode))
                    : forwarding;
        }
        procNode.initProcessorContext(procCtx);
    }
    if (timestampField != null) {
        for (EmittingProcessorContext emitCtx : emittingProcessorContexts) {
            emitCtx.setTimestampField(timestampField);
        }
    }
    for (String inputStream : streamToInitialProcessors.keySet()) {
        streamToInputTaskCount.put(inputStream, getStreamInputTaskCount(context, inputStream));
    }
}
Example usage of org.apache.storm.shade.org.jgrapht.graph.DirectedSubgraph in the Apache Storm project.
From the class SubtopologyBolt, method prepare:
/**
 * Prepares this subtopology bolt: creates per-node state, then walks the node
 * subgraph in topological order, preparing each {@link ProcessorNode}'s
 * processor and wiring its parent output factories and downstream receivers
 * (including a {@code BridgeReceiver} when a child lives outside this bolt).
 *
 * @param conf           the topology configuration
 * @param context        the topology context for this bolt
 * @param batchCollector collector used for emitting and for bridging tuples
 *                       to nodes outside this subtopology
 */
@Override
public void prepare(Map<String, Object> conf, TopologyContext context, BatchOutputCollector batchCollector) {
    int thisComponentNumTasks = context.getComponentTasks(context.getThisComponentId()).size();
    // Create and register any per-node state before wiring up processors.
    for (Node n : nodes) {
        if (n.stateInfo != null) {
            State s = n.stateInfo.spec.stateFactory.makeState(conf, context, context.getThisTaskIndex(), thisComponentNumTasks);
            context.setTaskData(n.stateInfo.id, s);
        }
    }
    DirectedSubgraph<Node, ?> subgraph = new DirectedSubgraph<>(graph, nodes, null);
    TopologicalOrderIterator<Node, ?> it = new TopologicalOrderIterator<>(subgraph);
    int stateIndex = 0;
    while (it.hasNext()) {
        Node n = it.next();
        if (n instanceof ProcessorNode) {
            ProcessorNode pn = (ProcessorNode) n;
            String batchGroup = batchGroups.get(n);
            // computeIfAbsent replaces the containsKey/put/get triple and does
            // a single map lookup.
            myTopologicallyOrdered.computeIfAbsent(batchGroup, k -> new ArrayList<>()).add(pn.processor);
            List<String> parentStreams = new ArrayList<>();
            List<Factory> parentFactories = new ArrayList<>();
            for (Node p : TridentUtils.getParents(graph, n)) {
                parentStreams.add(p.streamId);
                if (nodes.contains(p)) {
                    // Parent lives in this subtopology: use its output factory.
                    parentFactories.add(outputFactories.get(p));
                } else {
                    // Parent is outside this bolt: route its stream through an
                    // InitialReceiver, created lazily per stream id.
                    InitialReceiver receiver = roots.computeIfAbsent(
                        p.streamId, id -> new InitialReceiver(id, getSourceOutputFields(context, id)));
                    receiver.addReceiver(pn.processor);
                    parentFactories.add(receiver.getOutputFactory());
                }
            }
            List<TupleReceiver> targets = new ArrayList<>();
            boolean outgoingNode = false;
            for (Node cn : TridentUtils.getChildren(graph, n)) {
                if (nodes.contains(cn)) {
                    targets.add(((ProcessorNode) cn).processor);
                } else {
                    outgoingNode = true;
                }
            }
            if (outgoingNode) {
                // At least one child is outside this bolt: bridge tuples out.
                targets.add(new BridgeReceiver(batchCollector));
            }
            TridentContext triContext = new TridentContext(pn.selfOutFields, parentFactories, parentStreams, targets, pn.streamId, stateIndex, batchCollector);
            pn.processor.prepare(conf, context, triContext);
            outputFactories.put(n, pn.processor.getOutputFactory());
        }
        // NOTE(review): stateIndex advances for every node, including
        // non-ProcessorNodes — presumably intentional so indices align with the
        // topological position; confirm before changing.
        stateIndex++;
    }
}
Aggregations