Usage of org.apache.storm.streams.processors.ForwardingProcessorContext in the Apache Storm project.
From class ProcessorBoltDelegate, method prepare (legacy variant with a raw Map configuration parameter).
/**
 * Wires up the processing pipeline for this bolt before tuples arrive.
 *
 * <p>Walks the processor nodes owned by this delegate in topological order and
 * assigns each one a {@link ProcessorContext}: a leaf node gets an emitting
 * context, an interior node gets a forwarding context keyed by the streams its
 * children consume, and a node that additionally feeds a downstream bolt gets a
 * chained (forwarding + emitting) context. Finally propagates the configured
 * timestamp field to every emitting context and records, per input stream, how
 * many upstream tasks feed it.
 *
 * @param stormConf the topology configuration (raw {@code Map} — legacy signature)
 * @param context   this bolt's topology context
 * @param collector the output collector used by emitting contexts
 * @throws IllegalStateException if a non-processor node is found in the subgraph
 */
void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    this.stormConf = stormConf;
    topologyContext = context;
    outputCollector = collector;
    DirectedSubgraph<Node, Edge> processorSubgraph =
            new DirectedSubgraph<>(graph, new HashSet<>(nodes), null);
    // Topological order guarantees parents are initialized before children.
    for (TopologicalOrderIterator<Node, Edge> nodeIterator =
            new TopologicalOrderIterator<>(processorSubgraph); nodeIterator.hasNext(); ) {
        Node current = nodeIterator.next();
        if (!(current instanceof ProcessorNode)) {
            throw new IllegalStateException("Not a processor node " + current);
        }
        ProcessorNode processorNode = (ProcessorNode) current;
        List<ProcessorNode> childNodes = StreamUtil.getChildren(processorSubgraph, processorNode);
        ProcessorContext assignedContext;
        if (!childNodes.isEmpty()) {
            // Group children by the parent stream they subscribe to, so the
            // forwarding context can route results to the right consumers.
            Multimap<String, ProcessorNode> childrenByStream = ArrayListMultimap.create();
            for (ProcessorNode childNode : childNodes) {
                for (String parentStream : childNode.getParentStreams(processorNode)) {
                    childrenByStream.put(parentStream, childNode);
                }
            }
            ForwardingProcessorContext forwarding =
                    new ForwardingProcessorContext(processorNode, childrenByStream);
            // A node with an outgoing child outside this bolt must both forward
            // locally and emit downstream; otherwise forwarding alone suffices.
            assignedContext = hasOutgoingChild(processorNode, new HashSet<>(childNodes))
                    ? new ChainedProcessorContext(processorNode, forwarding,
                            createEmittingContext(processorNode))
                    : forwarding;
        } else {
            // Leaf node: results leave this bolt directly.
            assignedContext = createEmittingContext(processorNode);
        }
        processorNode.initProcessorContext(assignedContext);
    }
    if (timestampField != null) {
        for (EmittingProcessorContext emittingCtx : emittingProcessorContexts) {
            emittingCtx.setTimestampField(timestampField);
        }
    }
    // NOTE(review): presumably used to detect end-of-stream across all input
    // tasks — confirm against the consumers of streamToInputTaskCount.
    for (String inputStream : streamToInitialProcessors.keySet()) {
        streamToInputTaskCount.put(inputStream, getStreamInputTaskCount(context, inputStream));
    }
}
Usage of org.apache.storm.streams.processors.ForwardingProcessorContext in the Apache Storm project.
From class ProcessorBoltDelegate, method prepare (variant with a generic Map&lt;String, Object&gt; configuration parameter).
/**
 * Initializes this delegate's processor graph before any tuples are processed.
 *
 * <p>Iterates the delegate's nodes in topological order and installs a
 * {@link ProcessorContext} on each {@link ProcessorNode}: nodes without
 * children emit directly; nodes with children forward to them per stream; and
 * nodes that also have an outgoing child chain a forwarding context with an
 * emitting one. Afterwards the configured timestamp field (if any) is pushed to
 * all emitting contexts, and the number of input tasks is captured for every
 * stream that feeds an initial processor.
 *
 * @param topoConf  the topology configuration
 * @param context   this bolt's topology context
 * @param collector the output collector used by emitting contexts
 * @throws IllegalStateException if the subgraph contains a non-processor node
 */
void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    this.topoConf = topoConf;
    topologyContext = context;
    outputCollector = collector;
    DirectedSubgraph<Node, Edge> delegateSubgraph =
            new DirectedSubgraph<>(graph, new HashSet<>(nodes), null);
    TopologicalOrderIterator<Node, Edge> ordered =
            new TopologicalOrderIterator<>(delegateSubgraph);
    // Parents are visited before children thanks to the topological ordering.
    while (ordered.hasNext()) {
        Node visited = ordered.next();
        if (!(visited instanceof ProcessorNode)) {
            throw new IllegalStateException("Not a processor node " + visited);
        }
        ProcessorNode pn = (ProcessorNode) visited;
        List<ProcessorNode> descendants = StreamUtil.getChildren(delegateSubgraph, pn);
        ProcessorContext ctxToInstall;
        if (descendants.isEmpty()) {
            // No local children: results of this node are emitted downstream.
            ctxToInstall = createEmittingContext(pn);
        } else {
            // Index children by the parent stream they listen on so the
            // forwarding context can dispatch to the right subset.
            Multimap<String, ProcessorNode> byStream = ArrayListMultimap.create();
            for (ProcessorNode descendant : descendants) {
                for (String listenedStream : descendant.getParentStreams(pn)) {
                    byStream.put(listenedStream, descendant);
                }
            }
            ForwardingProcessorContext fwd = new ForwardingProcessorContext(pn, byStream);
            if (hasOutgoingChild(pn, new HashSet<>(descendants))) {
                // Node feeds both local children and an external consumer:
                // chain local forwarding with downstream emission.
                ctxToInstall = new ChainedProcessorContext(pn, fwd, createEmittingContext(pn));
            } else {
                ctxToInstall = fwd;
            }
        }
        pn.initProcessorContext(ctxToInstall);
    }
    if (timestampField != null) {
        for (EmittingProcessorContext emitting : emittingProcessorContexts) {
            emitting.setTimestampField(timestampField);
        }
    }
    // NOTE(review): input-task counts look like they support tracking
    // completion/punctuation per stream — verify with the readers of
    // streamToInputTaskCount.
    for (String initialStream : streamToInitialProcessors.keySet()) {
        streamToInputTaskCount.put(initialStream, getStreamInputTaskCount(context, initialStream));
    }
}
Aggregations