Use of backtype.storm.generated.GlobalStreamId in project storm by nathanmarz.
The class TridentTopologyBuilder, method fleshOutStreamBatchIds.
Map<GlobalStreamId, String> fleshOutStreamBatchIds(boolean includeCommitStream) {
    Map<GlobalStreamId, String> ret = new HashMap<GlobalStreamId, String>(_batchIds);
    Set<String> allBatches = new HashSet<String>(_batchIds.values());
    for (String b : allBatches) {
        ret.put(new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.BATCH_STREAM_ID), b);
        if (includeCommitStream) {
            ret.put(new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID), b);
        }
        // DO NOT include the success stream as part of the batch: it carries only metadata
        // to assist in cleanup, and must trigger neither coordination tuples nor batch tracking.
    }
    for (String id : _spouts.keySet()) {
        TransactionalSpoutComponent c = _spouts.get(id);
        if (c.batchGroupId != null) {
            ret.put(new GlobalStreamId(spoutCoordinator(id), MasterBatchCoordinator.BATCH_STREAM_ID), c.batchGroupId);
        }
    }
    // This takes care of setting up coordination streams for spouts and bolts.
    for (GlobalStreamId s : _batchIds.keySet()) {
        String b = _batchIds.get(s);
        ret.put(new GlobalStreamId(s.get_componentId(), TridentBoltExecutor.COORD_STREAM(b)), b);
    }
    return ret;
}
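For context, here is a minimal sketch of how a consumer of this map can resolve the batch group of an incoming tuple from its source component and stream. The component and stream names are hypothetical; the sketch only relies on the thrift-generated GlobalStreamId implementing equals and hashCode, which makes it usable as a map key:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.generated.GlobalStreamId;

public class BatchIdLookupSketch {
    public static void main(String[] args) {
        // Hypothetical mapping of the kind fleshOutStreamBatchIds produces:
        // each input stream of the subtopology is keyed to its batch group id.
        Map<GlobalStreamId, String> streamToBatch = new HashMap<GlobalStreamId, String>();
        streamToBatch.put(new GlobalStreamId("spout-0", "s1"), "bg0");

        // On tuple arrival, resolve the batch group from the tuple's
        // source component id and stream id.
        String batchGroup = streamToBatch.get(new GlobalStreamId("spout-0", "s1"));
        System.out.println("batch group: " + batchGroup); // bg0
    }
}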
Use of backtype.storm.generated.GlobalStreamId in project storm by nathanmarz.
The class TridentTopology, method build.
public StormTopology build() {
    DefaultDirectedGraph<Node, IndexedEdge> graph = (DefaultDirectedGraph) _graph.clone();
    completeDRPC(graph, _colocate, _gen);
    List<SpoutNode> spoutNodes = new ArrayList<SpoutNode>();
    // can be regular nodes (static state) or processor nodes
    Set<Node> boltNodes = new HashSet<Node>();
    for (Node n : graph.vertexSet()) {
        if (n instanceof SpoutNode) {
            spoutNodes.add((SpoutNode) n);
        } else if (!(n instanceof PartitionNode)) {
            boltNodes.add(n);
        }
    }
    Set<Group> initialGroups = new HashSet<Group>();
    for (List<Node> colocate : _colocate.values()) {
        Group g = new Group(graph, colocate);
        boltNodes.removeAll(colocate);
        initialGroups.add(g);
    }
    for (Node n : boltNodes) {
        initialGroups.add(new Group(graph, n));
    }
    GraphGrouper grouper = new GraphGrouper(graph, initialGroups);
    grouper.mergeFully();
    Collection<Group> mergedGroups = grouper.getAllGroups();
    // add identity partitions between groups
    for (IndexedEdge<Node> e : new HashSet<IndexedEdge>(graph.edgeSet())) {
        if (!(e.source instanceof PartitionNode) && !(e.target instanceof PartitionNode)) {
            Group g1 = grouper.nodeGroup(e.source);
            Group g2 = grouper.nodeGroup(e.target);
            // g1 being null means the source is a spout node
            if (g1 == null && !(e.source instanceof SpoutNode))
                throw new RuntimeException("Planner exception: Null source group must indicate a spout node at this phase of planning");
            if (g1 == null || !g1.equals(g2)) {
                graph.removeEdge(e);
                PartitionNode pNode = makeIdentityPartition(e.source);
                graph.addVertex(pNode);
                graph.addEdge(e.source, pNode, new IndexedEdge(e.source, pNode, 0));
                graph.addEdge(pNode, e.target, new IndexedEdge(pNode, e.target, e.index));
            }
        }
    }
    // If one group subscribes to the same stream with the same partitioning multiple times,
    // merge those subscriptions together. Otherwise many output streams can end up being
    // created for that partitioning, since the same input reaching the group with different
    // partitionings would have to be split into multiple output streams, and splitting logic
    // cannot currently be merged into a spout. Not the most kosher algorithm here, since the
    // grouper's indexes are trounced by adding nodes to arbitrary groups, but it works out.
    List<Node> forNewGroups = new ArrayList<Node>();
    for (Group g : mergedGroups) {
        for (PartitionNode n : extraPartitionInputs(g)) {
            Node idNode = makeIdentityNode(n.allOutputFields);
            Node newPartitionNode = new PartitionNode(idNode.streamId, n.name, idNode.allOutputFields, n.thriftGrouping);
            Node parentNode = TridentUtils.getParent(graph, n);
            Set<IndexedEdge> outgoing = graph.outgoingEdgesOf(n);
            graph.removeVertex(n);
            graph.addVertex(idNode);
            graph.addVertex(newPartitionNode);
            addEdge(graph, parentNode, idNode, 0);
            addEdge(graph, idNode, newPartitionNode, 0);
            for (IndexedEdge e : outgoing) {
                addEdge(graph, newPartitionNode, e.target, e.index);
            }
            Group parentGroup = grouper.nodeGroup(parentNode);
            if (parentGroup == null) {
                forNewGroups.add(idNode);
            } else {
                parentGroup.nodes.add(idNode);
            }
        }
    }
    for (Node n : forNewGroups) {
        grouper.addGroup(new Group(graph, n));
    }
    // add in spouts as groups so we can get parallelisms
    for (Node n : spoutNodes) {
        grouper.addGroup(new Group(graph, n));
    }
    grouper.reindex();
    mergedGroups = grouper.getAllGroups();
    Map<Node, String> batchGroupMap = new HashMap<Node, String>();
    List<Set<Node>> connectedComponents = new ConnectivityInspector<Node, IndexedEdge>(graph).connectedSets();
    for (int i = 0; i < connectedComponents.size(); i++) {
        String groupId = "bg" + i;
        for (Node n : connectedComponents.get(i)) {
            batchGroupMap.put(n, groupId);
        }
    }
    // System.out.println("GRAPH:");
    // System.out.println(graph);
    Map<Group, Integer> parallelisms = getGroupParallelisms(graph, grouper, mergedGroups);
    TridentTopologyBuilder builder = new TridentTopologyBuilder();
    Map<Node, String> spoutIds = genSpoutIds(spoutNodes);
    Map<Group, String> boltIds = genBoltIds(mergedGroups);
    for (SpoutNode sn : spoutNodes) {
        Integer parallelism = parallelisms.get(grouper.nodeGroup(sn));
        if (sn.type == SpoutNode.SpoutType.DRPC) {
            builder.setBatchPerTupleSpout(spoutIds.get(sn), sn.streamId, (IRichSpout) sn.spout, parallelism, batchGroupMap.get(sn));
        } else {
            ITridentSpout s;
            if (sn.spout instanceof IBatchSpout) {
                s = new BatchSpoutExecutor((IBatchSpout) sn.spout);
            } else if (sn.spout instanceof ITridentSpout) {
                s = (ITridentSpout) sn.spout;
            } else {
                // TODO: handle regular rich spouts without batches (needs lots of updates to support this throughout)
                throw new RuntimeException("Regular rich spouts not supported yet... try wrapping in a RichSpoutBatchExecutor");
            }
            builder.setSpout(spoutIds.get(sn), sn.streamId, sn.txId, s, parallelism, batchGroupMap.get(sn));
        }
    }
    for (Group g : mergedGroups) {
        if (!isSpoutGroup(g)) {
            Integer p = parallelisms.get(g);
            Map<String, String> streamToGroup = getOutputStreamBatchGroups(g, batchGroupMap);
            BoltDeclarer d = builder.setBolt(boltIds.get(g), new SubtopologyBolt(graph, g.nodes, batchGroupMap), p, committerBatches(g, batchGroupMap), streamToGroup);
            Collection<PartitionNode> inputs = uniquedSubscriptions(externalGroupInputs(g));
            for (PartitionNode n : inputs) {
                Node parent = TridentUtils.getParent(graph, n);
                String componentId;
                if (parent instanceof SpoutNode) {
                    componentId = spoutIds.get(parent);
                } else {
                    componentId = boltIds.get(grouper.nodeGroup(parent));
                }
                d.grouping(new GlobalStreamId(componentId, n.streamId), n.thriftGrouping);
            }
        }
    }
    return builder.buildTopology();
}
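The batchGroupMap step above gives every node of a weakly connected component the same batch group id. A self-contained sketch of that step with JGraphT (the graph library the planner already uses), using plain strings as hypothetical nodes and assuming the older org.jgrapht.alg package location of ConnectivityInspector from that era:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.jgrapht.alg.ConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class BatchGroupSketch {
    public static void main(String[] args) {
        DefaultDirectedGraph<String, DefaultEdge> graph =
                new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        graph.addVertex("spout1");
        graph.addVertex("boltA");
        graph.addVertex("spout2");
        graph.addEdge("spout1", "boltA");
        // "spout2" is disconnected, so it forms its own batch group.

        List<Set<String>> components =
                new ConnectivityInspector<String, DefaultEdge>(graph).connectedSets();
        Map<String, String> batchGroupMap = new HashMap<String, String>();
        for (int i = 0; i < components.size(); i++) {
            for (String node : components.get(i)) {
                batchGroupMap.put(node, "bg" + i);
            }
        }
        System.out.println(batchGroupMap); // e.g. {spout1=bg0, boltA=bg0, spout2=bg1}
    }
}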
Use of backtype.storm.generated.GlobalStreamId in project heron by twitter.
The class CustomStreamGroupingDelegate, method prepare.
@Override
public void prepare(com.twitter.heron.api.topology.TopologyContext context, String component, String streamId, List<Integer> targetTasks) {
    TopologyContext c = new TopologyContext(context);
    GlobalStreamId g = new GlobalStreamId(component, streamId);
    delegate.prepare(c, g, targetTasks);
}
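The delegate here is a grouping written against the backtype.storm compatibility API; the wrapper rebuilds the GlobalStreamId from the component and stream names Heron passes separately. A minimal sketch of such a delegate target, assuming a CustomStreamGrouping interface whose prepare takes the (TopologyContext, GlobalStreamId, List<Integer>) arguments implied above; the routing policy is purely illustrative:

import java.util.Collections;
import java.util.List;

import backtype.storm.generated.GlobalStreamId;
import backtype.storm.grouping.CustomStreamGrouping;
import backtype.storm.task.TopologyContext;

// Routes every tuple to the first target task; illustrative only.
public class FirstTaskGrouping implements CustomStreamGrouping {
    private List<Integer> targets;

    @Override
    public void prepare(TopologyContext context, GlobalStreamId stream,
                        List<Integer> targetTasks) {
        this.targets = targetTasks;
    }

    @Override
    public List<Integer> chooseTasks(int taskId, List<Object> values) {
        return Collections.singletonList(targets.get(0));
    }
}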
Use of backtype.storm.generated.GlobalStreamId in project jstorm by alibaba.
The class GeneralTopologyContext, method getTargets.
/**
 * Gets information about who is consuming the outputs of the specified component, and how.
 *
 * @return Map from stream id to component id to the Grouping used.
 */
public Map<String, Map<String, Grouping>> getTargets(String componentId) {
    Map<String, Map<String, Grouping>> ret = new HashMap<String, Map<String, Grouping>>();
    for (String otherComponentId : getComponentIds()) {
        Map<GlobalStreamId, Grouping> inputs = getComponentCommon(otherComponentId).get_inputs();
        for (GlobalStreamId id : inputs.keySet()) {
            if (id.get_componentId().equals(componentId)) {
                Map<String, Grouping> curr = ret.get(id.get_streamId());
                if (curr == null) {
                    curr = new HashMap<String, Grouping>();
                }
                curr.put(otherComponentId, inputs.get(id));
                ret.put(id.get_streamId(), curr);
            }
        }
    }
    return ret;
}
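The method inverts per-consumer input declarations into a per-producer view. The same inversion in isolation, with hypothetical component and stream names and a String standing in for the thrift Grouping value:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.generated.GlobalStreamId;

public class TargetsSketch {
    public static void main(String[] args) {
        // Consumer "boltA" subscribes to stream "s1" of component "spout1".
        Map<GlobalStreamId, String> inputsOfBoltA = new HashMap<GlobalStreamId, String>();
        inputsOfBoltA.put(new GlobalStreamId("spout1", "s1"), "SHUFFLE");

        // Invert: for producer "spout1", which streams go to whom, and how?
        Map<String, Map<String, String>> targets = new HashMap<String, Map<String, String>>();
        for (Map.Entry<GlobalStreamId, String> e : inputsOfBoltA.entrySet()) {
            if (e.getKey().get_componentId().equals("spout1")) {
                targets.computeIfAbsent(e.getKey().get_streamId(),
                        k -> new HashMap<String, String>()).put("boltA", e.getValue());
            }
        }
        System.out.println(targets); // {s1={boltA=SHUFFLE}}
    }
}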
Use of backtype.storm.generated.GlobalStreamId in project jstorm by alibaba.
The class TopologyContext, method getThisSourceComponentTasks.
public Map<String, List<Integer>> getThisSourceComponentTasks() {
    Map<String, List<Integer>> ret = new HashMap<>();
    Map<GlobalStreamId, Grouping> sources = getThisSources();
    Set<String> sourceComponents = new HashSet<>();
    if (sources != null) {
        for (GlobalStreamId globalStreamId : sources.keySet()) {
            sourceComponents.add(globalStreamId.get_componentId());
        }
    }
    for (String component : sourceComponents) {
        ret.put(component, getComponentTasks(component));
    }
    return ret;
}
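Because several streams can originate from the same upstream component, the method collects component ids into a Set before looking up tasks. A small sketch of that dedup step with hypothetical names, again using a String in place of the thrift Grouping value:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import backtype.storm.generated.GlobalStreamId;

public class SourceComponentsSketch {
    public static void main(String[] args) {
        // Two input streams from the same upstream component ("spout1")
        // must collapse to a single source component entry.
        Map<GlobalStreamId, String> sources = new HashMap<GlobalStreamId, String>();
        sources.put(new GlobalStreamId("spout1", "s1"), "SHUFFLE");
        sources.put(new GlobalStreamId("spout1", "s2"), "FIELDS");

        Set<String> sourceComponents = new HashSet<String>();
        for (GlobalStreamId id : sources.keySet()) {
            sourceComponents.add(id.get_componentId());
        }
        System.out.println(sourceComponents); // [spout1]
    }
}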