
Example 6 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project storm by apache.

The class TridentTopology, method build().

public StormTopology build() {
    DefaultDirectedGraph<Node, IndexedEdge> graph = (DefaultDirectedGraph) _graph.clone();
    completeDRPC(graph, _colocate, _gen);
    List<SpoutNode> spoutNodes = new ArrayList<>();
    // can be regular nodes (static state) or processor nodes
    Set<Node> boltNodes = new LinkedHashSet<>();
    for (Node n : graph.vertexSet()) {
        if (n instanceof SpoutNode) {
            spoutNodes.add((SpoutNode) n);
        } else if (!(n instanceof PartitionNode)) {
            boltNodes.add(n);
        }
    }
    Set<Group> initialGroups = new LinkedHashSet<>();
    for (List<Node> colocate : _colocate.values()) {
        Group g = new Group(graph, colocate);
        boltNodes.removeAll(colocate);
        initialGroups.add(g);
    }
    for (Node n : boltNodes) {
        initialGroups.add(new Group(graph, n));
    }
    GraphGrouper grouper = new GraphGrouper(graph, initialGroups);
    grouper.mergeFully();
    Collection<Group> mergedGroups = grouper.getAllGroups();
    // add identity partitions between groups
    for (IndexedEdge<Node> e : new HashSet<>(graph.edgeSet())) {
        if (!(e.source instanceof PartitionNode) && !(e.target instanceof PartitionNode)) {
            Group g1 = grouper.nodeGroup(e.source);
            Group g2 = grouper.nodeGroup(e.target);
            // g1 being null means the source is a spout node
            if (g1 == null && !(e.source instanceof SpoutNode))
                throw new RuntimeException("Planner exception: Null source group must indicate a spout node at this phase of planning");
            if (g1 == null || !g1.equals(g2)) {
                graph.removeEdge(e);
                PartitionNode pNode = makeIdentityPartition(e.source);
                graph.addVertex(pNode);
                graph.addEdge(e.source, pNode, new IndexedEdge(e.source, pNode, 0));
                graph.addEdge(pNode, e.target, new IndexedEdge(pNode, e.target, e.index));
            }
        }
    }
    // if one group subscribes to the same stream with same partitioning multiple times,
    // merge those together (otherwise can end up with many output streams created for that partitioning
    // if need to split into multiple output streams because of same input having different
    // partitioning to the group)
    // this is because can't currently merge splitting logic into a spout
    // not the most kosher algorithm here, since the grouper indexes are being trounced via the adding of nodes to random groups, but it 
    // works out
    List<Node> forNewGroups = new ArrayList<>();
    for (Group g : mergedGroups) {
        for (PartitionNode n : extraPartitionInputs(g)) {
            Node idNode = makeIdentityNode(n.allOutputFields);
            Node newPartitionNode = new PartitionNode(idNode.streamId, n.name, idNode.allOutputFields, n.thriftGrouping);
            Node parentNode = TridentUtils.getParent(graph, n);
            Set<IndexedEdge> outgoing = graph.outgoingEdgesOf(n);
            graph.removeVertex(n);
            graph.addVertex(idNode);
            graph.addVertex(newPartitionNode);
            addEdge(graph, parentNode, idNode, 0);
            addEdge(graph, idNode, newPartitionNode, 0);
            for (IndexedEdge e : outgoing) {
                addEdge(graph, newPartitionNode, e.target, e.index);
            }
            Group parentGroup = grouper.nodeGroup(parentNode);
            if (parentGroup == null) {
                forNewGroups.add(idNode);
            } else {
                parentGroup.nodes.add(idNode);
            }
        }
    }
    for (Node n : forNewGroups) {
        grouper.addGroup(new Group(graph, n));
    }
    // add in spouts as groups so we can get parallelisms
    for (Node n : spoutNodes) {
        grouper.addGroup(new Group(graph, n));
    }
    grouper.reindex();
    mergedGroups = grouper.getAllGroups();
    Map<Node, String> batchGroupMap = new HashMap<>();
    List<Set<Node>> connectedComponents = new ConnectivityInspector<>(graph).connectedSets();
    for (int i = 0; i < connectedComponents.size(); i++) {
        String groupId = "bg" + i;
        for (Node n : connectedComponents.get(i)) {
            batchGroupMap.put(n, groupId);
        }
    }
    //        System.out.println("GRAPH:");
    //        System.out.println(graph);
    Map<Group, Integer> parallelisms = getGroupParallelisms(graph, grouper, mergedGroups);
    TridentTopologyBuilder builder = new TridentTopologyBuilder();
    Map<Node, String> spoutIds = genSpoutIds(spoutNodes);
    Map<Group, String> boltIds = genBoltIds(mergedGroups);
    for (SpoutNode sn : spoutNodes) {
        Integer parallelism = parallelisms.get(grouper.nodeGroup(sn));
        Map<String, Number> spoutRes = new HashMap<>(_resourceDefaults);
        spoutRes.putAll(sn.getResources());
        Number onHeap = spoutRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
        Number offHeap = spoutRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB);
        Number cpuLoad = spoutRes.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
        SpoutDeclarer spoutDeclarer = null;
        if (sn.type == SpoutNode.SpoutType.DRPC) {
            spoutDeclarer = builder.setBatchPerTupleSpout(spoutIds.get(sn), sn.streamId, (IRichSpout) sn.spout, parallelism, batchGroupMap.get(sn));
        } else {
            ITridentSpout s;
            if (sn.spout instanceof IBatchSpout) {
                s = new BatchSpoutExecutor((IBatchSpout) sn.spout);
            } else if (sn.spout instanceof ITridentSpout) {
                s = (ITridentSpout) sn.spout;
            } else {
                throw new RuntimeException("Regular rich spouts not supported yet... try wrapping in a RichSpoutBatchExecutor");
            // TODO: handle regular rich spout without batches (need lots of updates to support this throughout)
            }
            spoutDeclarer = builder.setSpout(spoutIds.get(sn), sn.streamId, sn.txId, s, parallelism, batchGroupMap.get(sn));
        }
        if (onHeap != null) {
            if (offHeap != null) {
                spoutDeclarer.setMemoryLoad(onHeap, offHeap);
            } else {
                spoutDeclarer.setMemoryLoad(onHeap);
            }
        }
        if (cpuLoad != null) {
            spoutDeclarer.setCPULoad(cpuLoad);
        }
    }
    for (Group g : mergedGroups) {
        if (!isSpoutGroup(g)) {
            Integer p = parallelisms.get(g);
            Map<String, String> streamToGroup = getOutputStreamBatchGroups(g, batchGroupMap);
            Map<String, Number> groupRes = g.getResources(_resourceDefaults);
            Number onHeap = groupRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
            Number offHeap = groupRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB);
            Number cpuLoad = groupRes.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
            BoltDeclarer d = builder.setBolt(boltIds.get(g), new SubtopologyBolt(graph, g.nodes, batchGroupMap), p, committerBatches(g, batchGroupMap), streamToGroup);
            if (onHeap != null) {
                if (offHeap != null) {
                    d.setMemoryLoad(onHeap, offHeap);
                } else {
                    d.setMemoryLoad(onHeap);
                }
            }
            if (cpuLoad != null) {
                d.setCPULoad(cpuLoad);
            }
            Collection<PartitionNode> inputs = uniquedSubscriptions(externalGroupInputs(g));
            for (PartitionNode n : inputs) {
                Node parent = TridentUtils.getParent(graph, n);
                String componentId = parent instanceof SpoutNode ? spoutIds.get(parent) : boltIds.get(grouper.nodeGroup(parent));
                d.grouping(new GlobalStreamId(componentId, n.streamId), n.thriftGrouping);
            }
        }
    }
    HashMap<String, Number> combinedMasterCoordResources = new HashMap<String, Number>(_resourceDefaults);
    combinedMasterCoordResources.putAll(_masterCoordResources);
    return builder.buildTopology(combinedMasterCoordResources);
}
Also used: Group (org.apache.storm.trident.graph.Group), IBatchSpout (org.apache.storm.trident.spout.IBatchSpout), DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), ProcessorNode (org.apache.storm.trident.planner.ProcessorNode), Node (org.apache.storm.trident.planner.Node), PartitionNode (org.apache.storm.trident.planner.PartitionNode), SpoutNode (org.apache.storm.trident.planner.SpoutNode), GraphGrouper (org.apache.storm.trident.graph.GraphGrouper), IndexedEdge (org.apache.storm.trident.util.IndexedEdge), BatchSpoutExecutor (org.apache.storm.trident.spout.BatchSpoutExecutor), TridentTopologyBuilder (org.apache.storm.trident.topology.TridentTopologyBuilder), IRichSpout (org.apache.storm.topology.IRichSpout), BoltDeclarer (org.apache.storm.topology.BoltDeclarer), GlobalStreamId (org.apache.storm.generated.GlobalStreamId), SubtopologyBolt (org.apache.storm.trident.planner.SubtopologyBolt), SpoutDeclarer (org.apache.storm.topology.SpoutDeclarer), ITridentSpout (org.apache.storm.trident.spout.ITridentSpout)
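As a quick way to see what the JGraphT calls in build() are doing, here is a minimal, self-contained sketch (an illustration, not Storm code) that uses plain String vertices and DefaultEdge instead of Trident's Node/IndexedEdge types. It exercises the two operations build() relies on: splicing an intermediate vertex into an existing edge (the identity-partition step) and deriving batch groups from the weakly connected components. It assumes the pre-1.0 JGraphT API these projects compile against, where ConnectivityInspector lives under org.jgrapht.alg (newer releases move it to org.jgrapht.alg.connectivity).

import java.util.List;
import java.util.Set;

import org.jgrapht.alg.ConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class IdentityPartitionSketch {
    public static void main(String[] args) {
        // Illustration only: plain String vertices stand in for Trident's Node hierarchy,
        // and the pre-1.0 JGraphT API is assumed.
        DefaultDirectedGraph<String, DefaultEdge> graph =
                new DefaultDirectedGraph<>(DefaultEdge.class);
        graph.addVertex("spout");
        graph.addVertex("boltA");
        graph.addVertex("boltB");
        graph.addEdge("spout", "boltA");
        DefaultEdge crossGroup = graph.addEdge("boltA", "boltB");

        // Splice an intermediate vertex into an existing edge: the same
        // removeEdge/addVertex/addEdge sequence build() uses for identity partitions.
        graph.removeEdge(crossGroup);
        graph.addVertex("identityPartition");
        graph.addEdge("boltA", "identityPartition");
        graph.addEdge("identityPartition", "boltB");

        // Weakly connected components correspond to the "bg<i>" batch groups.
        List<Set<String>> components = new ConnectivityInspector<>(graph).connectedSets();
        for (int i = 0; i < components.size(); i++) {
            System.out.println("bg" + i + " -> " + components.get(i));
        }
    }
}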

Example 7 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project presto by prestodb.

The class PhasedExecutionSchedule, method extractPhases().

@VisibleForTesting
static List<Set<PlanFragmentId>> extractPhases(Collection<PlanFragment> fragments) {
    // Build a graph where the plan fragments are vertexes and the edges represent
    // a before -> after relationship.  For example, a join hash build has an edge
    // to the join probe.
    DirectedGraph<PlanFragmentId, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
    fragments.forEach(fragment -> graph.addVertex(fragment.getId()));
    Visitor visitor = new Visitor(fragments, graph);
    for (PlanFragment fragment : fragments) {
        visitor.processFragment(fragment.getId());
    }
    // Computes all the strongly connected components of the directed graph.
    // These are the "phases" which hold the set of fragments that must be started
    // at the same time to avoid deadlock.
    List<Set<PlanFragmentId>> components = new StrongConnectivityInspector<>(graph).stronglyConnectedSets();
    Map<PlanFragmentId, Set<PlanFragmentId>> componentMembership = new HashMap<>();
    for (Set<PlanFragmentId> component : components) {
        for (PlanFragmentId planFragmentId : component) {
            componentMembership.put(planFragmentId, component);
        }
    }
    // build graph of components (phases)
    DirectedGraph<Set<PlanFragmentId>, DefaultEdge> componentGraph = new DefaultDirectedGraph<>(DefaultEdge.class);
    components.forEach(componentGraph::addVertex);
    for (DefaultEdge edge : graph.edgeSet()) {
        PlanFragmentId source = graph.getEdgeSource(edge);
        PlanFragmentId target = graph.getEdgeTarget(edge);
        Set<PlanFragmentId> from = componentMembership.get(source);
        Set<PlanFragmentId> to = componentMembership.get(target);
        if (!from.equals(to)) {
            // the topological order iterator below doesn't include vertices that have self-edges, so don't add them
            componentGraph.addEdge(from, to);
        }
    }
    List<Set<PlanFragmentId>> schedulePhases = ImmutableList.copyOf(new TopologicalOrderIterator<>(componentGraph));
    return schedulePhases;
}
Also used: HashSet (java.util.HashSet), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), PlanVisitor (com.facebook.presto.sql.planner.plan.PlanVisitor), DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), HashMap (java.util.HashMap), DefaultEdge (org.jgrapht.graph.DefaultEdge), PlanFragment (com.facebook.presto.sql.planner.PlanFragment), PlanFragmentId (com.facebook.presto.sql.planner.plan.PlanFragmentId), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
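The approach in extractPhases, strongly connected components as phases followed by a topological sort over the condensed component graph, is plain JGraphT and can be reproduced without any Presto types. The sketch below (an illustration, not Presto code) uses String fragment ids in place of PlanFragmentId and assumes the same pre-1.0 API, where StrongConnectivityInspector lives under org.jgrapht.alg (newer JGraphT releases supersede it with KosarajuStrongConnectivityInspector).

import java.util.List;
import java.util.Set;

import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.StrongConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;
import org.jgrapht.traverse.TopologicalOrderIterator;

public class PhaseExtractionSketch {
    public static void main(String[] args) {
        // Illustration only: "before -> after" edges between fragments.
        // F1 and F2 form a cycle, so they must be scheduled together as one phase.
        DirectedGraph<String, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
        for (String id : new String[] {"F1", "F2", "F3"}) {
            graph.addVertex(id);
        }
        graph.addEdge("F1", "F2");
        graph.addEdge("F2", "F1");
        graph.addEdge("F2", "F3");

        // Strongly connected components are the phases.
        List<Set<String>> phases = new StrongConnectivityInspector<>(graph).stronglyConnectedSets();

        // Collapse the fragment graph into a graph of phases, skipping self-edges
        // so the topological iterator below stays well-defined.
        DirectedGraph<Set<String>, DefaultEdge> phaseGraph = new DefaultDirectedGraph<>(DefaultEdge.class);
        phases.forEach(phaseGraph::addVertex);
        for (DefaultEdge e : graph.edgeSet()) {
            Set<String> from = phaseOf(phases, graph.getEdgeSource(e));
            Set<String> to = phaseOf(phases, graph.getEdgeTarget(e));
            if (!from.equals(to)) {
                phaseGraph.addEdge(from, to);
            }
        }

        // Topological order over phases gives the schedule: [F1, F2] before [F3].
        new TopologicalOrderIterator<>(phaseGraph).forEachRemaining(System.out::println);
    }

    private static Set<String> phaseOf(List<Set<String>> phases, String id) {
        return phases.stream().filter(p -> p.contains(id)).findFirst().orElseThrow(IllegalStateException::new);
    }
}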

Example 8 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project jstorm by alibaba.

The class TridentTopology, method build().

public StormTopology build() {
    // Transaction is not compatible with jstorm batch mode(task.batch.tuple)
    // so we close batch mode via system property
    System.setProperty(ConfigExtension.TASK_BATCH_TUPLE, "false");
    DefaultDirectedGraph<Node, IndexedEdge> graph = (DefaultDirectedGraph) _graph.clone();
    completeDRPC(graph, _colocate, _gen);
    List<SpoutNode> spoutNodes = new ArrayList<>();
    // can be regular nodes (static state) or processor nodes
    Set<Node> boltNodes = new LinkedHashSet<>();
    for (Node n : graph.vertexSet()) {
        if (n instanceof SpoutNode) {
            spoutNodes.add((SpoutNode) n);
        } else if (!(n instanceof PartitionNode)) {
            boltNodes.add(n);
        }
    }
    Set<Group> initialGroups = new LinkedHashSet<>();
    for (List<Node> colocate : _colocate.values()) {
        Group g = new Group(graph, colocate);
        boltNodes.removeAll(colocate);
        initialGroups.add(g);
    }
    for (Node n : boltNodes) {
        initialGroups.add(new Group(graph, n));
    }
    GraphGrouper grouper = new GraphGrouper(graph, initialGroups);
    grouper.mergeFully();
    Collection<Group> mergedGroups = grouper.getAllGroups();
    // add identity partitions between groups
    for (IndexedEdge<Node> e : new HashSet<>(graph.edgeSet())) {
        if (!(e.source instanceof PartitionNode) && !(e.target instanceof PartitionNode)) {
            Group g1 = grouper.nodeGroup(e.source);
            Group g2 = grouper.nodeGroup(e.target);
            // g1 being null means the source is a spout node
            if (g1 == null && !(e.source instanceof SpoutNode))
                throw new RuntimeException("Planner exception: Null source group must indicate a spout node at this phase of planning");
            if (g1 == null || !g1.equals(g2)) {
                graph.removeEdge(e);
                PartitionNode pNode = makeIdentityPartition(e.source);
                graph.addVertex(pNode);
                graph.addEdge(e.source, pNode, new IndexedEdge(e.source, pNode, 0));
                graph.addEdge(pNode, e.target, new IndexedEdge(pNode, e.target, e.index));
            }
        }
    }
    // if one group subscribes to the same stream with same partitioning multiple times,
    // merge those together (otherwise can end up with many output streams created for that partitioning
    // if need to split into multiple output streams because of same input having different
    // partitioning to the group)
    // this is because can't currently merge splitting logic into a spout
    // not the most kosher algorithm here, since the grouper indexes are being trounced via the adding of nodes to random groups, but it 
    // works out
    List<Node> forNewGroups = new ArrayList<>();
    for (Group g : mergedGroups) {
        for (PartitionNode n : extraPartitionInputs(g)) {
            Node idNode = makeIdentityNode(n.allOutputFields);
            Node newPartitionNode = new PartitionNode(idNode.streamId, n.name, idNode.allOutputFields, n.thriftGrouping);
            Node parentNode = TridentUtils.getParent(graph, n);
            Set<IndexedEdge> outgoing = graph.outgoingEdgesOf(n);
            graph.removeVertex(n);
            graph.addVertex(idNode);
            graph.addVertex(newPartitionNode);
            addEdge(graph, parentNode, idNode, 0);
            addEdge(graph, idNode, newPartitionNode, 0);
            for (IndexedEdge e : outgoing) {
                addEdge(graph, newPartitionNode, e.target, e.index);
            }
            Group parentGroup = grouper.nodeGroup(parentNode);
            if (parentGroup == null) {
                forNewGroups.add(idNode);
            } else {
                parentGroup.nodes.add(idNode);
            }
        }
    }
    for (Node n : forNewGroups) {
        grouper.addGroup(new Group(graph, n));
    }
    // add in spouts as groups so we can get parallelisms
    for (Node n : spoutNodes) {
        grouper.addGroup(new Group(graph, n));
    }
    grouper.reindex();
    mergedGroups = grouper.getAllGroups();
    Map<Node, String> batchGroupMap = new HashMap<>();
    List<Set<Node>> connectedComponents = new ConnectivityInspector<>(graph).connectedSets();
    for (int i = 0; i < connectedComponents.size(); i++) {
        String groupId = "bg" + i;
        for (Node n : connectedComponents.get(i)) {
            batchGroupMap.put(n, groupId);
        }
    }
    //        System.out.println("GRAPH:");
    //        System.out.println(graph);
    Map<Group, Integer> parallelisms = getGroupParallelisms(graph, grouper, mergedGroups);
    TridentTopologyBuilder builder = new TridentTopologyBuilder();
    Map<Node, String> spoutIds = genSpoutIds(spoutNodes);
    Map<Group, String> boltIds = genBoltIds(mergedGroups);
    Map defaults = Utils.readDefaultConfig();
    for (SpoutNode sn : spoutNodes) {
        Integer parallelism = parallelisms.get(grouper.nodeGroup(sn));
        Map<String, Number> spoutRes = null;
        spoutRes = mergeDefaultResources(sn.getResources(), defaults);
        Number onHeap = spoutRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
        Number offHeap = spoutRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB);
        Number cpuLoad = spoutRes.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
        if (sn.type == SpoutNode.SpoutType.DRPC) {
            builder.setBatchPerTupleSpout(spoutIds.get(sn), sn.streamId, (IRichSpout) sn.spout, parallelism, batchGroupMap.get(sn)).setMemoryLoad(onHeap, offHeap).setCPULoad(cpuLoad);
        } else {
            ITridentSpout s;
            if (sn.spout instanceof IBatchSpout) {
                s = new BatchSpoutExecutor((IBatchSpout) sn.spout);
            } else if (sn.spout instanceof ITridentSpout) {
                s = (ITridentSpout) sn.spout;
            } else {
                throw new RuntimeException("Regular rich spouts not supported yet... try wrapping in a RichSpoutBatchExecutor");
            // TODO: handle regular rich spout without batches (need lots of updates to support this throughout)
            }
            builder.setSpout(spoutIds.get(sn), sn.streamId, sn.txId, s, parallelism, batchGroupMap.get(sn)).setMemoryLoad(onHeap, offHeap).setCPULoad(cpuLoad);
        }
    }
    for (Group g : mergedGroups) {
        if (!isSpoutGroup(g)) {
            Integer p = parallelisms.get(g);
            Map<String, String> streamToGroup = getOutputStreamBatchGroups(g, batchGroupMap);
            Map<String, Number> groupRes = mergeDefaultResources(g.getResources(), defaults);
            Number onHeap = groupRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
            Number offHeap = groupRes.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB);
            Number cpuLoad = groupRes.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
            BoltDeclarer d = builder.setBolt(boltIds.get(g), new SubtopologyBolt(graph, g.nodes, batchGroupMap), p, committerBatches(g, batchGroupMap), streamToGroup).setMemoryLoad(onHeap, offHeap).setCPULoad(cpuLoad);
            Collection<PartitionNode> inputs = uniquedSubscriptions(externalGroupInputs(g));
            for (PartitionNode n : inputs) {
                Node parent = TridentUtils.getParent(graph, n);
                String componentId = parent instanceof SpoutNode ? spoutIds.get(parent) : boltIds.get(grouper.nodeGroup(parent));
                d.grouping(new GlobalStreamId(componentId, n.streamId), n.thriftGrouping);
            }
        }
    }
    return builder.buildTopology();
}
Also used: LinkedHashSet (java.util.LinkedHashSet), Group (storm.trident.graph.Group), Set (java.util.Set), HashSet (java.util.HashSet), IBatchSpout (storm.trident.spout.IBatchSpout), DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), SpoutNode (storm.trident.planner.SpoutNode), ProcessorNode (storm.trident.planner.ProcessorNode), PartitionNode (storm.trident.planner.PartitionNode), Node (storm.trident.planner.Node), ArrayList (java.util.ArrayList), GraphGrouper (storm.trident.graph.GraphGrouper), IndexedEdge (storm.trident.util.IndexedEdge), BatchSpoutExecutor (storm.trident.spout.BatchSpoutExecutor), TridentTopologyBuilder (storm.trident.topology.TridentTopologyBuilder), BoltDeclarer (backtype.storm.topology.BoltDeclarer), GlobalStreamId (backtype.storm.generated.GlobalStreamId), SubtopologyBolt (storm.trident.planner.SubtopologyBolt), Map (java.util.Map), TreeMap (java.util.TreeMap), ITridentSpout (storm.trident.spout.ITridentSpout)
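Both Trident build() variants start from (DefaultDirectedGraph) _graph.clone() and then rewrite the copy, so the planner's master graph is left untouched and build() can be called again from the same state. The sketch below (an illustration under the same pre-1.0 JGraphT assumption, not project code) shows that clone() copies the graph structure while sharing the vertex and edge objects, which is what build() relies on when it splices and removes nodes on the working copy only.

import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class CloneSketch {
    public static void main(String[] args) {
        DefaultDirectedGraph<String, DefaultEdge> original =
                new DefaultDirectedGraph<>(DefaultEdge.class);
        original.addVertex("a");
        original.addVertex("b");
        original.addEdge("a", "b");

        // Illustration only: AbstractBaseGraph.clone() copies the structure
        // (vertex and edge sets), but the vertex/edge objects are shared, not deep-copied.
        @SuppressWarnings("unchecked")
        DefaultDirectedGraph<String, DefaultEdge> working =
                (DefaultDirectedGraph<String, DefaultEdge>) original.clone();

        // Rewriting the working copy leaves the original intact.
        working.removeEdge("a", "b");
        working.addVertex("identity");
        working.addEdge("a", "identity");
        working.addEdge("identity", "b");

        System.out.println(original.containsEdge("a", "b")); // true
        System.out.println(working.containsEdge("a", "b"));  // false
    }
}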

Aggregations

DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph): 8
HashMap (java.util.HashMap): 4
HashSet (java.util.HashSet): 3
Set (java.util.Set): 3
GlobalStreamId (backtype.storm.generated.GlobalStreamId): 2
BoltDeclarer (backtype.storm.topology.BoltDeclarer): 2
ArrayList (java.util.ArrayList): 2
DefaultEdge (org.jgrapht.graph.DefaultEdge): 2
GraphGrouper (storm.trident.graph.GraphGrouper): 2
Group (storm.trident.graph.Group): 2
Node (storm.trident.planner.Node): 2
PartitionNode (storm.trident.planner.PartitionNode): 2
ProcessorNode (storm.trident.planner.ProcessorNode): 2
SpoutNode (storm.trident.planner.SpoutNode): 2
FlowStep (cascading.flow.FlowStep): 1
BaseFlowStep (cascading.flow.planner.BaseFlowStep): 1
FlowStepGraph (cascading.flow.planner.process.FlowStepGraph): 1
ProcessEdge (cascading.flow.planner.process.ProcessEdge): 1
PlanFragment (com.facebook.presto.sql.planner.PlanFragment): 1
PlanFragmentId (com.facebook.presto.sql.planner.plan.PlanFragmentId): 1