Example 1 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project jop by jop-devel, in class Template, method exportDOT:

public void exportDOT(File dbgFile) throws IOException {
    // Build a directed graph with one vertex per location
    DirectedGraph<Location, DefaultEdge> locGraph = new DefaultDirectedGraph<Location, DefaultEdge>(DefaultEdge.class);
    for (Location l : this.locations.values()) locGraph.addVertex(l);
    // Record each transition's attributes so they can serve as edge labels
    Map<DefaultEdge, String> edgeMap = new HashMap<DefaultEdge, String>();
    for (Transition t : this.transitions) {
        DefaultEdge e = locGraph.addEdge(t.getSource(), t.getTarget());
        edgeMap.put(e, t.getAttrs().toString());
    }
    FileWriter fw = new FileWriter(dbgFile);
    // Label DOT nodes with the location's id and name
    AdvancedDOTExporter.DOTNodeLabeller<Location> nodeLabeller = new AdvancedDOTExporter.DefaultNodeLabeller<Location>() {

        @Override
        public int getID(Location node) {
            return node.getId();
        }

        @Override
        public String getLabel(Location node) {
            return node.getName();
        }
    };
    // Label DOT edges with the transition attributes recorded above
    AdvancedDOTExporter.DOTLabeller<DefaultEdge> edgeLabeller = new AdvancedDOTExporter.MapLabeller<DefaultEdge>(edgeMap);
    AdvancedDOTExporter<Location, DefaultEdge> dotExport = new AdvancedDOTExporter<Location, DefaultEdge>(nodeLabeller, edgeLabeller);
    dotExport.exportDOT(fw, locGraph);
    fw.close();
}
Also used: DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), DefaultEdge (org.jgrapht.graph.DefaultEdge), HashMap (java.util.HashMap), FileWriter (java.io.FileWriter), AdvancedDOTExporter (com.jopdesign.common.graphutils.AdvancedDOTExporter)
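
AdvancedDOTExporter is a jop-specific utility, not part of JGraphT itself. For reference, here is a minimal, self-contained sketch of the same build-then-export pattern using only core JGraphT calls and a hand-rolled DOT writer, assuming the pre-1.0 JGraphT API (org.jgrapht.DirectedGraph) used throughout these examples. SimpleDotWriter and writeDot are hypothetical names, and the output omits the escaping and labelling a real exporter would handle.

import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import org.jgrapht.DirectedGraph;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public final class SimpleDotWriter {

    // Writes a minimal DOT representation: one node line per vertex, one edge line per edge.
    public static void writeDot(DirectedGraph<String, DefaultEdge> g, Writer out) throws IOException {
        out.write("digraph G {\n");
        for (String v : g.vertexSet()) {
            out.write("  \"" + v + "\";\n");
        }
        for (DefaultEdge e : g.edgeSet()) {
            out.write("  \"" + g.getEdgeSource(e) + "\" -> \"" + g.getEdgeTarget(e) + "\";\n");
        }
        out.write("}\n");
    }

    public static void main(String[] args) throws IOException {
        DirectedGraph<String, DefaultEdge> g = new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        g.addVertex("start");
        g.addVertex("end");
        g.addEdge("start", "end");
        StringWriter sw = new StringWriter();
        writeDot(g, sw);
        System.out.println(sw);
    }
}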

Example 2 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project ambrose by twitter, in class AmbroseCascadingNotifier, method onStarting:

/**
   * The onStarting event is fired when a Flow instance receives the start() message. A Flow is
   * broken down into executing units called flow steps; each flow step contains a flow step job,
   * which represents the MapReduce job to be submitted to Hadoop. The Ambrose graph is
   * constructed from the step graph found in the flow object.
   *
   * @param flow the flow.
   */
@Override
@SuppressWarnings("unchecked")
public void onStarting(Flow flow) {
    // init flow
    List<FlowStep> steps = flow.getFlowSteps();
    totalNumberOfJobs = steps.size();
    currentFlowId = flow.getID();
    Properties props = new Properties();
    props.putAll(flow.getConfigAsProperties());
    try {
        statsWriteService.initWriteService(props);
    } catch (IOException e) {
        LOG.error("Failed to initialize statsWriteService", e);
    }
    // convert the step graph from Cascading's representation to a JGraphT graph
    FlowStepGraph flowStepGraph = Flows.getStepGraphFrom(flow);
    // raw DirectedGraph: vertices are BaseFlowStep instances; the custom EdgeFactory builds
    // FlowGraphEdge objects that record the endpoint step ids
    DirectedGraph graph = new DefaultDirectedGraph<BaseFlowStep, FlowGraphEdge>(new EdgeFactory<BaseFlowStep, FlowGraphEdge>() {

        @Override
        public FlowGraphEdge createEdge(BaseFlowStep src, BaseFlowStep dest) {
            return new FlowGraphEdge(src.getID(), dest.getID());
        }
    });
    for (FlowStep v : flowStepGraph.vertexSet()) {
        graph.addVertex(v);
    }
    for (ProcessEdge e : flowStepGraph.edgeSet()) {
        graph.addEdge(e.getSourceProcessID(), e.getSinkProcessID());
    }
    // convert graph from jgrapht to ambrose
    AmbroseCascadingGraphConverter converter = new AmbroseCascadingGraphConverter(graph, nodesByName);
    converter.convert();
    AmbroseUtils.sendDagNodeNameMap(statsWriteService, currentFlowId, nodesByName);
}
Also used: DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), DirectedGraph (org.jgrapht.DirectedGraph), FlowStepGraph (cascading.flow.planner.process.FlowStepGraph), FlowStep (cascading.flow.FlowStep), BaseFlowStep (cascading.flow.planner.BaseFlowStep), ProcessEdge (cascading.flow.planner.process.ProcessEdge), IOException (java.io.IOException), Properties (java.util.Properties)
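
The example above relies on JGraphT's EdgeFactory callback, which DefaultDirectedGraph invokes from addEdge(v1, v2) to construct the edge object. Below is a minimal sketch of that pattern in isolation, assuming the same pre-1.0 API (newer JGraphT releases replaced the factory-based constructor with supplier-based construction); IdEdge and EdgeFactoryDemo are hypothetical names.

import org.jgrapht.DirectedGraph;
import org.jgrapht.EdgeFactory;
import org.jgrapht.graph.DefaultDirectedGraph;

public class EdgeFactoryDemo {

    // A custom edge type that captures the endpoint ids at creation time.
    static class IdEdge {
        final String srcId;
        final String destId;

        IdEdge(String srcId, String destId) {
            this.srcId = srcId;
            this.destId = destId;
        }
    }

    public static void main(String[] args) {
        // The factory is invoked by addEdge(v1, v2) to build the edge object.
        DirectedGraph<String, IdEdge> g = new DefaultDirectedGraph<String, IdEdge>(
                new EdgeFactory<String, IdEdge>() {
                    @Override
                    public IdEdge createEdge(String src, String dest) {
                        return new IdEdge(src, dest);
                    }
                });
        g.addVertex("a");
        g.addVertex("b");
        IdEdge e = g.addEdge("a", "b");
        System.out.println(e.srcId + " -> " + e.destId);
    }
}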

Example 3 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project st-js by st-js, in class AbstractSTJSMojo, method packFiles:

/**
 * Packs all the generated files into a single file, in dependency order.
 *
 * @param generator
 *            a {@link org.stjs.generator.Generator} object.
 * @param gendir
 *            a {@link org.stjs.generator.GenerationDirectory} object.
 * @throws org.apache.maven.plugin.MojoFailureException
 *             if packing the files fails
 * @throws org.apache.maven.plugin.MojoExecutionException
 *             if the mojo cannot be executed
 */
protected void packFiles(Generator generator, GenerationDirectory gendir) throws MojoFailureException, MojoExecutionException {
    if (!pack) {
        return;
    }
    OutputStream allSourcesFile = null;
    Writer packMapStream = null;
    ClassLoader builtProjectClassLoader = getBuiltProjectClassLoader();
    Map<String, File> currentProjectsFiles = new HashMap<String, File>();
    // pack the files
    try {
        DirectedGraph<String, DefaultEdge> dependencyGraph = new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        File outputFile = new File(gendir.getGeneratedSourcesAbsolutePath(), project.getArtifactId() + ".js");
        allSourcesFile = new BufferedOutputStream(new FileOutputStream(outputFile));
        for (String sourceRoot : getCompileSourceRoots()) {
            File sourceDir = new File(sourceRoot);
            List<File> sources = new ArrayList<File>();
            SourceMapping mapping = new SuffixMapping(".java", ".js");
            SourceMapping stjsMapping = new SuffixMapping(".java", ".stjs");
            // take all the files
            sources = accumulateSources(gendir, sourceDir, mapping, stjsMapping, Integer.MIN_VALUE);
            for (File source : sources) {
                File absoluteTarget = (File) mapping.getTargetFiles(gendir.getGeneratedSourcesAbsolutePath(), source.getPath()).iterator().next();
                String className = getClassNameForSource(source.getPath());
                if (!absoluteTarget.exists()) {
                    getLog().debug(className + " is a bridge. Don't add it to the pack file");
                    continue;
                }
                // add this file to the hashmap to know that this class is part of the project
                currentProjectsFiles.put(className, absoluteTarget);
                if (getLog().isDebugEnabled()) {
                    getLog().debug("Packing " + absoluteTarget);
                }
                ClassWithJavascript cjs = generator.getExistingStjsClass(builtProjectClassLoader, builtProjectClassLoader.loadClass(className));
                dependencyGraph.addVertex(className);
                for (Map.Entry<ClassWithJavascript, DependencyType> dep : cjs.getDirectDependencyMap().entrySet()) {
                    if (dep.getKey() instanceof STJSClass) {
                        dependencyGraph.addVertex(dep.getKey().getJavaClassName());
                        if (dep.getValue() != DependencyType.OTHER) {
                            dependencyGraph.addEdge(dep.getKey().getJavaClassName(), className);
                        }
                    }
                }
            }
        }
        // check for cycles
        detectCycles(dependencyGraph);
        // dump all the files in the dependency order in the pack file
        SourceMapGeneratorV3 packSourceMap = (SourceMapGeneratorV3) SourceMapGeneratorFactory.getInstance(SourceMapFormat.V3);
        int currentLine = 0;
        Iterator<String> it = new TopologicalOrderIterator<String, DefaultEdge>(dependencyGraph);
        while (it.hasNext()) {
            File targetFile = currentProjectsFiles.get(it.next());
            // target file is absolute
            if (targetFile != null) {
                // for this project's files
                if (generateSourceMap) {
                    currentLine = SourceMapUtils.appendFileSkipSourceMap(gendir.getGeneratedSourcesAbsolutePath(), allSourcesFile, targetFile, currentLine, packSourceMap, sourceEncoding);
                } else {
                    Files.copy(targetFile, allSourcesFile);
                }
                allSourcesFile.flush();
            }
        }
        if (generateSourceMap) {
            File packMapFile = new File(gendir.getGeneratedSourcesAbsolutePath(), project.getArtifactId() + ".map");
            packMapStream = new BufferedWriter(new FileWriter(packMapFile));
            packSourceMap.appendTo(packMapStream, project.getArtifactId() + ".js");
            allSourcesFile.write(("//# sourceMappingURL=" + project.getArtifactId() + ".map\n").getBytes());
            allSourcesFile.flush();
        }
    } catch (Exception ex) {
        throw new MojoFailureException("Error when packing files: " + ex.getMessage(), ex);
    } finally {
        try {
            Closeables.close(allSourcesFile, true);
        } catch (IOException e) {
            LOG.log(Level.SEVERE, "IOException should not have been thrown.", e);
        }
        try {
            Closeables.close(packMapStream, true);
        } catch (IOException e) {
            LOG.log(Level.SEVERE, "IOException should not have been thrown.", e);
        }
    }
}
Also used: DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), DefaultEdge (org.jgrapht.graph.DefaultEdge), TopologicalOrderIterator (org.jgrapht.traverse.TopologicalOrderIterator), HashMap (java.util.HashMap), Map (java.util.Map), ArrayList (java.util.ArrayList), File (java.io.File), BufferedOutputStream (java.io.BufferedOutputStream), OutputStream (java.io.OutputStream), FileOutputStream (java.io.FileOutputStream), FileWriter (java.io.FileWriter), BufferedWriter (java.io.BufferedWriter), Writer (java.io.Writer), IOException (java.io.IOException), URLClassLoader (java.net.URLClassLoader), STJSClass (org.stjs.generator.STJSClass), ClassWithJavascript (org.stjs.generator.ClassWithJavascript), DependencyType (org.stjs.generator.name.DependencyType), MultipleFileGenerationException (org.stjs.generator.MultipleFileGenerationException), JavascriptFileGenerationException (org.stjs.generator.JavascriptFileGenerationException), SuffixMapping (org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping), SourceMapping (org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping), InclusionScanException (org.codehaus.plexus.compiler.util.scan.InclusionScanException), SourceMapGeneratorV3 (com.google.debugging.sourcemap.SourceMapGeneratorV3), MojoFailureException (org.apache.maven.plugin.MojoFailureException), MojoExecutionException (org.apache.maven.plugin.MojoExecutionException), DependencyResolutionRequiredException (org.apache.maven.artifact.DependencyResolutionRequiredException)
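
The core of packFiles is the dependency-ordering idea: build a graph with an edge from each dependency to its dependent, refuse to proceed on cycles, then emit files in topological order. A minimal sketch of that skeleton, using only the JGraphT calls that appear in the example above; PackOrderDemo and the class names in it are made up for illustration.

import java.util.ArrayList;
import java.util.List;
import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.CycleDetector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;
import org.jgrapht.traverse.TopologicalOrderIterator;

public class PackOrderDemo {

    public static void main(String[] args) {
        DirectedGraph<String, DefaultEdge> deps = new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        for (String cls : new String[] { "util.Base", "app.Widget", "app.Main" }) {
            deps.addVertex(cls);
        }
        // Edge direction is dependency -> dependent, so dependencies come first in the order.
        deps.addEdge("util.Base", "app.Widget");
        deps.addEdge("app.Widget", "app.Main");
        // Refuse to pack if the dependency graph has a cycle; no topological order would exist.
        if (new CycleDetector<String, DefaultEdge>(deps).detectCycles()) {
            throw new IllegalStateException("Dependency cycle detected; cannot pack");
        }
        List<String> packOrder = new ArrayList<String>();
        TopologicalOrderIterator<String, DefaultEdge> it =
                new TopologicalOrderIterator<String, DefaultEdge>(deps);
        while (it.hasNext()) {
            packOrder.add(it.next());
        }
        // Files would be concatenated in this order: util.Base, app.Widget, app.Main
        System.out.println(packOrder);
    }
}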

Example 4 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project evosuite by EvoSuite, in class RegexDistanceUtils, method cacheRegex:

private static void cacheRegex(String regex) {
    String r = expandRegex(regex);
    Automaton automaton = new RegExp(r, RegExp.NONE).toAutomaton();
    automaton.expandSingleton();
    // We convert this to a graph without self-loops in order to determine the topological order
    DirectedGraph<State, DefaultEdge> regexGraph = new DefaultDirectedGraph<State, DefaultEdge>(DefaultEdge.class);
    Set<State> visitedStates = new HashSet<State>();
    Queue<State> states = new LinkedList<State>();
    State initialState = automaton.getInitialState();
    states.add(initialState);
    while (!states.isEmpty()) {
        State currentState = states.poll();
        if (visitedStates.contains(currentState))
            continue;
        if (!regexGraph.containsVertex(currentState))
            regexGraph.addVertex(currentState);
        for (Transition t : currentState.getTransitions()) {
            // Need to get rid of back edges, otherwise there is no topological order!
            if (!t.getDest().equals(currentState)) {
                regexGraph.addVertex(t.getDest());
                regexGraph.addEdge(currentState, t.getDest());
                states.add(t.getDest());
                CycleDetector<State, DefaultEdge> det = new CycleDetector<State, DefaultEdge>(regexGraph);
                if (det.detectCycles()) {
                    regexGraph.removeEdge(currentState, t.getDest());
                }
            }
        }
        visitedStates.add(currentState);
    }
    TopologicalOrderIterator<State, DefaultEdge> iterator = new TopologicalOrderIterator<State, DefaultEdge>(regexGraph);
    List<State> topologicalOrder = new ArrayList<State>();
    while (iterator.hasNext()) {
        topologicalOrder.add(iterator.next());
    }
    regexStateCache.put(regex, topologicalOrder);
    regexAutomatonCache.put(regex, automaton);
}
Also used: DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), DefaultEdge (org.jgrapht.graph.DefaultEdge), CycleDetector (org.jgrapht.alg.CycleDetector), TopologicalOrderIterator (org.jgrapht.traverse.TopologicalOrderIterator), Automaton (dk.brics.automaton.Automaton), RegExp (dk.brics.automaton.RegExp), State (dk.brics.automaton.State), Transition (dk.brics.automaton.Transition), ArrayList (java.util.ArrayList), LinkedList (java.util.LinkedList), HashSet (java.util.HashSet)
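
The inner loop above keeps the graph acyclic incrementally: it adds each candidate edge, runs CycleDetector, and removes the edge again if a cycle appeared. That pattern can be factored into a small helper; a sketch, with addEdgeIfAcyclic as a hypothetical name. Note that running a full cycle check per insertion costs O(V+E) each time, just as in the original.

import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.CycleDetector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class AcyclicBuilder {

    // Tentatively adds the edge, then rolls it back if it introduced a cycle.
    // Returns true if the edge was kept.
    static <V> boolean addEdgeIfAcyclic(DirectedGraph<V, DefaultEdge> g, V src, V dest) {
        DefaultEdge e = g.addEdge(src, dest);
        if (e == null) {
            // Edge already present; nothing changed.
            return false;
        }
        if (new CycleDetector<V, DefaultEdge>(g).detectCycles()) {
            g.removeEdge(e);
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        DirectedGraph<String, DefaultEdge> g = new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        g.addVertex("s0");
        g.addVertex("s1");
        System.out.println(addEdgeIfAcyclic(g, "s0", "s1")); // true
        System.out.println(addEdgeIfAcyclic(g, "s1", "s0")); // false: would close a cycle
    }
}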

Example 5 with DefaultDirectedGraph

Use of org.jgrapht.graph.DefaultDirectedGraph in project storm by nathanmarz, in class TridentTopology, method build:

public StormTopology build() {
    DefaultDirectedGraph<Node, IndexedEdge> graph = (DefaultDirectedGraph) _graph.clone();
    completeDRPC(graph, _colocate, _gen);
    List<SpoutNode> spoutNodes = new ArrayList<SpoutNode>();
    // can be regular nodes (static state) or processor nodes
    Set<Node> boltNodes = new HashSet<Node>();
    for (Node n : graph.vertexSet()) {
        if (n instanceof SpoutNode) {
            spoutNodes.add((SpoutNode) n);
        } else if (!(n instanceof PartitionNode)) {
            boltNodes.add(n);
        }
    }
    Set<Group> initialGroups = new HashSet<Group>();
    for (List<Node> colocate : _colocate.values()) {
        Group g = new Group(graph, colocate);
        boltNodes.removeAll(colocate);
        initialGroups.add(g);
    }
    for (Node n : boltNodes) {
        initialGroups.add(new Group(graph, n));
    }
    GraphGrouper grouper = new GraphGrouper(graph, initialGroups);
    grouper.mergeFully();
    Collection<Group> mergedGroups = grouper.getAllGroups();
    // add identity partitions between groups
    for (IndexedEdge<Node> e : new HashSet<IndexedEdge>(graph.edgeSet())) {
        if (!(e.source instanceof PartitionNode) && !(e.target instanceof PartitionNode)) {
            Group g1 = grouper.nodeGroup(e.source);
            Group g2 = grouper.nodeGroup(e.target);
            // g1 being null means the source is a spout node
            if (g1 == null && !(e.source instanceof SpoutNode))
                throw new RuntimeException("Planner exception: Null source group must indicate a spout node at this phase of planning");
            if (g1 == null || !g1.equals(g2)) {
                graph.removeEdge(e);
                PartitionNode pNode = makeIdentityPartition(e.source);
                graph.addVertex(pNode);
                graph.addEdge(e.source, pNode, new IndexedEdge(e.source, pNode, 0));
                graph.addEdge(pNode, e.target, new IndexedEdge(pNode, e.target, e.index));
            }
        }
    }
    // if one group subscribes to the same stream with the same partitioning multiple times,
    // merge those subscriptions together; otherwise many output streams can end up being
    // created for that partitioning (when the same input reaches the group with different
    // partitionings and must be split into multiple output streams)
    // this is needed because splitting logic can't currently be merged into a spout
    // not the most kosher algorithm here, since the grouper indexes are being trounced via
    // the adding of nodes to random groups, but it works out
    List<Node> forNewGroups = new ArrayList<Node>();
    for (Group g : mergedGroups) {
        for (PartitionNode n : extraPartitionInputs(g)) {
            Node idNode = makeIdentityNode(n.allOutputFields);
            Node newPartitionNode = new PartitionNode(idNode.streamId, n.name, idNode.allOutputFields, n.thriftGrouping);
            Node parentNode = TridentUtils.getParent(graph, n);
            Set<IndexedEdge> outgoing = graph.outgoingEdgesOf(n);
            graph.removeVertex(n);
            graph.addVertex(idNode);
            graph.addVertex(newPartitionNode);
            addEdge(graph, parentNode, idNode, 0);
            addEdge(graph, idNode, newPartitionNode, 0);
            for (IndexedEdge e : outgoing) {
                addEdge(graph, newPartitionNode, e.target, e.index);
            }
            Group parentGroup = grouper.nodeGroup(parentNode);
            if (parentGroup == null) {
                forNewGroups.add(idNode);
            } else {
                parentGroup.nodes.add(idNode);
            }
        }
    }
    for (Node n : forNewGroups) {
        grouper.addGroup(new Group(graph, n));
    }
    // add in spouts as groups so we can get parallelisms
    for (Node n : spoutNodes) {
        grouper.addGroup(new Group(graph, n));
    }
    grouper.reindex();
    mergedGroups = grouper.getAllGroups();
    Map<Node, String> batchGroupMap = new HashMap<Node, String>();
    List<Set<Node>> connectedComponents = new ConnectivityInspector<Node, IndexedEdge>(graph).connectedSets();
    for (int i = 0; i < connectedComponents.size(); i++) {
        String groupId = "bg" + i;
        for (Node n : connectedComponents.get(i)) {
            batchGroupMap.put(n, groupId);
        }
    }
    // System.out.println("GRAPH:");
    // System.out.println(graph);
    Map<Group, Integer> parallelisms = getGroupParallelisms(graph, grouper, mergedGroups);
    TridentTopologyBuilder builder = new TridentTopologyBuilder();
    Map<Node, String> spoutIds = genSpoutIds(spoutNodes);
    Map<Group, String> boltIds = genBoltIds(mergedGroups);
    for (SpoutNode sn : spoutNodes) {
        Integer parallelism = parallelisms.get(grouper.nodeGroup(sn));
        if (sn.type == SpoutNode.SpoutType.DRPC) {
            builder.setBatchPerTupleSpout(spoutIds.get(sn), sn.streamId, (IRichSpout) sn.spout, parallelism, batchGroupMap.get(sn));
        } else {
            ITridentSpout s;
            if (sn.spout instanceof IBatchSpout) {
                s = new BatchSpoutExecutor((IBatchSpout) sn.spout);
            } else if (sn.spout instanceof ITridentSpout) {
                s = (ITridentSpout) sn.spout;
            } else {
                throw new RuntimeException("Regular rich spouts not supported yet... try wrapping in a RichSpoutBatchExecutor");
            // TODO: handle regular rich spout without batches (need lots of updates to support this throughout)
            }
            builder.setSpout(spoutIds.get(sn), sn.streamId, sn.txId, s, parallelism, batchGroupMap.get(sn));
        }
    }
    for (Group g : mergedGroups) {
        if (!isSpoutGroup(g)) {
            Integer p = parallelisms.get(g);
            Map<String, String> streamToGroup = getOutputStreamBatchGroups(g, batchGroupMap);
            BoltDeclarer d = builder.setBolt(boltIds.get(g), new SubtopologyBolt(graph, g.nodes, batchGroupMap), p, committerBatches(g, batchGroupMap), streamToGroup);
            Collection<PartitionNode> inputs = uniquedSubscriptions(externalGroupInputs(g));
            for (PartitionNode n : inputs) {
                Node parent = TridentUtils.getParent(graph, n);
                String componentId;
                if (parent instanceof SpoutNode) {
                    componentId = spoutIds.get(parent);
                } else {
                    componentId = boltIds.get(grouper.nodeGroup(parent));
                }
                d.grouping(new GlobalStreamId(componentId, n.streamId), n.thriftGrouping);
            }
        }
    }
    return builder.buildTopology();
}
Also used: DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), Set (java.util.Set), HashSet (java.util.HashSet), Group (storm.trident.graph.Group), GraphGrouper (storm.trident.graph.GraphGrouper), Node (storm.trident.planner.Node), SpoutNode (storm.trident.planner.SpoutNode), ProcessorNode (storm.trident.planner.ProcessorNode), PartitionNode (storm.trident.planner.PartitionNode), SubtopologyBolt (storm.trident.planner.SubtopologyBolt), IndexedEdge (storm.trident.util.IndexedEdge), IBatchSpout (storm.trident.spout.IBatchSpout), ITridentSpout (storm.trident.spout.ITridentSpout), BatchSpoutExecutor (storm.trident.spout.BatchSpoutExecutor), TridentTopologyBuilder (storm.trident.topology.TridentTopologyBuilder), BoltDeclarer (backtype.storm.topology.BoltDeclarer), GlobalStreamId (backtype.storm.generated.GlobalStreamId)
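
The batch-group assignment in build() uses ConnectivityInspector.connectedSets(), which treats the directed graph as undirected and returns its weakly connected components. A minimal sketch of that step in isolation; BatchGroupDemo and the node names are hypothetical.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.ConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class BatchGroupDemo {

    public static void main(String[] args) {
        DirectedGraph<String, DefaultEdge> g = new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        for (String n : new String[] { "spoutA", "boltA", "spoutB", "boltB" }) {
            g.addVertex(n);
        }
        g.addEdge("spoutA", "boltA");
        g.addEdge("spoutB", "boltB");
        // connectedSets() ignores edge direction, so each weakly connected
        // component becomes one batch group.
        List<Set<String>> components =
                new ConnectivityInspector<String, DefaultEdge>(g).connectedSets();
        Map<String, String> batchGroup = new HashMap<String, String>();
        for (int i = 0; i < components.size(); i++) {
            for (String n : components.get(i)) {
                batchGroup.put(n, "bg" + i);
            }
        }
        // {spoutA=bg0, boltA=bg0, spoutB=bg1, boltB=bg1} (iteration order may vary)
        System.out.println(batchGroup);
    }
}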

Aggregations

DefaultDirectedGraph (org.jgrapht.graph.DefaultDirectedGraph): 8
HashMap (java.util.HashMap): 5
DefaultEdge (org.jgrapht.graph.DefaultEdge): 5
ArrayList (java.util.ArrayList): 4
HashSet (java.util.HashSet): 4
Set (java.util.Set): 3
GlobalStreamId (backtype.storm.generated.GlobalStreamId): 2
BoltDeclarer (backtype.storm.topology.BoltDeclarer): 2
FileWriter (java.io.FileWriter): 2
IOException (java.io.IOException): 2
Map (java.util.Map): 2
CycleDetector (org.jgrapht.alg.CycleDetector): 2
TopologicalOrderIterator (org.jgrapht.traverse.TopologicalOrderIterator): 2
FlowStep (cascading.flow.FlowStep): 1
BaseFlowStep (cascading.flow.planner.BaseFlowStep): 1
FlowStepGraph (cascading.flow.planner.process.FlowStepGraph): 1
ProcessEdge (cascading.flow.planner.process.ProcessEdge): 1
PlanFragment (com.facebook.presto.sql.planner.PlanFragment): 1
InternalPlanVisitor (com.facebook.presto.sql.planner.plan.InternalPlanVisitor): 1
PlanFragmentId (com.facebook.presto.sql.planner.plan.PlanFragmentId): 1