Use of org.jgrapht.graph.DefaultDirectedGraph in project jop by jop-devel: the class Template, method exportDOT.
public void exportDOT(File dbgFile) throws IOException {
    DirectedGraph<Location, DefaultEdge> locGraph = new DefaultDirectedGraph<Location, DefaultEdge>(DefaultEdge.class);
    for (Location l : this.locations.values()) locGraph.addVertex(l);
    Map<DefaultEdge, String> edgeMap = new HashMap<DefaultEdge, String>();
    for (Transition t : this.transitions) {
        DefaultEdge e = locGraph.addEdge(t.getSource(), t.getTarget());
        edgeMap.put(e, t.getAttrs().toString());
    }
    FileWriter fw = new FileWriter(dbgFile);
    AdvancedDOTExporter.DOTNodeLabeller<Location> nodeLabeller = new AdvancedDOTExporter.DefaultNodeLabeller<Location>() {

        public int getID(Location node) {
            return node.getId();
        }

        public String getLabel(Location node) {
            return node.getName();
        }
    };
    AdvancedDOTExporter.DOTLabeller<DefaultEdge> edgeLabeller = new AdvancedDOTExporter.MapLabeller<DefaultEdge>(edgeMap);
    AdvancedDOTExporter<Location, DefaultEdge> dotExport = new AdvancedDOTExporter<Location, DefaultEdge>(nodeLabeller, edgeLabeller);
    dotExport.exportDOT(fw, locGraph);
    fw.close();
}
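The AdvancedDOTExporter above is jop-specific. For comparison, here is a minimal, self-contained sketch of the same export pattern using JGraphT's bundled org.jgrapht.ext.DOTExporter, written against the pre-1.0 API that still has the DirectedGraph interface (as used throughout this page); the vertex values and output path are illustrative:

import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import org.jgrapht.DirectedGraph;
import org.jgrapht.ext.DOTExporter;
import org.jgrapht.ext.IntegerNameProvider;
import org.jgrapht.ext.StringNameProvider;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class DotExportSketch {

    public static void main(String[] args) throws IOException {
        DirectedGraph<String, DefaultEdge> g = new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        g.addVertex("entry");
        g.addVertex("exit");
        g.addEdge("entry", "exit");
        // numeric node ids, toString() labels, no edge labels
        DOTExporter<String, DefaultEdge> exporter = new DOTExporter<String, DefaultEdge>(
                new IntegerNameProvider<String>(), new StringNameProvider<String>(), null);
        // illustrative output path
        Writer fw = new FileWriter("graph.dot");
        try {
            exporter.export(fw, g);
        } finally {
            fw.close();
        }
    }
}

Passing null for the edge label provider should leave edges unlabelled; the jop snippet instead carries edge labels through an explicit edge-to-string map and its own MapLabeller.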
Use of org.jgrapht.graph.DefaultDirectedGraph in project ambrose by twitter: the class AmbroseCascadingNotifier, method onStarting.
/**
 * The onStarting event is fired when a Flow instance receives the start() message. A Flow is cut
 * into executing units called step flows; each step flow contains a stepFlowJob, which represents
 * the MapReduce job to be submitted to Hadoop. The Ambrose graph is constructed from the step
 * graph found in the flow object.
 *
 * @param flow the flow that is starting.
 */
@Override
@SuppressWarnings("unchecked")
public void onStarting(Flow flow) {
    // init flow
    List<FlowStep> steps = flow.getFlowSteps();
    totalNumberOfJobs = steps.size();
    currentFlowId = flow.getID();
    Properties props = new Properties();
    props.putAll(flow.getConfigAsProperties());
    try {
        statsWriteService.initWriteService(props);
    } catch (IOException e) {
        LOG.error("Failed to initialize statsWriteService", e);
    }
    // convert graph from cascading to jgrapht
    FlowStepGraph flowStepGraph = Flows.getStepGraphFrom(flow);
    DirectedGraph graph = new DefaultDirectedGraph<BaseFlowStep, FlowGraphEdge>(new EdgeFactory<BaseFlowStep, FlowGraphEdge>() {

        @Override
        public FlowGraphEdge createEdge(BaseFlowStep src, BaseFlowStep dest) {
            return new FlowGraphEdge(src.getID(), dest.getID());
        }
    });
    for (FlowStep v : flowStepGraph.vertexSet()) {
        graph.addVertex(v);
    }
    for (ProcessEdge e : flowStepGraph.edgeSet()) {
        graph.addEdge(e.getSourceProcessID(), e.getSinkProcessID());
    }
    // convert graph from jgrapht to ambrose
    AmbroseCascadingGraphConverter converter = new AmbroseCascadingGraphConverter(graph, nodesByName);
    converter.convert();
    AmbroseUtils.sendDagNodeNameMap(statsWriteService, currentFlowId, nodesByName);
}
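The EdgeFactory callback above is how pre-1.0 JGraphT constructs edge objects that carry their own data: the factory is invoked inside addEdge(src, dest). A minimal, self-contained sketch of that constructor, with a hypothetical LabeledEdge class standing in for Ambrose's FlowGraphEdge:

import org.jgrapht.DirectedGraph;
import org.jgrapht.EdgeFactory;
import org.jgrapht.graph.DefaultDirectedGraph;

public class EdgeFactorySketch {

    // hypothetical edge type that records its endpoint names
    static class LabeledEdge {
        final String from;
        final String to;

        LabeledEdge(String from, String to) {
            this.from = from;
            this.to = to;
        }

        @Override
        public String toString() {
            return from + " -> " + to;
        }
    }

    public static void main(String[] args) {
        DirectedGraph<String, LabeledEdge> g =
                new DefaultDirectedGraph<String, LabeledEdge>(new EdgeFactory<String, LabeledEdge>() {

                    @Override
                    public LabeledEdge createEdge(String src, String dest) {
                        return new LabeledEdge(src, dest);
                    }
                });
        g.addVertex("A");
        g.addVertex("B");
        // addEdge calls the factory to build the edge object
        System.out.println(g.addEdge("A", "B")); // prints "A -> B"
    }
}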
Use of org.jgrapht.graph.DefaultDirectedGraph in project st-js by st-js: the class AbstractSTJSMojo, method packFiles.
/**
 * Packs all the generated files into a single file.
 *
 * @param generator
 *            a {@link org.stjs.generator.Generator} object.
 * @param gendir
 *            a {@link org.stjs.generator.GenerationDirectory} object.
 * @throws org.apache.maven.plugin.MojoFailureException
 * @throws org.apache.maven.plugin.MojoExecutionException
 */
protected void packFiles(Generator generator, GenerationDirectory gendir) throws MojoFailureException, MojoExecutionException {
    if (!pack) {
        return;
    }
    OutputStream allSourcesFile = null;
    Writer packMapStream = null;
    ClassLoader builtProjectClassLoader = getBuiltProjectClassLoader();
    Map<String, File> currentProjectsFiles = new HashMap<String, File>();
    // pack the files
    try {
        DirectedGraph<String, DefaultEdge> dependencyGraph = new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        File outputFile = new File(gendir.getGeneratedSourcesAbsolutePath(), project.getArtifactId() + ".js");
        allSourcesFile = new BufferedOutputStream(new FileOutputStream(outputFile));
        for (String sourceRoot : getCompileSourceRoots()) {
            File sourceDir = new File(sourceRoot);
            List<File> sources = new ArrayList<File>();
            SourceMapping mapping = new SuffixMapping(".java", ".js");
            SourceMapping stjsMapping = new SuffixMapping(".java", ".stjs");
            // take all the files
            sources = accumulateSources(gendir, sourceDir, mapping, stjsMapping, Integer.MIN_VALUE);
            for (File source : sources) {
                File absoluteTarget = (File) mapping.getTargetFiles(gendir.getGeneratedSourcesAbsolutePath(), source.getPath()).iterator().next();
                String className = getClassNameForSource(source.getPath());
                if (!absoluteTarget.exists()) {
                    getLog().debug(className + " is a bridge. Don't add it to the pack file");
                    continue;
                }
                // add this file to the hashmap to know that this class is part of the project
                currentProjectsFiles.put(className, absoluteTarget);
                if (getLog().isDebugEnabled()) {
                    getLog().debug("Packing " + absoluteTarget);
                }
                ClassWithJavascript cjs = generator.getExistingStjsClass(builtProjectClassLoader, builtProjectClassLoader.loadClass(className));
                dependencyGraph.addVertex(className);
                for (Map.Entry<ClassWithJavascript, DependencyType> dep : cjs.getDirectDependencyMap().entrySet()) {
                    if (dep.getKey() instanceof STJSClass) {
                        dependencyGraph.addVertex(dep.getKey().getJavaClassName());
                        if (dep.getValue() != DependencyType.OTHER) {
                            // edge goes from dependency to dependent
                            dependencyGraph.addEdge(dep.getKey().getJavaClassName(), className);
                        }
                    }
                }
            }
        }
        // check for cycles
        detectCycles(dependencyGraph);
        // dump all the files in dependency order into the pack file
        SourceMapGeneratorV3 packSourceMap = (SourceMapGeneratorV3) SourceMapGeneratorFactory.getInstance(SourceMapFormat.V3);
        int currentLine = 0;
        Iterator<String> it = new TopologicalOrderIterator<String, DefaultEdge>(dependencyGraph);
        while (it.hasNext()) {
            File targetFile = currentProjectsFiles.get(it.next());
            // target file is absolute
            if (targetFile != null) {
                // for this project's files
                if (generateSourceMap) {
                    currentLine = SourceMapUtils.appendFileSkipSourceMap(gendir.getGeneratedSourcesAbsolutePath(), allSourcesFile, targetFile, currentLine, packSourceMap, sourceEncoding);
                } else {
                    Files.copy(targetFile, allSourcesFile);
                }
                allSourcesFile.flush();
            }
        }
        if (generateSourceMap) {
            File packMapFile = new File(gendir.getGeneratedSourcesAbsolutePath(), project.getArtifactId() + ".map");
            packMapStream = new BufferedWriter(new FileWriter(packMapFile));
            packSourceMap.appendTo(packMapStream, project.getArtifactId() + ".js");
            allSourcesFile.write(("//# sourceMappingURL=" + project.getArtifactId() + ".map\n").getBytes());
            allSourcesFile.flush();
        }
    } catch (Exception ex) {
        throw new MojoFailureException("Error when packing files: " + ex.getMessage(), ex);
    } finally {
        try {
            Closeables.close(allSourcesFile, true);
        } catch (IOException e) {
            LOG.log(Level.SEVERE, "IOException should not have been thrown.", e);
        }
        try {
            Closeables.close(packMapStream, true);
        } catch (IOException e) {
            LOG.log(Level.SEVERE, "IOException should not have been thrown.", e);
        }
    }
}
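The graph logic at the heart of packFiles is: model inter-class dependencies as edges from dependency to dependent, refuse cycles, then emit files in topological order so every class's dependencies precede it in the pack. The project's detectCycles helper presumably wraps JGraphT's CycleDetector; the sketch below uses CycleDetector directly, with illustrative class names:

import java.util.Iterator;
import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.CycleDetector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;
import org.jgrapht.traverse.TopologicalOrderIterator;

public class PackOrderSketch {

    public static void main(String[] args) {
        DirectedGraph<String, DefaultEdge> deps =
                new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        deps.addVertex("a.Util");
        deps.addVertex("a.Main");
        // dependency -> dependent, so dependencies come first in the order
        deps.addEdge("a.Util", "a.Main");
        if (new CycleDetector<String, DefaultEdge>(deps).detectCycles()) {
            throw new IllegalStateException("dependency cycle detected");
        }
        Iterator<String> it = new TopologicalOrderIterator<String, DefaultEdge>(deps);
        while (it.hasNext()) {
            // prints a.Util, then a.Main
            System.out.println(it.next());
        }
    }
}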
Use of org.jgrapht.graph.DefaultDirectedGraph in project evosuite by EvoSuite: the class RegexDistanceUtils, method cacheRegex.
private static void cacheRegex(String regex) {
    String r = expandRegex(regex);
    Automaton automaton = new RegExp(r, RegExp.NONE).toAutomaton();
    automaton.expandSingleton();
    // We convert this to a graph without self-loops in order to determine the topological order
    DirectedGraph<State, DefaultEdge> regexGraph = new DefaultDirectedGraph<State, DefaultEdge>(DefaultEdge.class);
    Set<State> visitedStates = new HashSet<State>();
    Queue<State> states = new LinkedList<State>();
    State initialState = automaton.getInitialState();
    states.add(initialState);
    while (!states.isEmpty()) {
        State currentState = states.poll();
        if (visitedStates.contains(currentState))
            continue;
        if (!regexGraph.containsVertex(currentState))
            regexGraph.addVertex(currentState);
        for (Transition t : currentState.getTransitions()) {
            // Need to get rid of back edges, otherwise there is no topological order!
            if (!t.getDest().equals(currentState)) {
                regexGraph.addVertex(t.getDest());
                regexGraph.addEdge(currentState, t.getDest());
                states.add(t.getDest());
                CycleDetector<State, DefaultEdge> det = new CycleDetector<State, DefaultEdge>(regexGraph);
                if (det.detectCycles()) {
                    regexGraph.removeEdge(currentState, t.getDest());
                }
            }
        }
        visitedStates.add(currentState);
    }
    TopologicalOrderIterator<State, DefaultEdge> iterator = new TopologicalOrderIterator<State, DefaultEdge>(regexGraph);
    List<State> topologicalOrder = new ArrayList<State>();
    while (iterator.hasNext()) {
        topologicalOrder.add(iterator.next());
    }
    regexStateCache.put(regex, topologicalOrder);
    regexAutomatonCache.put(regex, automaton);
}
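Note the cost of the add-then-check trick above: a fresh CycleDetector pass runs after every tentative edge insertion, so filtering back edges this way is roughly O(E * (V + E)) overall, which is acceptable here because automata for practical regexes are small. A minimal sketch of the pattern in isolation, with illustrative state names:

import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.CycleDetector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class BackEdgeFilterSketch {

    // adds src -> dst unless doing so would create a cycle
    static boolean addAcyclicEdge(DirectedGraph<String, DefaultEdge> g, String src, String dst) {
        g.addVertex(src);
        g.addVertex(dst);
        g.addEdge(src, dst);
        if (new CycleDetector<String, DefaultEdge>(g).detectCycles()) {
            // the new edge closed a cycle, so back it out
            g.removeEdge(src, dst);
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        DirectedGraph<String, DefaultEdge> g =
                new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        addAcyclicEdge(g, "s0", "s1"); // kept
        addAcyclicEdge(g, "s1", "s2"); // kept
        addAcyclicEdge(g, "s2", "s0"); // back edge, rejected
        System.out.println(g.edgeSet().size()); // 2: the graph stays a DAG
    }
}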
Use of org.jgrapht.graph.DefaultDirectedGraph in project storm by nathanmarz: the class TridentTopology, method build.
public StormTopology build() {
    DefaultDirectedGraph<Node, IndexedEdge> graph = (DefaultDirectedGraph) _graph.clone();
    completeDRPC(graph, _colocate, _gen);
    List<SpoutNode> spoutNodes = new ArrayList<SpoutNode>();
    // can be regular nodes (static state) or processor nodes
    Set<Node> boltNodes = new HashSet<Node>();
    for (Node n : graph.vertexSet()) {
        if (n instanceof SpoutNode) {
            spoutNodes.add((SpoutNode) n);
        } else if (!(n instanceof PartitionNode)) {
            boltNodes.add(n);
        }
    }
    Set<Group> initialGroups = new HashSet<Group>();
    for (List<Node> colocate : _colocate.values()) {
        Group g = new Group(graph, colocate);
        boltNodes.removeAll(colocate);
        initialGroups.add(g);
    }
    for (Node n : boltNodes) {
        initialGroups.add(new Group(graph, n));
    }
    GraphGrouper grouper = new GraphGrouper(graph, initialGroups);
    grouper.mergeFully();
    Collection<Group> mergedGroups = grouper.getAllGroups();
    // add identity partitions between groups
    for (IndexedEdge<Node> e : new HashSet<IndexedEdge>(graph.edgeSet())) {
        if (!(e.source instanceof PartitionNode) && !(e.target instanceof PartitionNode)) {
            Group g1 = grouper.nodeGroup(e.source);
            Group g2 = grouper.nodeGroup(e.target);
            // g1 being null means the source is a spout node
            if (g1 == null && !(e.source instanceof SpoutNode))
                throw new RuntimeException("Planner exception: Null source group must indicate a spout node at this phase of planning");
            if (g1 == null || !g1.equals(g2)) {
                graph.removeEdge(e);
                PartitionNode pNode = makeIdentityPartition(e.source);
                graph.addVertex(pNode);
                graph.addEdge(e.source, pNode, new IndexedEdge(e.source, pNode, 0));
                graph.addEdge(pNode, e.target, new IndexedEdge(pNode, e.target, e.index));
            }
        }
    }
    // if one group subscribes to the same stream with the same partitioning multiple times,
    // merge those subscriptions together (otherwise many output streams can end up being
    // created for that partitioning, if the same input needs to be split into multiple output
    // streams because it has different partitionings to the group);
    // this is because splitting logic can't currently be merged into a spout.
    // Not the most kosher algorithm here, since the grouper indexes are being trounced by the
    // adding of nodes to random groups, but it works out.
    List<Node> forNewGroups = new ArrayList<Node>();
    for (Group g : mergedGroups) {
        for (PartitionNode n : extraPartitionInputs(g)) {
            Node idNode = makeIdentityNode(n.allOutputFields);
            Node newPartitionNode = new PartitionNode(idNode.streamId, n.name, idNode.allOutputFields, n.thriftGrouping);
            Node parentNode = TridentUtils.getParent(graph, n);
            Set<IndexedEdge> outgoing = graph.outgoingEdgesOf(n);
            graph.removeVertex(n);
            graph.addVertex(idNode);
            graph.addVertex(newPartitionNode);
            addEdge(graph, parentNode, idNode, 0);
            addEdge(graph, idNode, newPartitionNode, 0);
            for (IndexedEdge e : outgoing) {
                addEdge(graph, newPartitionNode, e.target, e.index);
            }
            Group parentGroup = grouper.nodeGroup(parentNode);
            if (parentGroup == null) {
                forNewGroups.add(idNode);
            } else {
                parentGroup.nodes.add(idNode);
            }
        }
    }
    for (Node n : forNewGroups) {
        grouper.addGroup(new Group(graph, n));
    }
    // add in spouts as groups so we can get parallelisms
    for (Node n : spoutNodes) {
        grouper.addGroup(new Group(graph, n));
    }
    grouper.reindex();
    mergedGroups = grouper.getAllGroups();
    Map<Node, String> batchGroupMap = new HashMap<Node, String>();
    List<Set<Node>> connectedComponents = new ConnectivityInspector<Node, IndexedEdge>(graph).connectedSets();
    for (int i = 0; i < connectedComponents.size(); i++) {
        String groupId = "bg" + i;
        for (Node n : connectedComponents.get(i)) {
            batchGroupMap.put(n, groupId);
        }
    }
    Map<Group, Integer> parallelisms = getGroupParallelisms(graph, grouper, mergedGroups);
    TridentTopologyBuilder builder = new TridentTopologyBuilder();
    Map<Node, String> spoutIds = genSpoutIds(spoutNodes);
    Map<Group, String> boltIds = genBoltIds(mergedGroups);
    for (SpoutNode sn : spoutNodes) {
        Integer parallelism = parallelisms.get(grouper.nodeGroup(sn));
        if (sn.type == SpoutNode.SpoutType.DRPC) {
            builder.setBatchPerTupleSpout(spoutIds.get(sn), sn.streamId, (IRichSpout) sn.spout, parallelism, batchGroupMap.get(sn));
        } else {
            ITridentSpout s;
            if (sn.spout instanceof IBatchSpout) {
                s = new BatchSpoutExecutor((IBatchSpout) sn.spout);
            } else if (sn.spout instanceof ITridentSpout) {
                s = (ITridentSpout) sn.spout;
            } else {
                // TODO: handle regular rich spouts without batches (needs lots of updates to support this throughout)
                throw new RuntimeException("Regular rich spouts not supported yet... try wrapping in a RichSpoutBatchExecutor");
            }
            builder.setSpout(spoutIds.get(sn), sn.streamId, sn.txId, s, parallelism, batchGroupMap.get(sn));
        }
    }
    for (Group g : mergedGroups) {
        if (!isSpoutGroup(g)) {
            Integer p = parallelisms.get(g);
            Map<String, String> streamToGroup = getOutputStreamBatchGroups(g, batchGroupMap);
            BoltDeclarer d = builder.setBolt(boltIds.get(g), new SubtopologyBolt(graph, g.nodes, batchGroupMap), p, committerBatches(g, batchGroupMap), streamToGroup);
            Collection<PartitionNode> inputs = uniquedSubscriptions(externalGroupInputs(g));
            for (PartitionNode n : inputs) {
                Node parent = TridentUtils.getParent(graph, n);
                String componentId;
                if (parent instanceof SpoutNode) {
                    componentId = spoutIds.get(parent);
                } else {
                    componentId = boltIds.get(grouper.nodeGroup(parent));
                }
                d.grouping(new GlobalStreamId(componentId, n.streamId), n.thriftGrouping);
            }
        }
    }
    return builder.buildTopology();
}
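The batchGroupMap step above labels each weakly connected component of the node graph with an id of the form "bg" + i, via ConnectivityInspector.connectedSets() (which ignores edge direction). A minimal sketch of that step alone, with illustrative vertex names:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.ConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class BatchGroupSketch {

    public static void main(String[] args) {
        DirectedGraph<String, DefaultEdge> g =
                new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        g.addVertex("spout1");
        g.addVertex("bolt1");
        g.addVertex("spout2"); // isolated vertex: its own component
        g.addEdge("spout1", "bolt1");
        List<Set<String>> components =
                new ConnectivityInspector<String, DefaultEdge>(g).connectedSets();
        Map<String, String> batchGroupMap = new HashMap<String, String>();
        for (int i = 0; i < components.size(); i++) {
            for (String n : components.get(i)) {
                batchGroupMap.put(n, "bg" + i);
            }
        }
        // e.g. {spout1=bg0, bolt1=bg0, spout2=bg1}; component numbering may vary
        System.out.println(batchGroupMap);
    }
}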