Use of org.antlr.v4.runtime.Lexer in the project Dat3M by hernanponcedeleon:
the class Porthos, method main.
/**
 * Entry point of the PORTHOS portability checker: parses the command line,
 * reads a program in pts or litmus format, compiles it under a source and a
 * target memory consistency model, and uses Z3 to decide (trace or state)
 * portability between the two models.
 *
 * @param args command line arguments (see option definitions below)
 * @throws Z3Exception on solver failure
 * @throws IOException if the input file cannot be read
 */
public static void main(String[] args) throws Z3Exception, IOException {
    // Memory consistency models PORTHOS knows how to encode.
    List<String> MCMs = Arrays.asList("sc", "tso", "pso", "rmo", "alpha", "power", "arm");

    // --- Command line definition ---
    Options options = new Options();
    Option sourceOpt = new Option("s", "source", true, "source MCM");
    sourceOpt.setRequired(true);
    options.addOption(sourceOpt);
    Option targetOpt = new Option("t", "target", true, "target MCM");
    targetOpt.setRequired(true);
    options.addOption(targetOpt);
    Option inputOpt = new Option("i", "input", true, "input file path");
    inputOpt.setRequired(true);
    options.addOption(inputOpt);
    options.addOption("state", false, "PORTHOS performs state portability");
    // Fixed help-text typo: "buf" -> "bug".
    options.addOption(Option.builder("draw").hasArg().desc("If a bug is found, it outputs a graph \\path_to_file.dot").build());
    options.addOption(Option.builder("rels").hasArgs().desc("Relations to be drawn in the graph").build());
    options.addOption(Option.builder("unroll").hasArg().desc("Unrolling steps").build());

    CommandLineParser parserCmd = new DefaultParser();
    HelpFormatter formatter = new HelpFormatter();
    CommandLine cmd;
    try {
        cmd = parserCmd.parse(options, args);
    } catch (ParseException e) {
        System.out.println(e.getMessage());
        formatter.printHelp("PORTHOS", options);
        System.exit(1);
        return;
    }

    // --- Input validation ---
    String source = cmd.getOptionValue("source");
    if (MCMs.stream().noneMatch(mcm -> mcm.trim().equals(source))) {
        // NOTE(review): exiting with status 0 on bad input looks unintended
        // (parse errors above exit with 1) -- confirm before changing.
        System.out.println("Unrecognized source");
        System.exit(0);
        return;
    }
    String target = cmd.getOptionValue("target");
    if (MCMs.stream().noneMatch(mcm -> mcm.trim().equals(target))) {
        System.out.println("Unrecognized target");
        System.exit(0);
        return;
    }
    String inputFilePath = cmd.getOptionValue("input");
    if (!inputFilePath.endsWith("pts") && !inputFilePath.endsWith("litmus")) {
        System.out.println("Unrecognized program format");
        System.exit(0);
        return;
    }
    File file = new File(inputFilePath);
    boolean statePortability = cmd.hasOption("state");

    // Relations to draw; empty by default. (Previously `new String[100]`,
    // which handed 100 null entries to Utils.drawGraph when -rels was absent.)
    String[] rels = new String[0];
    if (cmd.hasOption("rels")) {
        rels = cmd.getOptionValues("rels");
    }

    // --- Parse the input program (litmus or pts format) ---
    String program = FileUtils.readFileToString(file, "UTF-8");
    ANTLRInputStream input = new ANTLRInputStream(program);
    Program p = new Program(inputFilePath);
    if (inputFilePath.endsWith("litmus")) {
        LitmusLexer lexer = new LitmusLexer(input);
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        LitmusParser parser = new LitmusParser(tokens);
        p = parser.program(inputFilePath).p;
    }
    if (inputFilePath.endsWith("pts")) {
        PorthosLexer lexer = new PorthosLexer(input);
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        PorthosParser parser = new PorthosParser(tokens);
        p = parser.program(inputFilePath).p;
    }

    // Unrolling bound for loops; defaults to a single step.
    int steps = 1;
    if (cmd.hasOption("unroll")) {
        steps = Integer.parseInt(cmd.getOptionValue("unroll"));
    }
    p.initialize(steps);

    // Compile one copy of the program per memory model. Events of the target
    // copy get IDs starting after the source copy's highest Init-event ID so
    // the two encodings never clash.
    Program pSource = p.clone();
    Program pTarget = p.clone();
    pSource.compile(source, false, true);
    Integer startEId = Collections.max(pSource.getEvents().stream().filter(e -> e instanceof Init).map(e -> e.getEId()).collect(Collectors.toSet())) + 1;
    pTarget.compile(target, false, true, startEId);

    // --- Build the two SMT queries ---
    Context ctx = new Context();
    ctx.setPrintMode(Z3_ast_print_mode.Z3_PRINT_SMTLIB_FULL);
    Solver s = ctx.mkSolver();   // portability query: target-consistent, source-inconsistent execution
    Solver s2 = ctx.mkSolver();  // state-reachability query under the source model
    BoolExpr sourceDF = pSource.encodeDF(ctx);
    BoolExpr sourceCF = pSource.encodeCF(ctx);
    BoolExpr sourceDF_RF = pSource.encodeDF_RF(ctx);
    BoolExpr sourceDomain = Domain.encode(pSource, ctx);
    BoolExpr sourceMM = pSource.encodeMM(ctx, source);
    s.add(pTarget.encodeDF(ctx));
    s.add(pTarget.encodeCF(ctx));
    s.add(pTarget.encodeDF_RF(ctx));
    s.add(Domain.encode(pTarget, ctx));
    s.add(pTarget.encodeMM(ctx, target));
    s.add(pTarget.encodeConsistent(ctx, target));
    s.add(sourceDF);
    s.add(sourceCF);
    s.add(sourceDF_RF);
    s.add(sourceDomain);
    s.add(sourceMM);
    s.add(pSource.encodeInconsistent(ctx, source));
    s.add(encodeCommonExecutions(pTarget, pSource, ctx));
    s2.add(sourceDF);
    s2.add(sourceCF);
    s2.add(sourceDF_RF);
    s2.add(sourceDomain);
    s2.add(sourceMM);
    s2.add(pSource.encodeConsistent(ctx, source));

    // --- Trace portability: one SAT call settles the question ---
    if (!statePortability) {
        if (s.check() == Status.SATISFIABLE) {
            System.out.println("The program is not portable");
            if (cmd.hasOption("draw")) {
                String outputPath = cmd.getOptionValue("draw");
                Utils.drawGraph(p, pSource, pTarget, ctx, s.getModel(), outputPath, rels);
            }
            return;
        } else {
            System.out.println("The program is portable");
            return;
        }
    }

    // --- State portability: enumerate states reachable in the target and
    // check each is also reachable under the source model ---
    int iterations = 0;
    Status lastCheck = Status.SATISFIABLE;
    Set<Expr> visited = new HashSet<Expr>();
    while (lastCheck == Status.SATISFIABLE) {
        lastCheck = s.check();
        if (lastCheck == Status.SATISFIABLE) {
            iterations = iterations + 1;
            Model model = s.getModel();
            s2.push();
            BoolExpr reachedState = encodeReachedState(pTarget, model, ctx);
            visited.add(reachedState);
            // Each iteration must discover a previously unseen state.
            assert (iterations == visited.size());
            s2.add(reachedState);
            if (s2.check() == Status.UNSATISFIABLE) {
                // State reachable in the target but not in the source.
                System.out.println("The program is not state-portable");
                System.out.println("Iterations: " + iterations);
                return;
            } else {
                s2.pop();
                // Block this state and search for another one.
                s.add(ctx.mkNot(reachedState));
            }
        } else {
            System.out.println("The program is state-portable");
            System.out.println("Iterations: " + iterations);
            return;
        }
    }
}
Use of org.antlr.v4.runtime.Lexer in the project beakerx by twosigma:
the class GroovyAutocomplete, method tryFindAutocomplete.
/**
 * Computes autocomplete candidates for the given Groovy source text at the
 * caret position by parsing the text with ANTLR and walking the parse tree
 * with three completion extractors; falls back to findAutocompleteResult when
 * the registry search yields nothing.
 *
 * @param txt source text being edited
 * @param cur caret position within txt
 * @param l class loader used to resolve classes for completion
 * @param imports imports in scope for the session
 * @return the autocomplete result (possibly from the fallback path)
 */
private AutocompleteResult tryFindAutocomplete(String txt, int cur, ClassLoader l, Imports imports) {
    // Build a fresh registry seeded with the session's default imports.
    registry = AutocompleteRegistryFactory.createRegistry(cps);
    GroovyClassUtils classUtils = createClassUtils(l);
    setup(classUtils, registry);
    AutocompleteRegistryFactory.addDefaultImports(classUtils, registry, imports.toListOfStrings(), cps);
    AutocompleteRegistryFactory.moreSetup(classUtils);

    // Lex and parse the text, silencing console error output from both stages.
    Lexer lexer = new GroovyLexer(new ANTLRInputStream(txt));
    lexer.removeErrorListeners();
    GroovyParser parser = new GroovyParser(new CommonTokenStream(lexer));
    parser.removeErrorListeners();
    // Start parsing at the compilationUnit rule.
    ParserRuleContext tree = parser.compilationUnit();

    // Run the three extractors over the parse tree, in the original order.
    GroovyImportDeclarationCompletion importCompletion = new GroovyImportDeclarationCompletion(txt, cur, registry, cps, classUtils);
    GroovyNameBuilder nameBuilder = new GroovyNameBuilder(registry, classUtils);
    GroovyNodeCompletion nodeCompletion = new GroovyNodeCompletion(txt, cur, registry, classUtils);
    ParseTreeWalker walker = new ParseTreeWalker();
    walker.walk(importCompletion, tree);
    walker.walk(nameBuilder, tree);
    walker.walk(nodeCompletion, tree);

    // Collect the candidate queries produced by the extractors.
    List<AutocompleteCandidate> queries = new ArrayList<>();
    if (importCompletion.getQuery() != null) {
        queries.addAll(importCompletion.getQuery());
    }
    if (nodeCompletion.getQuery() != null) {
        queries.addAll(nodeCompletion.getQuery());
    }

    List<String> candidates = registry.searchCandidates(queries);
    if (!candidates.isEmpty()) {
        return new AutocompleteResult(candidates, getStartIndex(importCompletion, nameBuilder, nodeCompletion));
    }
    return findAutocompleteResult(txt, cur, classUtils);
}
Use of org.antlr.v4.runtime.Lexer in the project bacter by tgvaughan:
the class ConversionGraph, method fromExtendedNewick.
/**
* Read in an ACG from a string in extended newick format. Assumes
* that the network is stored with exactly the same metadata as written
* by the getExtendedNewick() method.
*
* @param string extended newick representation of ACG
* @param numbered true indicates that the ACG is numbered.
*/
/**
 * Read in an ACG from a string in extended newick format. Assumes
 * that the network is stored with exactly the same metadata as written
 * by the getExtendedNewick() method.
 *
 * @param string extended newick representation of ACG
 * @param numbered true indicates that the ACG is numbered.
 * @param nodeNumberoffset offset subtracted from parsed node labels when
 *     assigning node numbers
 */
public void fromExtendedNewick(String string, boolean numbered, int nodeNumberoffset) {
    // Spin up ANTLR
    CharStream input = CharStreams.fromString(string);
    ExtendedNewickLexer lexer = new ExtendedNewickLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    ExtendedNewickParser parser = new ExtendedNewickParser(tokens);
    ParseTree parseTree = parser.tree();

    // Hybrid-node IDs found in the newick string map onto Conversion objects.
    Map<String, Conversion> convIDMap = new HashMap<>();

    Node root = new ExtendedNewickBaseVisitor<Node>() {

        /**
         * Convert branch lengths to node heights for all nodes in clade.
         *
         * @param node clade parent
         * @return minimum height assigned in clade.
         */
        private double branchLengthsToHeights(Node node) {
            // Heights are computed downward from the root (root = 0), so they
            // come out non-positive; removeOffset() shifts them up afterwards.
            if (node.isRoot())
                node.setHeight(0.0);
            else
                node.setHeight(node.getParent().getHeight() - node.getHeight());
            double minHeight = node.getHeight();
            for (Node child : node.getChildren()) {
                minHeight = Math.min(minHeight, branchLengthsToHeights(child));
            }
            return minHeight;
        }

        /**
         * Remove height offset from all nodes in clade
         * @param node parent of clade
         * @param offset offset to remove
         */
        private void removeOffset(Node node, double offset) {
            node.setHeight(node.getHeight() - offset);
            for (Node child : node.getChildren()) removeOffset(child, offset);
        }

        /**
         * Follow past conversion-associated (hybrid) nodes to the nearest
         * node belonging to the clonal frame.
         *
         * @param node starting node
         * @return corresponding clonal-frame node
         */
        private Node getTrueNode(Node node) {
            if (node.isLeaf()) {
                assert !convIDMap.containsKey(node.getID());
                return node;
            }
            // A node whose ID names a conversion is itself a hybrid node:
            // descend through its (single relevant) child.
            if (convIDMap.containsKey(node.getID()))
                return getTrueNode(node.getChild(0));
            int hybridIdx = -1;
            int nonHybridIdx = -1;
            for (int i = 0; i < node.getChildCount(); i++) {
                if (node.getChild(i).isLeaf() && convIDMap.containsKey(node.getChild(i).getID()))
                    hybridIdx = i;
                else
                    nonHybridIdx = i;
            }
            // NOTE(review): `hybridIdx > 0` ignores a hybrid child found at
            // index 0; `hybridIdx >= 0` looks intended -- confirm against the
            // grammar/writer before changing.
            if (hybridIdx > 0)
                return getTrueNode(node.getChild(nonHybridIdx));
            return node;
        }

        /**
         * Traverse the newly constructed tree looking for
         * hybrid nodes and using these to set the heights of
         * Conversion objects.
         *
         * @param node parent of clade
         */
        private void findConversionAttachments(Node node) {
            if (convIDMap.containsKey(node.getID())) {
                Conversion conv = convIDMap.get(node.getID());
                if (node.isLeaf()) {
                    // Leaf hybrid: records the arrival point of the conversion.
                    conv.setHeight1(node.getHeight());
                    conv.setHeight2(node.getParent().getHeight());
                    conv.setNode2(getTrueNode(node.getParent()));
                } else
                    // Internal hybrid: records the departure point.
                    conv.setNode1(getTrueNode(node));
            }
            for (Node child : node.getChildren()) findConversionAttachments(child);
        }

        /**
         * Remove all conversion-associated nodes, leaving only
         * the clonal frame.
         *
         * @param node parent of clade
         * @return new parent of same clade
         */
        private Node stripHybridNodes(Node node) {
            Node trueNode = getTrueNode(node);
            List<Node> trueChildren = new ArrayList<>();
            for (Node child : trueNode.getChildren()) {
                trueChildren.add(stripHybridNodes(child));
            }
            trueNode.removeAllChildren(false);
            for (Node trueChild : trueChildren) trueNode.addChild(trueChild);
            return trueNode;
        }

        /**
         * Assign consecutive numbers to internal nodes in post-order,
         * starting from nextNr.
         *
         * @param node parent of clade
         * @param nextNr next number to assign
         * @return next unused number after this clade
         */
        private int numberInternalNodes(Node node, int nextNr) {
            if (node.isLeaf())
                return nextNr;
            for (Node child : node.getChildren()) nextNr = numberInternalNodes(child, nextNr);
            node.setNr(nextNr);
            return nextNr + 1;
        }

        @Override
        public Node visitTree(ExtendedNewickParser.TreeContext ctx) {
            // Build the raw tree, then post-process: heights, conversion
            // attachments, hybrid-node removal, internal numbering.
            Node root = visitNode(ctx.node());
            double minHeight = branchLengthsToHeights(root);
            removeOffset(root, minHeight);
            findConversionAttachments(root);
            root = stripHybridNodes(root);
            root.setParent(null);
            if (!numbered)
                numberInternalNodes(root, root.getAllLeafNodes().size());
            return root;
        }

        @Override
        public Node visitNode(ExtendedNewickParser.NodeContext ctx) {
            Node node = new Node();
            if (ctx.post().hybrid() != null) {
                // Hybrid nodes carry conversion metadata; the same hybrid ID
                // may appear twice (departure and arrival) and must map to a
                // single Conversion object.
                String convID = ctx.post().hybrid().getText();
                node.setID(convID);
                Conversion conv;
                if (convIDMap.containsKey(convID))
                    conv = convIDMap.get(convID);
                else {
                    conv = new Conversion();
                    convIDMap.put(convID, conv);
                }
                if (ctx.node().isEmpty()) {
                    // Leaf hybrid node: read region and locus attributes from
                    // the metadata written by getExtendedNewick().
                    String locusID;
                    for (ExtendedNewickParser.AttribContext attribCtx : ctx.post().meta().attrib()) {
                        switch(attribCtx.attribKey.getText()) {
                            case "region":
                                conv.setStartSite(Integer.parseInt(attribCtx.attribValue().vector().attribValue(0).getText()));
                                conv.setEndSite(Integer.parseInt(attribCtx.attribValue().vector().attribValue(1).getText()));
                                break;
                            case "locus":
                                locusID = attribCtx.attribValue().getText();
                                // Strip surrounding quotes if present.
                                if (locusID.startsWith("\""))
                                    locusID = locusID.substring(1, locusID.length() - 1);
                                Locus locus = null;
                                for (Locus thisLocus : getLoci()) {
                                    if (thisLocus.getID().equals(locusID))
                                        locus = thisLocus;
                                }
                                if (locus == null)
                                    throw new IllegalArgumentException("Locus with ID " + locusID + " not found.");
                                conv.setLocus(locus);
                                break;
                            default:
                                break;
                        }
                    }
                }
            }
            for (ExtendedNewickParser.NodeContext childCtx : ctx.node()) node.addChild(visitNode(childCtx));
            if (ctx.post().label() != null) {
                node.setID(ctx.post().label().getText());
                node.setNr(Integer.parseInt(ctx.post().label().getText()) - nodeNumberoffset);
            }
            // Height temporarily holds the branch length; converted to a true
            // height later by branchLengthsToHeights().
            node.setHeight(Double.parseDouble(ctx.post().length.getText()));
            return node;
        }
    }.visit(parseTree);

    // Install the parsed clonal frame into this Tree object.
    m_nodes = root.getAllChildNodesAndSelf().toArray(m_nodes);
    nodeCount = m_nodes.length;
    leafNodeCount = root.getAllLeafNodes().size();
    setRoot(root);
    initArrays();

    // Replace any existing conversions with the newly parsed ones.
    for (Locus locus : getLoci()) convs.get(locus).clear();
    for (Conversion conv : convIDMap.values()) addConversion(conv);
}
Use of org.antlr.v4.runtime.Lexer in the project beakerx by twosigma:
the class JavaAutocomplete, method find.
/**
 * Computes autocomplete candidates for the given Java source text at the
 * caret position: parses the text with ANTLR, walks the parse tree with three
 * completion extractors, and falls back to findAutocompleteResult when the
 * registry search yields nothing.
 *
 * @param txt source text being edited
 * @param cur caret position within txt
 * @param l class loader used to resolve classes for completion
 * @param imports imports in scope for the session
 * @return the autocomplete result (possibly from the fallback path)
 */
private AutocompleteResult find(String txt, int cur, ClassLoader l, Imports imports) {
    registry = AutocompleteRegistryFactory.createRegistry(cps);
    ClassUtils cu = createClassUtils(l);
    setup(cu, registry);
    AutocompleteRegistryFactory.addDefaultImports(cu, registry, imports.toListOfStrings(), cps);
    Lexer lexer = new JavaLexer(new ANTLRInputStream(txt));
    // Consistency fix: suppress lexer console error output, matching
    // GroovyAutocomplete.tryFindAutocomplete (the parser below already does).
    lexer.removeErrorListeners();
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    // Create a parser that reads from the scanner
    JavaParser parser = new JavaParser(tokens);
    parser.removeErrorListeners();
    // start parsing at the compilationUnit rule
    ParserRuleContext t = parser.compilationUnit();
    ParseTreeWalker walker = new ParseTreeWalker();
    List<AutocompleteCandidate> q = new ArrayList<>();
    JavaImportDeclarationCompletion extractor = new JavaImportDeclarationCompletion(txt, cur, registry, cps, cu);
    JavaNameBuilder extractor2 = new JavaNameBuilder(registry, cu);
    JavaNodeCompletion extractor3 = new JavaNodeCompletion(txt, cur, registry, cu);
    walker.walk(extractor, t);
    if (extractor.getQuery() != null)
        q.addAll(extractor.getQuery());
    walker.walk(extractor2, t);
    walker.walk(extractor3, t);
    if (extractor3.getQuery() != null)
        q.addAll(extractor3.getQuery());
    List<String> ret = registry.searchCandidates(q);
    if (!ret.isEmpty()) {
        return new AutocompleteResult(ret, getStartIndex(extractor, extractor2, extractor3));
    }
    return findAutocompleteResult(txt, cur, cu);
}
Use of org.antlr.v4.runtime.Lexer in the project vespa by vespa-engine:
the class ProgramParser, method prepareParser.
/**
 * Builds a yqlplus parser over the given input whose lexer and parser both
 * fail fast: any syntax error is converted into a ProgramCompileException
 * carrying the program name and source location.
 *
 * @param programName name used in error locations
 * @param input character stream to parse
 * @return a parser configured with SLL prediction mode
 */
private yqlplusParser prepareParser(String programName, CharStream input) {
    // One listener instance is shared by lexer and parser; it is stateless,
    // so this is equivalent to the two identical anonymous listeners it
    // replaces.
    BaseErrorListener failFast = new BaseErrorListener() {
        @Override
        public void syntaxError(@NotNull Recognizer<?, ?> recognizer, @Nullable Object offendingSymbol, int line, int charPositionInLine, @NotNull String msg, @Nullable RecognitionException e) {
            throw new ProgramCompileException(new Location(programName, line, charPositionInLine), msg);
        }
    };

    yqlplusLexer lexer = new yqlplusLexer(input);
    lexer.removeErrorListeners();
    lexer.addErrorListener(failFast);

    yqlplusParser parser = new yqlplusParser(new CommonTokenStream(lexer));
    parser.removeErrorListeners();
    parser.addErrorListener(failFast);
    // SLL is the faster of ANTLR's prediction modes.
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    return parser;
}
Aggregations