Use of org.stringtemplate.v4.ST in project antlr4 by antlr.
The class Target, method getBaseVisitorFileName.
/** Given a grammar T, return the file name of a blank visitor implementation
 *  such as TBaseVisitor.java, if we're using the Java target.
 */
public String getBaseVisitorFileName(boolean header) {
    assert gen.g.name != null;
    ST extST = getTemplates().getInstanceOf("codeFileExtension");
    String visitorName = gen.g.name + "BaseVisitor";
    return visitorName + extST.render();
}
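To see the ST calls in isolation, here is a minimal standalone sketch. The one-line codeFileExtension template is an assumption standing in for the Java target's real template group, which the snippet above looks up via getTemplates().

import org.stringtemplate.v4.*;

public class FileNameDemo {
    public static void main(String[] args) {
        // Hypothetical one-template group; the Java target's .stg file is
        // assumed to define codeFileExtension so that it yields ".java".
        STGroup group = new STGroupString("codeFileExtension() ::= \".java\"");
        ST extST = group.getInstanceOf("codeFileExtension");
        String visitorName = "T" + "BaseVisitor"; // grammar name "T" assumed
        System.out.println(visitorName + extST.render()); // TBaseVisitor.java
    }
}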
Use of org.stringtemplate.v4.ST in project antlr4 by antlr.
The class Target, method getImplicitTokenLabel.
// Should be the same for all refs to the same token, like ctx.ID, within a single rule function.
// For literals like 'while', we generate s<ttype>.
public String getImplicitTokenLabel(String tokenName) {
    int ttype = getCodeGenerator().g.getTokenType(tokenName);
    if (tokenName.startsWith("'")) {
        return "s" + ttype;
    }
    // Only named tokens go through the ImplicitTokenLabel template.
    ST st = getTemplates().getInstanceOf("ImplicitTokenLabel");
    String text = getTokenTypeAsTargetLabel(getCodeGenerator().g, ttype);
    st.add("tokenName", text);
    return st.render();
}
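A minimal sketch of what that template lookup does, assuming a hypothetical body for ImplicitTokenLabel (the real definition lives in the target's .stg group file):

import org.stringtemplate.v4.*;

public class ImplicitLabelDemo {
    public static void main(String[] args) {
        // "_t<tokenName>" is an assumed template body, for illustration only.
        STGroup group = new STGroupString(
            "ImplicitTokenLabel(tokenName) ::= \"_t<tokenName>\"");
        ST st = group.getInstanceOf("ImplicitTokenLabel");
        st.add("tokenName", "ID");
        System.out.println(st.render()); // _tID
    }
}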
Use of org.stringtemplate.v4.ST in project antlr4 by antlr.
The class BaseNodeTest, method testActions.
public void testActions(String templates, String actionName, String action, String expected)
        throws org.antlr.runtime.RecognitionException {
    // The template group's first template name is the text before '('.
    int lp = templates.indexOf('(');
    String name = templates.substring(0, lp);
    STGroup group = new STGroupString(templates);
    ST st = group.getInstanceOf(name);
    st.add(actionName, action);
    String grammar = st.render();
    ErrorQueue equeue = new ErrorQueue();
    Grammar g = new Grammar(grammar, equeue);
    if (g.ast != null && !g.ast.hasErrors) {
        SemanticPipeline sem = new SemanticPipeline(g);
        sem.process();
        ATNFactory factory = new ParserATNFactory(g);
        if (g.isLexer()) {
            factory = new LexerATNFactory((LexerGrammar) g);
        }
        g.atn = factory.createATN();
        CodeGenerator gen = new CodeGenerator(g);
        ST outputFileST = gen.generateParser();
        String output = outputFileST.render();
        // Extract the generated snippet between the #<actionName># and
        // #end-<actionName># markers and compare it to the expected text.
        String b = "#" + actionName + "#";
        int start = output.indexOf(b);
        String e = "#end-" + actionName + "#";
        int end = output.indexOf(e);
        String snippet = output.substring(start + b.length(), end);
        assertEquals(expected, snippet);
    }
    if (equeue.size() > 0) {
        System.err.println(equeue.toString());
    }
}
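The templates parameter is itself an ST group source string: the method reads the first template's name as everything before the first '('. A sketch of what a caller might pass, with a hypothetical members template whose formal argument matches actionName and whose grammar wraps the action in the #name#...#end-name# markers the method later searches for:

import org.stringtemplate.v4.*;

public class TemplatesArgumentDemo {
    public static void main(String[] args) {
        // Hypothetical `templates` argument for actionName "members".
        String templates =
            "members(members) ::= <<\n" +
            "grammar T;\n" +
            "@members {#members#<members>#end-members#}\n" +
            "a : ID ;\n" +
            "ID : [a-z]+ ;\n" +
            ">>\n";
        // Same name-extraction step as testActions: the text before '('.
        String name = templates.substring(0, templates.indexOf('('));
        ST st = new STGroupString(templates).getInstanceOf(name);
        st.add("members", "int i;");
        System.out.println(st.render()); // the grammar text, action spliced in
    }
}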
Use of org.stringtemplate.v4.ST in project antlr4 by antlr.
The class BasePythonTest, method testActions.
public void testActions(String templates, String actionName, String action, String expected)
        throws org.antlr.runtime.RecognitionException {
    // Identical in structure to BaseNodeTest.testActions above: render the
    // grammar from the template group, generate a parser, then compare the
    // marked snippet against the expected text.
    int lp = templates.indexOf('(');
    String name = templates.substring(0, lp);
    STGroup group = new STGroupString(templates);
    ST st = group.getInstanceOf(name);
    st.add(actionName, action);
    String grammar = st.render();
    ErrorQueue equeue = new ErrorQueue();
    Grammar g = new Grammar(grammar, equeue);
    if (g.ast != null && !g.ast.hasErrors) {
        SemanticPipeline sem = new SemanticPipeline(g);
        sem.process();
        ATNFactory factory = new ParserATNFactory(g);
        if (g.isLexer()) {
            factory = new LexerATNFactory((LexerGrammar) g);
        }
        g.atn = factory.createATN();
        CodeGenerator gen = new CodeGenerator(g);
        ST outputFileST = gen.generateParser();
        String output = outputFileST.render();
        String b = "#" + actionName + "#";
        int start = output.indexOf(b);
        String e = "#end-" + actionName + "#";
        int end = output.indexOf(e);
        String snippet = output.substring(start + b.length(), end);
        assertEquals(expected, snippet);
    }
    if (equeue.size() > 0) {
        System.err.println(equeue.toString());
    }
}
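Since the generation pipeline needs the full ANTLR tool on the classpath, the marker-extraction step at the end is easier to see against hypothetical generated output:

public class SnippetExtractionDemo {
    public static void main(String[] args) {
        String actionName = "members";
        // Hypothetical generated-parser output containing the markers.
        String output = "public class TParser {\n#members#int i;#end-members#\n}";
        String begin = "#" + actionName + "#";
        String end = "#end-" + actionName + "#";
        int start = output.indexOf(begin) + begin.length();
        String snippet = output.substring(start, output.indexOf(end));
        System.out.println(snippet); // prints: int i;
    }
}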
Use of org.stringtemplate.v4.ST in project antlr4 by antlr.
The class BasePython2Test, method writeLexerTestFile.
@Override
protected void writeLexerTestFile(String lexerName, boolean showDFA) {
    ST outputFileST = new ST(
        "from __future__ import print_function\n" +
        "import sys\n" +
        "import codecs\n" +
        "from antlr4 import *\n" +
        "from <lexerName> import <lexerName>\n" +
        "\n" +
        "def main(argv):\n" +
        "    input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" +
        "    with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" +
        "        lexer = <lexerName>(input, output)\n" +
        "        stream = CommonTokenStream(lexer)\n" +
        "        stream.fill()\n" +
        "        [ print(t, file=output) for t in stream.tokens ]\n" +
        (showDFA
            ? "        print(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString(), end='', file=output)\n"
            : "") +
        "\n" +
        "if __name__ == '__main__':\n" +
        "    main(sys.argv)\n" +
        "\n");
    outputFileST.add("lexerName", lexerName);
    writeFile(tmpdir, "Test.py", outputFileST.render());
}
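The <lexerName> occurrences in the Python source are ST expressions, not literal text: add() supplies the value once and render() substitutes every occurrence. A minimal sketch with an assumed lexer name:

import org.stringtemplate.v4.ST;

public class LexerHarnessNameDemo {
    public static void main(String[] args) {
        ST st = new ST("from <lexerName> import <lexerName>\n" +
                       "lexer = <lexerName>(input, output)\n");
        st.add("lexerName", "TLexer"); // "TLexer" is an assumed name
        System.out.print(st.render());
        // from TLexer import TLexer
        // lexer = TLexer(input, output)
    }
}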