Use of org.antlr.v4.tool.ast.GrammarRootAST in project antlr4 by antlr.
The class Grammar, method getStateToGrammarRegionMap:
public static Map<Integer, Interval> getStateToGrammarRegionMap(GrammarRootAST ast, IntervalSet grammarTokenTypes) {
    Map<Integer, Interval> stateToGrammarRegionMap = new HashMap<Integer, Interval>();
    if (ast == null)
        return stateToGrammarRegionMap;

    List<GrammarAST> nodes = ast.getNodesWithType(grammarTokenTypes);
    for (GrammarAST n : nodes) {
        if (n.atnState != null) {
            Interval tokenRegion = Interval.of(n.getTokenStartIndex(), n.getTokenStopIndex());
            org.antlr.runtime.tree.Tree ruleNode = null;
            // RULEs, BLOCKs of transformed recursive rules point to original token interval
            switch (n.getType()) {
                case ANTLRParser.RULE:
                    ruleNode = n;
                    break;
                case ANTLRParser.BLOCK:
                case ANTLRParser.CLOSURE:
                    ruleNode = n.getAncestor(ANTLRParser.RULE);
                    break;
            }
            if (ruleNode instanceof RuleAST) {
                String ruleName = ((RuleAST) ruleNode).getRuleName();
                Rule r = ast.g.getRule(ruleName);
                if (r instanceof LeftRecursiveRule) {
                    RuleAST originalAST = ((LeftRecursiveRule) r).getOriginalAST();
                    tokenRegion = Interval.of(originalAST.getTokenStartIndex(), originalAST.getTokenStopIndex());
                }
            }
            stateToGrammarRegionMap.put(n.atnState.stateNumber, tokenRegion);
        }
    }
    return stateToGrammarRegionMap;
}
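The map produced above keys ATN state numbers to token-index intervals in the grammar's token stream. Below is a minimal usage sketch, not part of the project source: it assumes a Grammar g that the Tool has already processed (so ATN states exist), and the class and method names AtnStateRegionDemo/describeAtnState are hypothetical. Passing null for the token types is assumed to select every node carrying an ATN state pointer, as Grammar's own lazy cache in getStateToGrammarRegion does.

import java.util.Map;
import org.antlr.runtime.Token;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.tool.Grammar;

public class AtnStateRegionDemo {
    /** Print the grammar position an ATN state was built from, if known. */
    public static void describeAtnState(Grammar g, int stateNumber) {
        // null token types => consider every node that carries an ATN state pointer (assumption noted above)
        Map<Integer, Interval> regions = Grammar.getStateToGrammarRegionMap(g.ast, null);
        Interval tokenRegion = regions.get(stateNumber);
        if (tokenRegion == null) return; // no grammar region recorded for this state
        // translate the token-index interval back to a source location
        Token start = g.ast.tokenStream.get(tokenRegion.a);
        System.out.printf("ATN state %d comes from grammar line %d:%d%n",
                stateNumber, start.getLine(), start.getCharPositionInLine());
    }
}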
Use of org.antlr.v4.tool.ast.GrammarRootAST in project antlr4 by antlr.
The class GrammarTransformPipeline, method extractImplicitLexer:
/** Build lexer grammar from combined grammar that looks like:
 *
 *  (COMBINED_GRAMMAR A
 *      (tokens { X (= Y 'y'))
 *      (OPTIONS (= x 'y'))
 *      (@ members {foo})
 *      (@ lexer header {package jj;})
 *      (RULES (RULE .+)))
 *
 *  Move rules and actions to new tree, don't dup. Split AST apart.
 *  We'll have this Grammar share token symbols later; don't generate
 *  tokenVocab or tokens{} section. Copy over named actions.
 *
 *  Side-effects: it removes children from GRAMMAR & RULES nodes
 *                in combined AST. Anything cut out is dup'd before
 *                adding to lexer to avoid "who's ur daddy" issues
 */
public GrammarRootAST extractImplicitLexer(Grammar combinedGrammar) {
    GrammarRootAST combinedAST = combinedGrammar.ast;
    //tool.log("grammar", "before="+combinedAST.toStringTree());
    GrammarASTAdaptor adaptor = new GrammarASTAdaptor(combinedAST.token.getInputStream());
    GrammarAST[] elements = combinedAST.getChildren().toArray(new GrammarAST[0]);

    // MAKE A GRAMMAR ROOT and ID
    String lexerName = combinedAST.getChild(0).getText() + "Lexer";
    GrammarRootAST lexerAST = new GrammarRootAST(new CommonToken(ANTLRParser.GRAMMAR, "LEXER_GRAMMAR"), combinedGrammar.ast.tokenStream);
    lexerAST.grammarType = ANTLRParser.LEXER;
    lexerAST.token.setInputStream(combinedAST.token.getInputStream());
    lexerAST.addChild((GrammarAST) adaptor.create(ANTLRParser.ID, lexerName));

    // COPY OPTIONS
    GrammarAST optionsRoot = (GrammarAST) combinedAST.getFirstChildWithType(ANTLRParser.OPTIONS);
    if (optionsRoot != null && optionsRoot.getChildCount() != 0) {
        GrammarAST lexerOptionsRoot = (GrammarAST) adaptor.dupNode(optionsRoot);
        lexerAST.addChild(lexerOptionsRoot);
        GrammarAST[] options = optionsRoot.getChildren().toArray(new GrammarAST[0]);
        for (GrammarAST o : options) {
            String optionName = o.getChild(0).getText();
            if (Grammar.lexerOptions.contains(optionName) && !Grammar.doNotCopyOptionsToLexer.contains(optionName)) {
                GrammarAST optionTree = (GrammarAST) adaptor.dupTree(o);
                lexerOptionsRoot.addChild(optionTree);
                lexerAST.setOption(optionName, (GrammarAST) optionTree.getChild(1));
            }
        }
    }

    // COPY all named actions, but only move those with lexer:: scope
    List<GrammarAST> actionsWeMoved = new ArrayList<GrammarAST>();
    for (GrammarAST e : elements) {
        if (e.getType() == ANTLRParser.AT) {
            lexerAST.addChild((Tree) adaptor.dupTree(e));
            if (e.getChild(0).getText().equals("lexer")) {
                actionsWeMoved.add(e);
            }
        }
    }
    for (GrammarAST r : actionsWeMoved) {
        combinedAST.deleteChild(r);
    }

    GrammarAST combinedRulesRoot = (GrammarAST) combinedAST.getFirstChildWithType(ANTLRParser.RULES);
    if (combinedRulesRoot == null)
        return lexerAST;

    // MOVE lexer rules
    GrammarAST lexerRulesRoot = (GrammarAST) adaptor.create(ANTLRParser.RULES, "RULES");
    lexerAST.addChild(lexerRulesRoot);
    List<GrammarAST> rulesWeMoved = new ArrayList<GrammarAST>();
    GrammarASTWithOptions[] rules;
    if (combinedRulesRoot.getChildCount() > 0) {
        rules = combinedRulesRoot.getChildren().toArray(new GrammarASTWithOptions[0]);
    } else {
        rules = new GrammarASTWithOptions[0];
    }
    for (GrammarASTWithOptions r : rules) {
        String ruleName = r.getChild(0).getText();
        if (Grammar.isTokenName(ruleName)) {
            lexerRulesRoot.addChild((Tree) adaptor.dupTree(r));
            rulesWeMoved.add(r);
        }
    }
    for (GrammarAST r : rulesWeMoved) {
        combinedRulesRoot.deleteChild(r);
    }

    // Will track 'if' from IF : 'if' ; rules to avoid defining new token for 'if'
    List<Pair<GrammarAST, GrammarAST>> litAliases = Grammar.getStringLiteralAliasesFromLexerRules(lexerAST);
    Set<String> stringLiterals = combinedGrammar.getStringLiterals();
    // add strings from combined grammar (and imported grammars) into lexer;
    // put them first as they are keywords; must resolve ambigs to these rules
    // tool.log("grammar", "strings from parser: "+stringLiterals);
    int insertIndex = 0;
    nextLit:
    for (String lit : stringLiterals) {
        // if lexer already has a rule for literal, continue
        if (litAliases != null) {
            for (Pair<GrammarAST, GrammarAST> pair : litAliases) {
                GrammarAST litAST = pair.b;
                if (lit.equals(litAST.getText()))
                    continue nextLit;
            }
        }
        // create for each literal: (RULE <uniquename> (BLOCK (ALT <lit>)))
        String rname = combinedGrammar.getStringLiteralLexerRuleName(lit);
        // can't use wizard; need special node types
        GrammarAST litRule = new RuleAST(ANTLRParser.RULE);
        BlockAST blk = new BlockAST(ANTLRParser.BLOCK);
        AltAST alt = new AltAST(ANTLRParser.ALT);
        TerminalAST slit = new TerminalAST(new CommonToken(ANTLRParser.STRING_LITERAL, lit));
        alt.addChild(slit);
        blk.addChild(alt);
        CommonToken idToken = new CommonToken(ANTLRParser.TOKEN_REF, rname);
        litRule.addChild(new TerminalAST(idToken));
        litRule.addChild(blk);
        lexerRulesRoot.insertChild(insertIndex, litRule);
        // lexerRulesRoot.getChildren().add(0, litRule);
        // reset indexes and set litRule parent
        lexerRulesRoot.freshenParentAndChildIndexes();
        // next literal will be added after the one just added
        insertIndex++;
    }

    // TODO: take out after stable if slow
    lexerAST.sanityCheckParentAndChildIndexes();
    combinedAST.sanityCheckParentAndChildIndexes();
    // tool.log("grammar", combinedAST.toTokenString());
    combinedGrammar.tool.log("grammar", "after extract implicit lexer =" + combinedAST.toStringTree());
    combinedGrammar.tool.log("grammar", "lexer =" + lexerAST.toStringTree());

    if (lexerRulesRoot.getChildCount() == 0)
        return null;
    return lexerAST;
}
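For context, here is a hedged sketch of how a caller such as the ANTLR Tool might drive this method for a combined grammar. It is not the project's exact code; tool and g are assumed to be an existing Tool and an already-loaded combined Grammar.

GrammarTransformPipeline transform = new GrammarTransformPipeline(g, tool);
transform.process();
if (g.ast != null && g.ast.grammarType == ANTLRParser.COMBINED && !g.ast.hasErrors) {
    // alters g.ast: lexer rules, lexer-scoped actions, and string-literal rules move to the new tree;
    // a null result means the combined grammar contributed no lexer rules or literals
    GrammarRootAST lexerAST = transform.extractImplicitLexer(g);
    if (lexerAST != null) {
        LexerGrammar lexerg = new LexerGrammar(tool, lexerAST);
        lexerg.fileName = g.fileName;
        g.implicitLexer = lexerg; // the implicit lexer will share token symbols with g
    }
}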
Use of org.antlr.v4.tool.ast.GrammarRootAST in project antlr4 by antlr.
The class GrammarTransformPipeline, method process:
public void process() {
    GrammarRootAST root = g.ast;
    if (root == null)
        return;
    tool.log("grammar", "before: " + root.toStringTree());
    integrateImportedGrammars(g);
    reduceBlocksToSets(root);
    expandParameterizedLoops(root);
    tool.log("grammar", "after: " + root.toStringTree());
}
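A minimal, hypothetical driver for the pipeline: parse a grammar from a string and run the transform passes on its root AST. The grammar text and variable names are illustrative only, not from the project source.

Tool tool = new Tool();
GrammarRootAST root = tool.parseGrammarFromString(
        "grammar T;\n" +
        "a : ('x' | 'y') 'z' ;\n");
Grammar g = tool.createGrammar(root);            // wraps the AST and sets grammar back-pointers
new GrammarTransformPipeline(g, tool).process(); // e.g. reduceBlocksToSets can collapse ('x' | 'y') into a single set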
Use of org.antlr.v4.tool.ast.GrammarRootAST in project antlr4 by antlr.
The class GrammarDependencies, method analyse:
private void analyse(File grammarFile, Collection<File> grammarFiles, Tool tool) {
    GrammarRootAST grammar = tool.parseGrammar(grammarFile.getAbsolutePath());
    if (grammar == null)
        return;

    for (GrammarAST importDecl : grammar.getAllChildrenWithType(ANTLRParser.IMPORT)) {
        Tree id = importDecl.getFirstChildWithType(ANTLRParser.ID);
        // a missing id is not valid, but leave the root cause to be reported by the ANTLR tool
        if (id != null) {
            String grammarPath = getRelativePath(grammarFile);
            graph.addEdge(id.getText() + ".g4", grammarPath);
        }
    }

    for (GrammarAST options : grammar.getAllChildrenWithType(ANTLRParser.OPTIONS)) {
        for (int i = 0, count = options.getChildCount(); i < count; i++) {
            Tree option = options.getChild(i);
            if (option.getType() == ANTLRParser.ASSIGN) {
                String key = option.getChild(0).getText();
                String value = option.getChild(1).getText();
                if ("tokenVocab".equals(key)) {
                    String name = stripQuotes(value);
                    // the grammar name may be qualified, but we resolve the path anyway
                    String grammarName = stripPath(name);
                    String grammarPath = MojoUtils.findSourceSubdir(sourceDirectory, grammarFile);
                    File depGrammarFile = resolve(grammarName, grammarPath);
                    // if a package name is configured, use it as the path
                    // (files probably reside in the root directory anyway with such a configuration)
                    if (packageName != null)
                        grammarPath = packageName;
                    graph.addEdge(getRelativePath(depGrammarFile), grammarPath + grammarFile.getName());
                }
            }
        }
    }
}
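A standalone sketch of the same inspection without the Maven plugin helpers (graph, getRelativePath, resolve, and MojoUtils are plugin-specific); the file path here is hypothetical.

Tool tool = new Tool();
GrammarRootAST ast = tool.parseGrammar("/path/to/MyParser.g4"); // hypothetical path
if (ast != null) {
    // list delegate grammars referenced by "import X;"
    for (GrammarAST importDecl : ast.getAllChildrenWithType(ANTLRParser.IMPORT)) {
        Tree id = importDecl.getFirstChildWithType(ANTLRParser.ID);
        if (id != null) {
            System.out.println("imports " + id.getText() + ".g4");
        }
    }
    // report a tokenVocab option, which also implies a build-order dependency
    for (GrammarAST options : ast.getAllChildrenWithType(ANTLRParser.OPTIONS)) {
        for (int i = 0; i < options.getChildCount(); i++) {
            Tree option = options.getChild(i);
            if (option.getType() == ANTLRParser.ASSIGN
                    && "tokenVocab".equals(option.getChild(0).getText())) {
                System.out.println("tokenVocab = " + option.getChild(1).getText());
            }
        }
    }
}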
Use of org.antlr.v4.tool.ast.GrammarRootAST in project antlr4 by antlr.
The class TestATNConstruction, method testParserRuleRefInLexerRule:
@Test
public void testParserRuleRefInLexerRule() throws Exception {
    boolean threwException = false;
    ErrorQueue errorQueue = new ErrorQueue();
    try {
        String gstr = "grammar U;\n" +
                      "a : A;\n" +
                      "A : a;\n";
        Tool tool = new Tool();
        tool.removeListeners();
        tool.addListener(errorQueue);
        assertEquals(0, errorQueue.size());
        GrammarRootAST grammarRootAST = tool.parseGrammarFromString(gstr);
        assertEquals(0, errorQueue.size());
        Grammar g = tool.createGrammar(grammarRootAST);
        assertEquals(0, errorQueue.size());
        g.fileName = "<string>";
        tool.process(g, false);
    } catch (Exception e) {
        threwException = true;
        e.printStackTrace();
    }
    System.out.println(errorQueue);
    assertEquals(1, errorQueue.errors.size());
    assertEquals(ErrorType.PARSER_RULE_REF_IN_LEXER_RULE, errorQueue.errors.get(0).getErrorType());
    assertEquals("[a, A]", Arrays.toString(errorQueue.errors.get(0).getArgs()));
    assertTrue(!threwException);
}
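As a small companion sketch, the GrammarRootAST returned by parseGrammarFromString can also be inspected directly before a Grammar is created; the grammar text below is illustrative only.

GrammarRootAST ast = new Tool().parseGrammarFromString(
        "grammar V;\n" +
        "a : 'x' ;\n");
System.out.println(ast.getGrammarName());                    // V
System.out.println(ast.grammarType == ANTLRParser.COMBINED); // true
System.out.println(ast.toStringTree());                      // LISP-style dump of the grammar AST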