Use of org.antlr.v4.runtime.dfa.DFA in project antlr4 by antlr.
The class TestATNParserPrediction, method checkPredictedAlt.
/**
 * First check that the ATN predicts the right alt.
 * Then check adaptive prediction.
 */
public void checkPredictedAlt(LexerGrammar lg, Grammar g, int decision, String inputString, int expectedAlt) {
    Tool.internalOption_ShowATNConfigsInDFA = true;
    ATN lexatn = createATN(lg, true);
    LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn,
            new DFA[] { new DFA(lexatn.modeToStartState.get(Lexer.DEFAULT_MODE)) },
            new PredictionContextCache());
    IntegerList types = getTokenTypesViaATN(inputString, lexInterp);
    // System.out.println(types);
    semanticProcess(lg);
    g.importVocab(lg);
    semanticProcess(g);
    ParserATNFactory f = new ParserATNFactory(g);
    ATN atn = f.createATN();
    DOTGenerator dot = new DOTGenerator(g);
    Rule r = g.getRule("a");
    // if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index]));
    r = g.getRule("b");
    // if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index]));
    r = g.getRule("e");
    // if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index]));
    r = g.getRule("ifstat");
    // if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index]));
    r = g.getRule("block");
    // if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index]));
    // Check ATN prediction
    // ParserATNSimulator interp = new ParserATNSimulator(atn);
    TokenStream input = new MockIntTokenStream(types);
    ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input);
    int alt = interp.adaptivePredict(input, decision, ParserRuleContext.EMPTY);
    assertEquals(expectedAlt, alt);
    // Check adaptive prediction
    input.seek(0);
    alt = interp.adaptivePredict(input, decision, null);
    assertEquals(expectedAlt, alt);
    // run 2x; first time creates DFA in atn
    input.seek(0);
    alt = interp.adaptivePredict(input, decision, null);
    assertEquals(expectedAlt, alt);
}
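For context, a caller of this helper typically builds a small lexer/parser grammar pair in-line and asserts the predicted alternative for a given input. The sketch below is not taken from TestATNParserPrediction itself; the grammar text, decision number, and expected alternatives are illustrative assumptions.

// Hypothetical driver for checkPredictedAlt; grammar, decision 0, and expected alts are illustrative.
@Test
public void testPredictsAltFromLookahead() throws Exception {
    LexerGrammar lg = new LexerGrammar(
            "lexer grammar L;\n" +
            "A : 'a' ;\n" +
            "B : 'b' ;\n");
    Grammar g = new Grammar(
            "parser grammar T;\n" +
            "a : A | B ;");
    // decision 0 is the A|B choice in rule a; "a" should select alt 1, "b" alt 2
    checkPredictedAlt(lg, g, 0, "a", 1);
    checkPredictedAlt(lg, g, 0, "b", 2);
}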
Use of org.antlr.v4.runtime.dfa.DFA in project antlr4 by antlr.
The class TestATNInterpreter, method checkMatchedAlt.
public void checkMatchedAlt(LexerGrammar lg, final Grammar g, String inputString, int expected) {
    ATN lexatn = createATN(lg, true);
    LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn,
            new DFA[] { new DFA(lexatn.modeToStartState.get(Lexer.DEFAULT_MODE)) }, null);
    IntegerList types = getTokenTypesViaATN(inputString, lexInterp);
    // System.out.println(types);
    g.importVocab(lg);
    ParserATNFactory f = new ParserATNFactory(g);
    ATN atn = f.createATN();
    TokenStream input = new MockIntTokenStream(types);
    // System.out.println("input="+input.types);
    ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input);
    ATNState startState = atn.ruleToStartState[g.getRule("a").index];
    if (startState.transition(0).target instanceof BlockStartState) {
        startState = startState.transition(0).target;
    }
    DOTGenerator dot = new DOTGenerator(g);
    // System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule("a").index]));
    Rule r = g.getRule("e");
    // if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index]));
    int result = interp.matchATN(input, startState);
    assertEquals(expected, result);
}
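This helper is driven the same way as checkPredictedAlt above. A hypothetical invocation, with illustrative grammar text and expected alternative:

// Hypothetical driver for checkMatchedAlt; grammar and expected alt are illustrative.
@Test
public void testMatchesSecondAlt() throws Exception {
    LexerGrammar lg = new LexerGrammar(
            "lexer grammar L;\n" +
            "A : 'a' ;\n" +
            "B : 'b' ;\n");
    Grammar g = new Grammar(
            "parser grammar T;\n" +
            "a : A | B ;");
    // input "b" should match the second alternative of rule a
    checkMatchedAlt(lg, g, "b", 2);
}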
Use of org.antlr.v4.runtime.dfa.DFA in project checkstyle by checkstyle.
The class JavaAstVisitorTest, method testNoStackOverflowOnDeepStringConcat.
/**
 * This test exists to kill a surviving pitest mutation that removes the expression AST
 * building optimization in {@link JavaAstVisitor#visitBinOp(JavaLanguageParser.BinOpContext)}.
 * We do not use {@link JavaParser#parse(FileContents)} here because of its DFA clearing hack.
 *
 * <p>
 * Reason: we build expression ASTs iteratively to avoid a StackOverflowError
 * in {@link JavaAstVisitor#visitBinOp(JavaLanguageParser.BinOpContext)}. In the actual
 * generated parser, we avoid the overflow thanks to the left-recursive expression
 * rule (which eliminates unnecessary recursive calls to hierarchical expression production rules).
 * However, ANTLR's ParserATNSimulator has no such optimization, so the number of recursive
 * calls to ParserATNSimulator#closure triggered by ParserATNSimulator#clearDFA causes a
 * StackOverflowError. We avoid this by using the single-argument constructor (which does not
 * force DFA clearing) in this test.
 * </p>
 *
 * @throws Exception if the input file does not exist
 */
@Test
public void testNoStackOverflowOnDeepStringConcat() throws Exception {
    final File file = new File(
            getPath("InputJavaAstVisitorNoStackOverflowOnDeepStringConcat.java"));
    final FileText fileText = new FileText(file, StandardCharsets.UTF_8.name());
    final FileContents contents = new FileContents(fileText);
    final String fullText = contents.getText().getFullText().toString();
    final CharStream codePointCharStream = CharStreams.fromString(fullText);
    final JavaLanguageLexer lexer = new JavaLanguageLexer(codePointCharStream, true);
    lexer.setCommentListener(contents);
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    final JavaLanguageParser parser = new JavaLanguageParser(tokenStream);
    final JavaLanguageParser.CompilationUnitContext compilationUnit = parser.compilationUnit();
    // We restrict execution to use limited resources here, so that we can
    // kill surviving pitest mutation from removal of nested binary operation
    // optimization in JavaAstVisitor#visitBinOp. Limited resources (small stack size)
    // ensure that we throw a StackOverflowError if optimization is removed.
    final DetailAST root = TestUtil.getResultWithLimitedResources(
            () -> new JavaAstVisitor(tokenStream).visit(compilationUnit));
    assertWithMessage("File parsing and AST building should complete successfully.")
            .that(root)
            .isNotNull();
}
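TestUtil.getResultWithLimitedResources is checkstyle's own test utility. As a rough illustration of the idea behind it, a computation can be run on a thread with a deliberately small stack so that deep recursion surfaces as a StackOverflowError instead of silently passing. The helper below is a hypothetical sketch, not checkstyle's implementation; the stack size is an arbitrary value chosen for illustration.

// Hypothetical sketch: run a task on a thread with a small stack so that
// unbounded recursion fails fast with a StackOverflowError.
private static <T> T runWithSmallStack(java.util.concurrent.Callable<T> task) throws Exception {
    final java.util.concurrent.atomic.AtomicReference<T> result = new java.util.concurrent.atomic.AtomicReference<>();
    final java.util.concurrent.atomic.AtomicReference<Throwable> error = new java.util.concurrent.atomic.AtomicReference<>();
    // 256 KiB stack size; the exact value is an assumption for this sketch
    final Thread thread = new Thread(null, () -> {
        try {
            result.set(task.call());
        }
        catch (Throwable ex) {
            error.set(ex);
        }
    }, "limited-stack-thread", 256 * 1024);
    thread.start();
    thread.join();
    if (error.get() != null) {
        throw new IllegalStateException(error.get());
    }
    return result.get();
}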
Use of org.antlr.v4.runtime.dfa.DFA in project sts4 by spring-projects.
The class AntlrParser, method parse.
@Override
public ParseResults parse(String text) {
    ArrayList<Problem> syntaxErrors = new ArrayList<>();
    ArrayList<Problem> problems = new ArrayList<>();
    ArrayList<PropertiesAst.Node> astNodes = new ArrayList<>();
    JavaPropertiesLexer lexer = new JavaPropertiesLexer(new ANTLRInputStream(text.toCharArray(), text.length()));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    JavaPropertiesParser parser = new JavaPropertiesParser(tokens);
    // To avoid printing parse errors in the console
    parser.removeErrorListener(ConsoleErrorListener.INSTANCE);
    // Add listener to collect various parser errors
    parser.addErrorListener(new ANTLRErrorListener() {

        @Override
        public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
                int charPositionInLine, String msg, RecognitionException e) {
            syntaxErrors.add(createProblem(msg, ProblemCodes.PROPERTIES_SYNTAX_ERROR, (Token) offendingSymbol));
        }

        @Override
        public void reportAmbiguity(org.antlr.v4.runtime.Parser recognizer, DFA dfa, int startIndex,
                int stopIndex, boolean exact, BitSet ambigAlts, ATNConfigSet configs) {
            problems.add(createProblem("Ambiguity detected!", ProblemCodes.PROPERTIES_AMBIGUITY_ERROR,
                    recognizer.getCurrentToken()));
        }

        @Override
        public void reportAttemptingFullContext(org.antlr.v4.runtime.Parser recognizer, DFA dfa, int startIndex,
                int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) {
            problems.add(createProblem("Full-Context attempt detected!", ProblemCodes.PROPERTIES_FULL_CONTEXT_ERROR,
                    recognizer.getCurrentToken()));
        }

        @Override
        public void reportContextSensitivity(org.antlr.v4.runtime.Parser recognizer, DFA dfa, int startIndex,
                int stopIndex, int prediction, ATNConfigSet configs) {
            problems.add(createProblem("Context sensitivity detected!", ProblemCodes.PROPERTIES_CONTEXT_SENSITIVITY_ERROR,
                    recognizer.getCurrentToken()));
        }
    });
    // Add listener to the parse tree to collect AST nodes
    parser.addParseListener(new JavaPropertiesBaseListener() {

        private Key key = null;
        private Value value = null;

        @Override
        public void exitPropertyLine(PropertyLineContext ctx) {
            KeyValuePair pair = new KeyValuePair(ctx, key, value);
            key.parent = value.parent = pair;
            astNodes.add(pair);
            key = null;
            value = null;
        }

        @Override
        public void exitCommentLine(CommentLineContext ctx) {
            astNodes.add(new Comment(ctx));
        }

        @Override
        public void exitKey(KeyContext ctx) {
            key = new Key(ctx);
        }

        @Override
        public void exitSeparatorAndValue(SeparatorAndValueContext ctx) {
            value = new Value(ctx);
        }

        @Override
        public void exitEmptyLine(EmptyLineContext ctx) {
            astNodes.add(new EmptyLine(ctx));
        }
    });
    parser.parse();
    // Collect and return parse results
    return new ParseResults(new PropertiesAst(ImmutableList.copyOf(astNodes)),
            ImmutableList.copyOf(syntaxErrors), ImmutableList.copyOf(problems));
}
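A caller would feed the raw properties text to this method and inspect the returned AST and problem lists. A minimal usage sketch, assuming AntlrParser has a no-argument constructor; the input text is made up:

// Illustrative usage of AntlrParser.parse; the properties text is invented for this sketch.
AntlrParser propertiesParser = new AntlrParser();
ParseResults results = propertiesParser.parse("# server settings\nserver.port=8080\n\n");
// The returned ParseResults wraps the PropertiesAst built by the parse listener above,
// plus the syntax-error and problem lists collected by the error listener.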
Use of org.antlr.v4.runtime.dfa.DFA in project antlr4 by antlr.
The class BasePythonTest, method getTokenTypes.
public List<String> getTokenTypes(LexerGrammar lg, ATN atn, CharStream input) {
    LexerATNSimulator interp = new LexerATNSimulator(atn,
            new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) }, null);
    List<String> tokenTypes = new ArrayList<String>();
    int ttype;
    boolean hitEOF = false;
    do {
        if (hitEOF) {
            tokenTypes.add("EOF");
            break;
        }
        int t = input.LA(1);
        ttype = interp.match(input, Lexer.DEFAULT_MODE);
        if (ttype == Token.EOF) {
            tokenTypes.add("EOF");
        }
        else {
            tokenTypes.add(lg.typeToTokenList.get(ttype));
        }
        if (t == IntStream.EOF) {
            hitEOF = true;
        }
    } while (ttype != Token.EOF);
    return tokenTypes;
}
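A minimal sketch of how this helper might be driven, using the same createATN test-base helper seen in the earlier snippets; the grammar text and input are illustrative:

// Illustrative usage; grammar and input are invented for this sketch.
LexerGrammar lg = new LexerGrammar(
        "lexer grammar L;\n" +
        "A : 'a' ;\n" +
        "B : 'b' ;\n");
ATN lexatn = createATN(lg, true);
List<String> tokenTypes = getTokenTypes(lg, lexatn, CharStreams.fromString("ab"));
// For this grammar and input the list should read [A, B, EOF].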