Use of org.antlr.v4.runtime.ANTLRInputStream in project antlr4 by antlr.
From class TestBufferedTokenStream, method testFirstToken:
@Test
public void testFirstToken() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n" +
        "ID : 'a'..'z'+;\n" +
        "INT : '0'..'9'+;\n" +
        "SEMI : ';';\n" +
        "ASSIGN : '=';\n" +
        "PLUS : '+';\n" +
        "MULT : '*';\n" +
        "WS : ' '+;\n");
    // Tokens: 012345678901234567
    // Input:  x = 3 * 0 + 2 * 0;
    CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
    LexerInterpreter lexEngine = g.createLexerInterpreter(input);
    TokenStream tokens = createTokenStream(lexEngine);
    String result = tokens.LT(1).getText();
    String expecting = "x";
    assertEquals(expecting, result);
}
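createTokenStream is a factory hook defined elsewhere in TestBufferedTokenStream, so subclasses can run the same tests against other stream implementations. A minimal sketch, assuming it simply wraps the lexer in a BufferedTokenStream:

    protected TokenStream createTokenStream(TokenSource src) {
        // Plain buffered stream; subclasses may return a CommonTokenStream instead.
        return new BufferedTokenStream(src);
    }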
Use of org.antlr.v4.runtime.ANTLRInputStream in project antlr4 by antlr.
From class TestAmbigParseTrees, method testAmbiguousTrees:
public void testAmbiguousTrees(LexerGrammar lg, Grammar g,
                               String startRule, String input, int decision,
                               String expectedAmbigAlts, String overallTree,
                               String[] expectedParseTrees) {
    InterpreterTreeTextProvider nodeTextProvider = new InterpreterTreeTextProvider(g.getRuleNames());
    LexerInterpreter lexEngine = lg.createLexerInterpreter(new ANTLRInputStream(input));
    CommonTokenStream tokens = new CommonTokenStream(lexEngine);
    final GrammarParserInterpreter parser = g.createGrammarParserInterpreter(tokens);
    parser.setProfile(true);
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);

    // PARSE
    int ruleIndex = g.rules.get(startRule).index;
    ParserRuleContext parseTree = parser.parse(ruleIndex);
    assertEquals(overallTree, Trees.toStringTree(parseTree, nodeTextProvider));
    System.out.println();

    // Exactly one ambiguity event should have been recorded for the decision of interest.
    DecisionInfo[] decisionInfo = parser.getParseInfo().getDecisionInfo();
    List<AmbiguityInfo> ambiguities = decisionInfo[decision].ambiguities;
    assertEquals(1, ambiguities.size());
    AmbiguityInfo ambiguityInfo = ambiguities.get(0);

    // Regenerate one parse tree per ambiguous alternative and compare against expectations.
    List<ParserRuleContext> ambiguousParseTrees =
        GrammarParserInterpreter.getAllPossibleParseTrees(g, parser, tokens, decision,
                                                          ambiguityInfo.ambigAlts,
                                                          ambiguityInfo.startIndex,
                                                          ambiguityInfo.stopIndex,
                                                          ruleIndex);
    assertEquals(expectedAmbigAlts, ambiguityInfo.ambigAlts.toString());
    assertEquals(ambiguityInfo.ambigAlts.cardinality(), ambiguousParseTrees.size());
    for (int i = 0; i < ambiguousParseTrees.size(); i++) {
        ParserRuleContext t = ambiguousParseTrees.get(i);
        assertEquals(expectedParseTrees[i], Trees.toStringTree(t, nodeTextProvider));
    }
}
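For context, a caller first builds the two grammar objects and then invokes the helper with the expected ambiguity results. A minimal sketch of that setup; the grammar text is an illustrative assumption, and the decision number, expected alt set, and tree strings would come from the actual decision under test:

    LexerGrammar lg = new LexerGrammar(
        "lexer grammar L;\n" +
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    Grammar g = new Grammar(
        "parser grammar T;\n" +
        "s : A x C | A B C;\n" +   // both alternatives can match "abc"
        "x : B;\n", lg);
    // testAmbiguousTrees(lg, g, "s", "abc", <decision>, <expectedAmbigAlts>,
    //                    <overallTree>, <expectedParseTrees>);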
Use of org.antlr.v4.runtime.ANTLRInputStream in project antlr4 by antlr.
From class TestLookaheadTrees, method testLookaheadTrees:
public void testLookaheadTrees(LexerGrammar lg, Grammar g,
                               String input, String startRuleName, int decision,
                               String[] expectedTrees) {
    int startRuleIndex = g.getRule(startRuleName).index;
    InterpreterTreeTextProvider nodeTextProvider = new InterpreterTreeTextProvider(g.getRuleNames());
    LexerInterpreter lexEngine = lg.createLexerInterpreter(new ANTLRInputStream(input));
    CommonTokenStream tokens = new CommonTokenStream(lexEngine);
    GrammarParserInterpreter parser = g.createGrammarParserInterpreter(tokens);
    parser.setProfile(true);
    ParseTree t = parser.parse(startRuleIndex);

    // The deepest-lookahead event recorded for this decision during profiling.
    DecisionInfo decisionInfo = parser.getParseInfo().getDecisionInfo()[decision];
    LookaheadEventInfo lookaheadEventInfo = decisionInfo.SLL_MaxLookEvent;

    // One parse tree per alternative, showing what each would match over the lookahead span.
    List<ParserRuleContext> lookaheadParseTrees =
        GrammarParserInterpreter.getLookaheadParseTrees(g, parser, tokens, startRuleIndex,
                                                        lookaheadEventInfo.decision,
                                                        lookaheadEventInfo.startIndex,
                                                        lookaheadEventInfo.stopIndex);
    assertEquals(expectedTrees.length, lookaheadParseTrees.size());
    for (int i = 0; i < lookaheadParseTrees.size(); i++) {
        ParserRuleContext lt = lookaheadParseTrees.get(i);
        assertEquals(expectedTrees[i], Trees.toStringTree(lt, nodeTextProvider));
    }
}
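A side note on the stream class these examples share: ANTLRInputStream is deprecated as of ANTLR 4.7 in favor of the CharStreams factory methods, which handle the full Unicode code-point range. A minimal sketch of the equivalent construction, assuming the runtime on the classpath is 4.7 or newer:

    // Equivalent, non-deprecated construction (org.antlr.v4.runtime.CharStreams).
    CharStream input = CharStreams.fromString("x = 3 * 0 + 2 * 0;");
    LexerInterpreter lexEngine = g.createLexerInterpreter(input);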
Use of org.antlr.v4.runtime.ANTLRInputStream in project antlr4 by antlr.
From class TestTokenStreamRewriter, method testReplaceThenReplaceLowerIndexedSuperset:
@Test
public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar T;\n" +
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 4, "xyz");
    tokens.replace(1, 3, "foo");
    stream.fill();
    // The second replace overlaps the first without fully containing it, so rendering must fail.
    Exception exc = null;
    try {
        tokens.getText();
    } catch (IllegalArgumentException iae) {
        exc = iae;
    }
    String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"xyz\">";
    assertNotNull(exc);
    assertEquals(expecting, exc.getMessage());
}
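The try/catch-and-assert pattern above can be written more compactly with assertThrows; a sketch, assuming the project is on JUnit 4.13+ or JUnit 5:

    // assertThrows returns the caught exception, so the message check stays the same.
    IllegalArgumentException exc2 =
        assertThrows(IllegalArgumentException.class, tokens::getText);
    assertEquals(expecting, exc2.getMessage());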
Use of org.antlr.v4.runtime.ANTLRInputStream in project antlr4 by antlr.
From class TestTokenStreamRewriter, method testInsertBeforeTokenThenDeleteThatToken:
@Test
public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar T;\n" +
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    // Insert before token 2 ('c'), then delete that same token.
    tokens.insertBefore(2, "y");
    tokens.delete(2);
    String result = tokens.getText();
    // The inserted text survives the delete of its anchor token.
    String expecting = "aby";
    assertEquals(expecting, result);
}
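For completeness, TokenStreamRewriter can also render just a slice of the rewritten stream via getText(Interval), where Interval is org.antlr.v4.runtime.misc.Interval; a minimal sketch with illustrative bounds:

    // Render only token indexes 0..1 of the rewritten stream.
    String prefix = tokens.getText(Interval.of(0, 1));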