Use of org.antlr.v4.runtime.atn.PredictionContextCache in project presto by prestodb.
Class AntlrATNCacheFields, method configureLexer.
@SuppressWarnings("ObjectEquality")
public void configureLexer(Lexer lexer) {
    requireNonNull(lexer, "lexer is null");
    // Intentional identity equals comparison
    checkArgument(atn == lexer.getATN(), "Lexer ATN mismatch: expected %s, found %s", atn, lexer.getATN());
    lexer.setInterpreter(new LexerATNSimulator(lexer, atn, decisionToDFA, predictionContextCache));
}
Use of org.antlr.v4.runtime.atn.PredictionContextCache in project presto by prestodb.
Class AntlrATNCacheFields, method configureParser.
@SuppressWarnings("ObjectEquality")
public void configureParser(Parser parser) {
    requireNonNull(parser, "parser is null");
    // Intentional identity equals comparison
    checkArgument(atn == parser.getATN(), "Parser ATN mismatch: expected %s, found %s", atn, parser.getATN());
    parser.setInterpreter(new ParserATNSimulator(parser, atn, decisionToDFA, predictionContextCache));
}
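The value of these two methods comes from the fields they share: one DFA array and one PredictionContextCache per grammar ATN, created once and reused by every lexer or parser instance, so prediction work warmed up by earlier parses is not repeated. Below is a minimal sketch of how such a holder could be wired up; the class name SharedCaches and the generated SqlBaseLexer referenced in the usage comment are illustrative assumptions, not a copy of Presto's AntlrATNCacheFields.

import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.dfa.DFA;

final class SharedCaches {
    private final ATN atn;
    private final DFA[] decisionToDFA;
    private final PredictionContextCache predictionContextCache = new PredictionContextCache();

    SharedCaches(ATN atn) {
        this.atn = atn;
        // One DFA per decision point; all recognizers built from this ATN share the array.
        this.decisionToDFA = new DFA[atn.getNumberOfDecisions()];
        for (int i = 0; i < decisionToDFA.length; i++) {
            decisionToDFA[i] = new DFA(atn.getDecisionState(i), i);
        }
    }

    void configureLexer(Lexer lexer) {
        // Same idea as above: replace the default interpreter with one backed by the shared caches.
        // Usage: build SharedCaches once (e.g. from new SqlBaseLexer(...).getATN()),
        // then call configureLexer on every freshly created lexer instance.
        lexer.setInterpreter(new LexerATNSimulator(lexer, atn, decisionToDFA, predictionContextCache));
    }
}

Presto's AntlrATNCacheFields keeps exactly these three pieces of state and applies them through the configureLexer and configureParser methods shown above.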
Use of org.antlr.v4.runtime.atn.PredictionContextCache in project titan.EclipsePlug-ins by eclipse.
Class TTCN3Analyzer, method parse.
/**
 * Parse a TTCN-3 file using ANTLR v4.
 * @param aReader the file content to parse (cannot be null; this method closes aReader)
 * @param aFileLength the file length
 * @param aEclipseFile the Eclipse-dependent resource file
 */
private void parse(final Reader aReader, final int aFileLength, final IFile aEclipseFile) {
    CharStream charStream = new UnbufferedCharStream(aReader);
    Ttcn3Lexer lexer = new Ttcn3Lexer(charStream);
    lexer.setCommentTodo(true);
    lexer.setTokenFactory(new CommonTokenFactory(true));
    lexer.initRootInterval(aFileLength);

    TitanListener lexerListener = new TitanListener();
    // remove ConsoleErrorListener
    lexer.removeErrorListeners();
    lexer.addErrorListener(lexerListener);

    // 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream,
    //    because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
    //    Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
    //    pr_PatternChunk[StringBuilder builder, boolean[] uni]:
    //      $builder.append($v.text); <-- exception is thrown here:
    //      java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
    // 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens
    //    with "-> channel(HIDDEN)" are not filtered out in lexer.
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);

    Ttcn3Parser parser = new Ttcn3Parser(tokenStream);
    ParserUtilities.setBuildParseTree(parser);

    PreprocessedTokenStream preprocessor = null;
    if (aEclipseFile != null && GlobalParser.TTCNPP_EXTENSION.equals(aEclipseFile.getFileExtension())) {
        lexer.setTTCNPP();
        preprocessor = new PreprocessedTokenStream(lexer);
        preprocessor.setActualFile(aEclipseFile);
        if (aEclipseFile.getProject() != null) {
            preprocessor.setMacros(PreprocessorSymbolsOptionsData.getTTCN3PreprocessorDefines(aEclipseFile.getProject()));
        }
        parser = new Ttcn3Parser(preprocessor);
        ParserUtilities.setBuildParseTree(parser);
        preprocessor.setActualLexer(lexer);
        preprocessor.setParser(parser);
    }
    if (aEclipseFile != null) {
        lexer.setActualFile(aEclipseFile);
        parser.setActualFile(aEclipseFile);
        parser.setProject(aEclipseFile.getProject());
    }

    // remove ConsoleErrorListener
    parser.removeErrorListeners();
    TitanListener parserListener = new TitanListener();
    parser.addErrorListener(parserListener);

    // This is added because of the following ANTLR 4 bug:
    //   Memory Leak in PredictionContextCache #499
    //   https://github.com/antlr/antlr4/issues/499
    DFA[] decisionToDFA = parser.getInterpreter().decisionToDFA;
    parser.setInterpreter(new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, new PredictionContextCache()));

    // try SLL mode
    try {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        final ParseTree root = parser.pr_TTCN3File();
        ParserUtilities.logParseTree(root, parser);
        warnings = parser.getWarnings();
        mErrorsStored = lexerListener.getErrorsStored();
        mErrorsStored.addAll(parserListener.getErrorsStored());
    } catch (RecognitionException e) {
        // quit
    }
    if (!warnings.isEmpty() || !mErrorsStored.isEmpty()) {
        // SLL mode might have failed, try LL mode
        try {
            CharStream charStream2 = new UnbufferedCharStream(aReader);
            lexer.setInputStream(charStream2);
            // lexer.reset();
            parser.reset();
            parserListener.reset();
            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            final ParseTree root = parser.pr_TTCN3File();
            ParserUtilities.logParseTree(root, parser);
            warnings = parser.getWarnings();
            mErrorsStored = lexerListener.getErrorsStored();
            mErrorsStored.addAll(parserListener.getErrorsStored());
        } catch (RecognitionException e) {
            // quit; the LL pass failed as well, keep whatever the listeners collected
        }
    }
    unsupportedConstructs = parser.getUnsupportedConstructs();
    rootInterval = lexer.getRootInterval();
    actualTtc3Module = parser.getModule();

    if (preprocessor != null) {
        // if the file was preprocessed
        mErrorsStored.addAll(preprocessor.getErrorStorage());
        warnings.addAll(preprocessor.getWarnings());
        unsupportedConstructs.addAll(preprocessor.getUnsupportedConstructs());
        if (actualTtc3Module != null) {
            actualTtc3Module.setIncludedFiles(preprocessor.getIncludedFiles());
            actualTtc3Module.setInactiveCodeLocations(preprocessor.getInactiveCodeLocations());
        }
    }

    try {
        aReader.close();
    } catch (IOException e) {
        // ignore: nothing useful can be done if closing the reader fails
    }
}
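Two things in this method generalize beyond TTCN-3: installing a fresh PredictionContextCache to sidestep the unbounded cache growth reported in antlr/antlr4#499, and trying fast SLL prediction first before falling back to full LL prediction. The sketch below condenses that pattern against a hypothetical generated parser; MyLexer, MyParser, and startRule() are placeholders, and unlike TTCN3Analyzer, which re-parses when its listeners recorded problems, this variant uses ANTLR's BailErrorStrategy to trigger the retry.

import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.DefaultErrorStrategy;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.atn.PredictionMode;
import org.antlr.v4.runtime.misc.ParseCancellationException;
import org.antlr.v4.runtime.tree.ParseTree;

final class TwoStageParse {
    // MyLexer, MyParser and startRule() stand in for an ANTLR-generated lexer/parser pair.
    static ParseTree parse(String source) {
        MyLexer lexer = new MyLexer(CharStreams.fromString(source));
        MyParser parser = new MyParser(new CommonTokenStream(lexer));

        // Replace the interpreter so this parse uses its own PredictionContextCache
        // instead of the shared static one (see https://github.com/antlr/antlr4/issues/499).
        parser.setInterpreter(new ParserATNSimulator(
                parser, parser.getATN(), parser.getInterpreter().decisionToDFA, new PredictionContextCache()));

        // Stage 1: fast SLL prediction, bailing out on the first syntax error.
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        parser.setErrorHandler(new BailErrorStrategy());
        try {
            return parser.startRule();
        } catch (ParseCancellationException e) {
            // Stage 2: rewind and re-parse with full LL prediction and normal error recovery.
            parser.getTokenStream().seek(0);
            parser.reset();
            parser.setErrorHandler(new DefaultErrorStrategy());
            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            return parser.startRule();
        }
    }
}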
Use of org.antlr.v4.runtime.atn.PredictionContextCache in project antlr4 by antlr.
Class TestATNParserPrediction, method checkDFAConstruction.
public void checkDFAConstruction(LexerGrammar lg, Grammar g, int decision, String[] inputString, String[] dfaString) {
    // Tool.internalOption_ShowATNConfigsInDFA = true;
    ATN lexatn = createATN(lg, true);
    LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn,
            new DFA[] { new DFA(lexatn.getDecisionState(Lexer.DEFAULT_MODE)) },
            new PredictionContextCache());

    semanticProcess(lg);
    g.importVocab(lg);
    semanticProcess(g);

    ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, null);
    for (int i = 0; i < inputString.length; i++) {
        // Check DFA
        IntegerList types = getTokenTypesViaATN(inputString[i], lexInterp);
        // System.out.println(types);
        TokenStream input = new MockIntTokenStream(types);
        try {
            interp.adaptivePredict(input, decision, ParserRuleContext.EMPTY);
        } catch (NoViableAltException nvae) {
            nvae.printStackTrace(System.err);
        }
        DFA dfa = interp.parser.decisionToDFA[decision];
        assertEquals(dfaString[i], dfa.toString(g.getVocabulary()));
    }
}
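An illustrative call site for this helper, assumed to live in the same tool-test class so the harness methods above are in scope; the grammar text, the decision index, and the expected DFA strings are placeholders rather than values taken from ANTLR's real test data.

public void testDFAConstructionForSimpleDecision() throws Exception {
    LexerGrammar lg = new LexerGrammar(
            "lexer grammar L;\n" +
            "A : 'a' ;\n" +
            "B : 'b' ;\n");
    Grammar g = new Grammar(
            "parser grammar T;\n" +
            "s : A B | A A ;\n");
    String[] inputs = { "ab", "aa" };
    String[] expectedDfa = {
            "<expected DFA rendering after \"ab\">",   // placeholder
            "<expected DFA rendering after \"aa\">"    // placeholder
    };
    // Decision 0 is the only decision in rule s of this illustrative grammar.
    checkDFAConstruction(lg, g, 0, inputs, expectedDfa);
}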