Use of org.antlr.v4.runtime.BufferedTokenStream in project antlr4 (by tunnelvisionlabs): class TestTokenStream, method testBufferedTokenStreamReuseAfterFill.
/**
 * This is a targeted regression test for antlr/antlr4#1584 ({@link BufferedTokenStream} cannot be reused after EOF).
 */
@Test
public void testBufferedTokenStreamReuseAfterFill() {
	// First pass: lex a single-token input and verify the buffer holds TOKEN_REF followed by EOF.
	final BufferedTokenStream tokenStream =
			new BufferedTokenStream(new XPathLexer(CharStreams.fromString("A")));
	tokenStream.fill();
	Assert.assertEquals(2, tokenStream.size());
	Assert.assertEquals(XPathLexer.TOKEN_REF, tokenStream.get(0).getType());
	Assert.assertEquals(Token.EOF, tokenStream.get(1).getType());

	// Second pass: swap in a fresh lexer after EOF was reached; filling again
	// must pick up the new token sequence instead of staying stuck at EOF.
	tokenStream.setTokenSource(new XPathLexer(CharStreams.fromString("A/")));
	tokenStream.fill();
	Assert.assertEquals(3, tokenStream.size());
	Assert.assertEquals(XPathLexer.TOKEN_REF, tokenStream.get(0).getType());
	Assert.assertEquals(XPathLexer.ROOT, tokenStream.get(1).getType());
	Assert.assertEquals(Token.EOF, tokenStream.get(2).getType());
}
Use of org.antlr.v4.runtime.BufferedTokenStream in project titan.EclipsePlug-ins (by eclipse): class CfgAnalyzer, method directParse.
/**
 * Parses the provided elements.
 * If the contents of an editor are to be parsed, then the file parameter is only used to report the errors to.
 *
 * @param file the file to parse
 * @param fileName the name of the file, to refer to.
 * @param code the contents of an editor, or null.
 */
public void directParse(final IFile file, final String fileName, final String code) {
	final Reader reader;
	final int fileLength;
	if (null != code) {
		// editor contents take precedence over the on-disk file
		reader = new StringReader(code);
		fileLength = code.length();
	} else if (null != file) {
		try {
			// FIX: the charset constant is StandardCharsets.UTF_8 — "UTF8" does not exist
			// in java.nio.charset.StandardCharsets and would not compile.
			reader = new BufferedReader(new InputStreamReader(file.getContents(), StandardCharsets.UTF_8));
			final IFileStore store = EFS.getStore(file.getLocationURI());
			final IFileInfo fileInfo = store.fetchInfo();
			fileLength = (int) fileInfo.getLength();
		} catch (CoreException e) {
			ErrorReporter.logExceptionStackTrace("Could not get the contents of `" + fileName + "'", e);
			return;
		}
	} else {
		ErrorReporter.INTERNAL_ERROR("CfgAnalyzer.directParse(): nothing to parse");
		return;
	}

	try {
		final CharStream charStream = new UnbufferedCharStream(reader);
		final CfgLexer lexer = new CfgLexer(charStream);
		lexer.setTokenFactory(new CommonTokenFactory(true));
		lexer.initRootInterval(fileLength);
		lexerListener = new TitanListener();
		// remove ConsoleErrorListener
		lexer.removeErrorListeners();
		lexer.addErrorListener(lexerListener);

		// 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
		// Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
		// pr_PatternChunk[StringBuilder builder, boolean[] uni]:
		//   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
		// 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
		final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
		final CfgParser parser = new CfgParser(tokenStream);
		parser.setActualFile(file);
		// parser tree is built by default
		parserListener = new TitanListener();
		// remove ConsoleErrorListener
		parser.removeErrorListeners();
		parser.addErrorListener(parserListener);

		final ParserRuleContext parseTreeRoot = parser.pr_ConfigFile();
		mCfgParseResult = parser.getCfgParseResult();
		// manually add the result parse tree, and its corresponding token stream,
		// because they logically belong to here
		mCfgParseResult.setParseTreeRoot(parseTreeRoot);
		mCfgParseResult.setTokens(tokenStream.getTokens());

		// fill handlers
		moduleParametersHandler = parser.getModuleParametersHandler();
		testportParametersHandler = parser.getTestportParametersHandler();
		componentSectionHandler = parser.getComponentSectionHandler();
		groupSectionHandler = parser.getGroupSectionHandler();
		mcSectionHandler = parser.getMcSectionHandler();
		externalCommandsSectionHandler = parser.getExternalCommandsSectionHandler();
		executeSectionHandler = parser.getExecuteSectionHandler();
		includeSectionHandler = parser.getIncludeSectionHandler();
		orderedIncludeSectionHandler = parser.getOrderedIncludeSectionHandler();
		defineSectionHandler = parser.getDefineSectionHandler();
		loggingSectionHandler = parser.getLoggingSectionHandler();
		rootInterval = lexer.getRootInterval();
	} finally {
		// FIX: close the reader even when lexing/parsing throws — previously it was
		// only closed on the success path, leaking the underlying stream on failure.
		try {
			reader.close();
		} catch (IOException e) {
			// best effort: nothing useful can be done if closing fails
		}
	}
}
Use of org.antlr.v4.runtime.BufferedTokenStream in project titan.EclipsePlug-ins (by eclipse): class Definition, method parseErrAttrSpecString.
/**
 * Parses the erroneous attribute specification text of the given attribute
 * into an {@link ErroneousAttributeSpecification}, reporting any lexical or
 * syntactic problems as on-the-fly markers on the owning file.
 *
 * @param aAttrSpec the attribute specification whose text is to be parsed
 * @return the parsed erroneous attribute specification, or null if there was no text to parse
 */
private static ErroneousAttributeSpecification parseErrAttrSpecString(final AttributeSpecification aAttrSpec) {
	String code = aAttrSpec.getSpecification();
	if (code == null) {
		return null;
	}
	final Location location = aAttrSpec.getLocation();
	// code must be transformed, according to compiler2/ttcn3/charstring_la.l
	// TODO
	code = Ttcn3CharstringLexer.parseCharstringValue(code, location);
	final Reader reader = new StringReader(code);
	final CharStream charStream = new UnbufferedCharStream(reader);
	final Ttcn3Lexer lexer = new Ttcn3Lexer(charStream);
	lexer.setTokenFactory(new CommonTokenFactory(true));
	// needs to be shifted by one because of the \" of the string
	lexer.setCharPositionInLine(0);

	// a single listener collects both lexer and parser problems
	final TitanListener parserListener = new TitanListener();
	// remove ConsoleErrorListener
	lexer.removeErrorListeners();
	lexer.addErrorListener(parserListener);

	// 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
	// Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
	// pr_PatternChunk[StringBuilder builder, boolean[] uni]:
	//   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
	// 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
	final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
	final Ttcn3Reparser parser = new Ttcn3Reparser(tokenStream);
	ParserUtilities.setBuildParseTree(parser);
	final IFile file = (IFile) location.getFile();
	parser.setActualFile(file);
	parser.setOffset(location.getOffset());
	parser.setLine(location.getLine());

	// remove ConsoleErrorListener
	parser.removeErrorListeners();
	parser.addErrorListener(parserListener);

	MarkerHandler.markMarkersForRemoval(GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER, location.getFile(), location.getOffset(), location.getEndOffset());

	final Pr_ErroneousAttributeSpecContext root = parser.pr_ErroneousAttributeSpec();
	ParserUtilities.logParseTree(root, parser);
	final ErroneousAttributeSpecification returnValue = root.errAttrSpec;

	// add error markers
	final List<SyntacticErrorStorage> errors = parser.getErrors();
	if (errors != null) {
		for (final SyntacticErrorStorage error : errors) {
			// NOTE(review): the original code called temp.setOffset(temp.getOffset()) here,
			// which is a no-op and was removed; if an offset shift was intended
			// (cf. the \" shift above), it needs to be re-added with the correct delta.
			final Location temp = new Location(location);
			ParserMarkerSupport.createOnTheFlySyntacticMarker(file, error, IMarker.SEVERITY_ERROR, temp);
		}
	}
	reportExternalMarkers(parser.getWarnings(), file);
	reportExternalMarkers(parser.getUnsupportedConstructs(), file);
	return returnValue;
}

/**
 * Reports each marker in the list as an external problem on the given file,
 * if the file is accessible. Shared by the warning and the
 * unsupported-construct reporting paths of parseErrAttrSpecString.
 *
 * @param markers the markers to report, may be null
 * @param file the file to report the markers on
 */
private static void reportExternalMarkers(final List<TITANMarker> markers, final IFile file) {
	if (markers == null || !file.isAccessible()) {
		return;
	}
	for (final TITANMarker marker : markers) {
		final Location loc = new Location(file, marker.getLine(), marker.getOffset(), marker.getEndOffset());
		loc.reportExternalProblem(marker.getMessage(), marker.getSeverity(), GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER);
	}
}
Use of org.antlr.v4.runtime.BufferedTokenStream in project titan.EclipsePlug-ins (by eclipse): class TTCN3Analyzer, method parse.
/**
 * Parse TTCN-3 file using ANTLR v4.
 * Sets up lexer + parser (optionally behind a preprocessor for .ttcnpp files),
 * tries SLL prediction first and retries in LL mode on problems, then stores
 * the collected errors/warnings/unsupported constructs and the parsed module
 * into the analyzer's fields.
 *
 * @param aReader file to parse (cannot be null, closes aReader)
 * @param aFileLength file length
 * @param aEclipseFile Eclipse dependent resource file
 */
private void parse(final Reader aReader, final int aFileLength, final IFile aEclipseFile) {
	CharStream charStream = new UnbufferedCharStream(aReader);
	Ttcn3Lexer lexer = new Ttcn3Lexer(charStream);
	lexer.setCommentTodo(true);
	// "true" -> factory copies text eagerly, required with an unbuffered char stream
	lexer.setTokenFactory(new CommonTokenFactory(true));
	lexer.initRootInterval(aFileLength);
	TitanListener lexerListener = new TitanListener();
	// remove ConsoleErrorListener
	lexer.removeErrorListeners();
	lexer.addErrorListener(lexerListener);
	// 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
	// Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
	// pr_PatternChunk[StringBuilder builder, boolean[] uni]:
	// $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
	// 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
	final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
	Ttcn3Parser parser = new Ttcn3Parser(tokenStream);
	ParserUtilities.setBuildParseTree(parser);
	PreprocessedTokenStream preprocessor = null;
	// .ttcnpp files go through the TTCN-3 preprocessor: wrap the lexer in a
	// PreprocessedTokenStream and re-create the parser on top of it.
	if (aEclipseFile != null && GlobalParser.TTCNPP_EXTENSION.equals(aEclipseFile.getFileExtension())) {
		lexer.setTTCNPP();
		preprocessor = new PreprocessedTokenStream(lexer);
		preprocessor.setActualFile(aEclipseFile);
		if (aEclipseFile.getProject() != null) {
			preprocessor.setMacros(PreprocessorSymbolsOptionsData.getTTCN3PreprocessorDefines(aEclipseFile.getProject()));
		}
		parser = new Ttcn3Parser(preprocessor);
		ParserUtilities.setBuildParseTree(parser);
		preprocessor.setActualLexer(lexer);
		preprocessor.setParser(parser);
	}
	if (aEclipseFile != null) {
		lexer.setActualFile(aEclipseFile);
		parser.setActualFile(aEclipseFile);
		parser.setProject(aEclipseFile.getProject());
	}
	// remove ConsoleErrorListener
	parser.removeErrorListeners();
	TitanListener parserListener = new TitanListener();
	parser.addErrorListener(parserListener);
	// This is added because of the following ANTLR 4 bug:
	// Memory Leak in PredictionContextCache #499
	// https://github.com/antlr/antlr4/issues/499
	DFA[] decisionToDFA = parser.getInterpreter().decisionToDFA;
	parser.setInterpreter(new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, new PredictionContextCache()));
	// try SLL mode first: it is faster but may mis-report on some grammars
	try {
		parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
		final ParseTree root = parser.pr_TTCN3File();
		ParserUtilities.logParseTree(root, parser);
		warnings = parser.getWarnings();
		mErrorsStored = lexerListener.getErrorsStored();
		mErrorsStored.addAll(parserListener.getErrorsStored());
	} catch (RecognitionException e) {
		// quit
	}
	if (!warnings.isEmpty() || !mErrorsStored.isEmpty()) {
		// SLL mode might have failed, try LL mode
		// NOTE(review): aReader was already consumed by the SLL pass above, so this second
		// UnbufferedCharStream most likely sees an empty input — verify whether the LL
		// retry actually re-parses the file or silently produces an empty result.
		try {
			CharStream charStream2 = new UnbufferedCharStream(aReader);
			lexer.setInputStream(charStream2);
			// lexer.reset();
			parser.reset();
			parserListener.reset();
			parser.getInterpreter().setPredictionMode(PredictionMode.LL);
			final ParseTree root = parser.pr_TTCN3File();
			ParserUtilities.logParseTree(root, parser);
			warnings = parser.getWarnings();
			mErrorsStored = lexerListener.getErrorsStored();
			mErrorsStored.addAll(parserListener.getErrorsStored());
		} catch (RecognitionException e) {
			// intentionally ignored: the SLL pass already recorded the problems
		}
	}
	unsupportedConstructs = parser.getUnsupportedConstructs();
	rootInterval = lexer.getRootInterval();
	actualTtc3Module = parser.getModule();
	if (preprocessor != null) {
		// if the file was preprocessed, merge the preprocessor's findings as well
		mErrorsStored.addAll(preprocessor.getErrorStorage());
		warnings.addAll(preprocessor.getWarnings());
		unsupportedConstructs.addAll(preprocessor.getUnsupportedConstructs());
		if (actualTtc3Module != null) {
			actualTtc3Module.setIncludedFiles(preprocessor.getIncludedFiles());
			actualTtc3Module.setInactiveCodeLocations(preprocessor.getInactiveCodeLocations());
		}
	}
	try {
		aReader.close();
	} catch (IOException e) {
		// intentionally ignored: nothing useful can be done if close fails
	}
}
Use of org.antlr.v4.runtime.BufferedTokenStream in project titan.EclipsePlug-ins (by eclipse): class PreprocessedTokenStream, method fetch.
/**
 * Fetches up to {@code n} tokens from the current token source, interpreting
 * TTCN-3 preprocessor directives on the fly. Directive tokens are parsed with
 * a dedicated lexer/parser pair; conditional directives drive
 * {@code condStateStack} (tracking active/inactive code regions), #include
 * pushes a new token source onto {@code tokenStreamStack}, and other
 * directives produce markers. Only tokens from passing (active) regions are
 * added to the buffer.
 *
 * @param n the number of tokens still requested by the caller
 * @return the number of tokens actually added to the buffer; 0 at EOF
 */
@Override
public int fetch(int n) {
	if (fetchedEOF) {
		return 0;
	}
	int i = 0; // number of tokens added so far in this call
	do {
		Token t;
		// read from the innermost #include'd file if there is one, otherwise from the main lexer
		if (tokenStreamStack.isEmpty()) {
			t = getTokenSource().nextToken();
		} else {
			t = tokenStreamStack.peek().getTokenSource().nextToken();
		}
		if (t == null) {
			return 0;
		}
		int tokenType = t.getType();
		if (tokenType == Ttcn3Lexer.PREPROCESSOR_DIRECTIVE) {
			lastPPDirectiveLocation = new Location(actualFile, t.getLine(), t.getStartIndex(), t.getStopIndex() + 1);
			// 1. the first # shall be discarded
			// 2. "\\\n" strings are removed, so multiline tokens, which are split by backslash are extracted to one line
			final String text = t.getText().substring(1).replace("\\\n", "");
			// parse the directive text with a dedicated preprocessor-directive lexer/parser
			Reader reader = new StringReader(text);
			CharStream charStream = new UnbufferedCharStream(reader);
			PreprocessorDirectiveLexer lexer = new PreprocessorDirectiveLexer(charStream);
			// the factory carries over the position of the original directive token
			lexer.setTokenFactory(new PPDirectiveTokenFactory(true, t));
			lexerListener = new PPListener();
			lexer.removeErrorListeners();
			lexer.addErrorListener(lexerListener);
			lexer.setLine(t.getLine());
			lexer.setCharPositionInLine(t.getCharPositionInLine());
			// 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
			// Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
			// pr_PatternChunk[StringBuilder builder, boolean[] uni]:
			// $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
			// 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
			final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
			PreprocessorDirectiveParser localParser = new PreprocessorDirectiveParser(tokenStream);
			localParser.setBuildParseTree(false);
			parserListener = new PPListener(localParser);
			localParser.removeErrorListeners();
			localParser.addErrorListener(parserListener);
			localParser.setIsActiveCode(condStateStack.isPassing());
			localParser.setMacros(macros);
			localParser.setLine(t.getLine());
			PreprocessorDirective ppDirective = null;
			ppDirective = localParser.pr_Directive().ppDirective;
			errorsStored.addAll(localParser.getErrorStorage());
			warnings.addAll(localParser.getWarnings());
			unsupportedConstructs.addAll(localParser.getUnsupportedConstructs());
			if (ppDirective != null) {
				ppDirective.line = t.getLine();
				if (ppDirective.isConditional()) {
					// #if/#ifdef/#else/#endif ...: update the conditional state and, on a
					// transition, record where inactive code begins or ends
					boolean preIsPassing = condStateStack.isPassing();
					condStateStack.processDirective(ppDirective);
					boolean postIsPassing = condStateStack.isPassing();
					if (preIsPassing != postIsPassing && tokenStreamStack.isEmpty() && getTokenSource() instanceof Ttcn3Lexer) {
						// included files are ignored because of ambiguity
						Location ppLocation = lastPPDirectiveLocation;
						if (ppLocation != null) {
							if (preIsPassing) {
								// switched to inactive: begin a new inactive location
								Location loc = new Location(actualFile, ppLocation.getLine(), ppLocation.getEndOffset(), ppLocation.getEndOffset());
								inactiveCodeLocations.add(loc);
							} else {
								// switched to active: end the current inactive location
								int iclSize = inactiveCodeLocations.size();
								if (iclSize > 0) {
									Location lastLocation = inactiveCodeLocations.get(iclSize - 1);
									lastLocation.setEndOffset(ppLocation.getOffset());
								}
							}
						}
					}
				} else {
					// other directive types
					// non-conditional directives only take effect inside active code
					if (condStateStack.isPassing()) {
						// directive
						switch(ppDirective.type) {
						case INCLUDE:
						{
							if (tokenStreamStack.size() > RECURSION_LIMIT) {
								// dumb but safe defense against infinite recursion, default value from gcc
								TITANMarker marker = new TITANMarker("Maximum #include recursion depth reached", ppDirective.line, -1, -1, IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
								unsupportedConstructs.add(marker);
							} else {
								// TODO: Makes the Eclipse slow down
								processIncludeDirective(ppDirective);
							}
						}
						break;
						case ERROR:
						{
							// #error: report its message as an error marker
							String errorMessage = ppDirective.str == null ? "" : ppDirective.str;
							TITANMarker marker = new TITANMarker(errorMessage, ppDirective.line, -1, -1, IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
							unsupportedConstructs.add(marker);
						}
						break;
						case WARNING:
						{
							// #warning: report its message as a warning marker
							String warningMessage = ppDirective.str == null ? "" : ppDirective.str;
							TITANMarker marker = new TITANMarker(warningMessage, ppDirective.line, -1, -1, IMarker.SEVERITY_WARNING, IMarker.PRIORITY_NORMAL);
							warnings.add(marker);
						}
						break;
						case LINECONTROL:
						case LINEMARKER:
						case PRAGMA:
						case NULL:
						{
							// ignored directives: report them per the user's preference (error/warning/ignore)
							String reportPreference = Platform.getPreferencesService().getString(ProductConstants.PRODUCT_ID_DESIGNER, PreferenceConstants.REPORT_IGNORED_PREPROCESSOR_DIRECTIVES, GeneralConstants.WARNING, null);
							if (!GeneralConstants.IGNORE.equals(reportPreference)) {
								boolean isError = GeneralConstants.ERROR.equals(reportPreference);
								TITANMarker marker = new TITANMarker(MessageFormat.format("Preprocessor directive {0} is ignored", ppDirective.type.getName()), ppDirective.line, -1, -1, isError ? IMarker.SEVERITY_ERROR : IMarker.SEVERITY_WARNING, IMarker.PRIORITY_NORMAL);
								if (isError) {
									unsupportedConstructs.add(marker);
								} else {
									warnings.add(marker);
								}
							}
						}
						break;
						default:
						}
					}
				}
			}
		} else if (tokenType == Token.EOF) {
			if (!tokenStreamStack.isEmpty()) {
				// the included file ended, drop lexer
				// from the stack and ignore EOF token
				TokenStreamData tsd = tokenStreamStack.pop();
				if (parser != null) {
					// restore the parser's notion of the current file
					if (tokenStreamStack.isEmpty()) {
						parser.setActualFile(actualFile);
					} else {
						parser.setActualFile(tokenStreamStack.peek().file);
					}
				}
				if (tsd.reader != null) {
					try {
						tsd.reader.close();
					} catch (IOException e) {
						// intentionally ignored: nothing useful can be done if close fails
					}
				}
			} else {
				// real EOF of the outermost file: buffer it and check for unterminated conditionals
				fetchedEOF = true;
				condStateStack.eofCheck();
				tokens.add(t);
				((CommonToken) t).setTokenIndex(tokens.size() - 1);
				--n;
				++i;
				if (n == 0) {
					return i;
				}
			}
		} else {
			// ordinary token: keep it only if we are in an active (passing) region
			if (condStateStack.isPassing()) {
				tokens.add(t);
				((CommonToken) t).setTokenIndex(tokens.size() - 1);
				--n;
				++i;
				if (n == 0) {
					return i;
				}
			}
		}
	} while (true);
}
Aggregations