Use of org.eclipse.titan.common.parsers.TitanListener in project titan.EclipsePlug-ins by eclipse.
From the class TTCN3Analyzer, the method parse:
/**
 * Parses a TTCN-3 file using ANTLR v4.
 *
 * @param aReader reader of the file to parse (cannot be null; the reader is closed by this method)
 * @param aFileLength the file length
 * @param aEclipseFile the Eclipse dependent resource file
 */
private void parse(final Reader aReader, final int aFileLength, final IFile aEclipseFile) {
	CharStream charStream = new UnbufferedCharStream(aReader);
	Ttcn3Lexer lexer = new Ttcn3Lexer(charStream);
	lexer.setCommentTodo(true);
	lexer.setTokenFactory(new CommonTokenFactory(true));
	lexer.initRootInterval(aFileLength);

	TitanListener lexerListener = new TitanListener();
	// remove ConsoleErrorListener
	lexer.removeErrorListeners();
	lexer.addErrorListener(lexerListener);

	// 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
	// Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
	// pr_PatternChunk[StringBuilder builder, boolean[] uni]:
	// $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
	// 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
	final CommonTokenStream tokenStream = new CommonTokenStream(lexer);

	Ttcn3Parser parser = new Ttcn3Parser(tokenStream);
	ParserUtilities.setBuildParseTree(parser);

	PreprocessedTokenStream preprocessor = null;
	if (aEclipseFile != null && GlobalParser.TTCNPP_EXTENSION.equals(aEclipseFile.getFileExtension())) {
		lexer.setTTCNPP();
		preprocessor = new PreprocessedTokenStream(lexer);
		preprocessor.setActualFile(aEclipseFile);
		if (aEclipseFile.getProject() != null) {
			preprocessor.setMacros(PreprocessorSymbolsOptionsData.getTTCN3PreprocessorDefines(aEclipseFile.getProject()));
		}
		parser = new Ttcn3Parser(preprocessor);
		ParserUtilities.setBuildParseTree(parser);
		preprocessor.setActualLexer(lexer);
		preprocessor.setParser(parser);
	}

	if (aEclipseFile != null) {
		lexer.setActualFile(aEclipseFile);
		parser.setActualFile(aEclipseFile);
		parser.setProject(aEclipseFile.getProject());
	}

	// remove ConsoleErrorListener
	parser.removeErrorListeners();
	TitanListener parserListener = new TitanListener();
	parser.addErrorListener(parserListener);

	// This is added because of the following ANTLR 4 bug:
	// Memory Leak in PredictionContextCache #499
	// https://github.com/antlr/antlr4/issues/499
	DFA[] decisionToDFA = parser.getInterpreter().decisionToDFA;
	parser.setInterpreter(new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, new PredictionContextCache()));

	// try SLL mode
	try {
		parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
		final ParseTree root = parser.pr_TTCN3File();
		ParserUtilities.logParseTree(root, parser);
		warnings = parser.getWarnings();
		mErrorsStored = lexerListener.getErrorsStored();
		mErrorsStored.addAll(parserListener.getErrorsStored());
	} catch (RecognitionException e) {
		// quit
	}

	if (!warnings.isEmpty() || !mErrorsStored.isEmpty()) {
		// SLL mode might have failed, try LL mode
		try {
			CharStream charStream2 = new UnbufferedCharStream(aReader);
			lexer.setInputStream(charStream2);
			// lexer.reset();
			parser.reset();
			parserListener.reset();
			parser.getInterpreter().setPredictionMode(PredictionMode.LL);
			final ParseTree root = parser.pr_TTCN3File();
			ParserUtilities.logParseTree(root, parser);
			warnings = parser.getWarnings();
			mErrorsStored = lexerListener.getErrorsStored();
			mErrorsStored.addAll(parserListener.getErrorsStored());
		} catch (RecognitionException e) {
		}
	}

	unsupportedConstructs = parser.getUnsupportedConstructs();
	rootInterval = lexer.getRootInterval();
	actualTtc3Module = parser.getModule();

	if (preprocessor != null) {
		// if the file was preprocessed
		mErrorsStored.addAll(preprocessor.getErrorStorage());
		warnings.addAll(preprocessor.getWarnings());
		unsupportedConstructs.addAll(preprocessor.getUnsupportedConstructs());
		if (actualTtc3Module != null) {
			actualTtc3Module.setIncludedFiles(preprocessor.getIncludedFiles());
			actualTtc3Module.setInactiveCodeLocations(preprocessor.getInactiveCodeLocations());
		}
	}

	try {
		aReader.close();
	} catch (IOException e) {
	}
}
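Both the lexer and the parser get a TitanListener after the default ConsoleErrorListener has been removed, so syntax problems are collected in mErrorsStored instead of being printed to the console. TitanListener itself is not shown on this page; a minimal collecting listener that plays the same role could look like the sketch below (the class and member names are illustrative assumptions, not the Titan API):

import java.util.ArrayList;
import java.util.List;

import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;

// Collects syntax errors instead of printing them, mirroring how TitanListener is used above.
public final class CollectingErrorListener extends BaseErrorListener {

	public static final class SyntaxError {
		public final int line;
		public final int column;
		public final String message;

		SyntaxError(final int line, final int column, final String message) {
			this.line = line;
			this.column = column;
			this.message = message;
		}
	}

	private final List<SyntaxError> errors = new ArrayList<>();

	@Override
	public void syntaxError(final Recognizer<?, ?> recognizer, final Object offendingSymbol,
			final int line, final int charPositionInLine, final String msg, final RecognitionException e) {
		errors.add(new SyntaxError(line, charPositionInLine, msg));
	}

	public List<SyntaxError> getErrors() {
		return errors;
	}

	// Cleared between passes, as parserListener.reset() is before the LL retry above.
	public void reset() {
		errors.clear();
	}
}

The getErrorsStored() and reset() calls in parse() suggest that the real TitanListener exposes its stored errors and can be cleared before the LL retry, which is what this sketch imitates.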
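The SLL/LL handling in parse() follows ANTLR 4's two-stage parsing strategy: parse in the fast SLL prediction mode first, and reparse in full LL mode only when the first pass reports problems. A generic sketch of that pattern, assuming a recent ANTLR 4 runtime and ANTLR-generated MyLexer/MyParser classes with an entryRule() start rule (all three names are placeholders), might look like this:

import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.DefaultErrorStrategy;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.atn.PredictionMode;
import org.antlr.v4.runtime.misc.ParseCancellationException;

public final class TwoStageParse {

	public static ParserRuleContext parse(final String source) {
		final MyLexer lexer = new MyLexer(CharStreams.fromString(source));
		final CommonTokenStream tokens = new CommonTokenStream(lexer);
		final MyParser parser = new MyParser(tokens);
		parser.removeErrorListeners();                    // drop the default ConsoleErrorListener
		parser.setErrorHandler(new BailErrorStrategy());  // abort on the first syntax error
		parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
		try {
			// First pass: SLL prediction is fast and succeeds on the vast majority of inputs.
			return parser.entryRule();
		} catch (ParseCancellationException e) {
			// Second pass: rewind and reparse with full LL prediction for exact diagnostics.
			tokens.seek(0);
			parser.reset();
			parser.setErrorHandler(new DefaultErrorStrategy());
			parser.getInterpreter().setPredictionMode(PredictionMode.LL);
			return parser.entryRule();
		}
	}
}

Unlike the Titan code, which decides to retry by inspecting the collected warnings and errors, this sketch installs a BailErrorStrategy so the SLL pass aborts on the first syntax error; both are common ways to trigger the LL fallback.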
Use of org.eclipse.titan.common.parsers.TitanListener in project titan.EclipsePlug-ins by eclipse.
From the class PreprocessedTokenStream, the method processIncludeDirective:
/**
 * Adds a new lexer to the lexer stack to read tokens from the included
 * file.
 *
 * @param ppDirective the #include directive, whose str field holds the file name parameter
 */
private void processIncludeDirective(final PreprocessorDirective ppDirective) {
	if (ppDirective.str == null || "".equals(ppDirective.str)) {
		TITANMarker marker = new TITANMarker("File name was not provided", ppDirective.line, -1, -1,
				IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
		unsupportedConstructs.add(marker);
		return;
	}

	IFile includedFile = GlobalParser.getProjectSourceParser(actualFile.getProject()).getTTCN3IncludeFileByName(ppDirective.str);
	if (includedFile == null) {
		TITANMarker marker = new TITANMarker(MessageFormat.format("Included file `{0}'' could not be found", ppDirective.str),
				ppDirective.line, -1, -1, IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
		unsupportedConstructs.add(marker);
		return;
	}

	// check extension
	if (!GlobalParser.TTCNIN_EXTENSION.equals(includedFile.getFileExtension())) {
		TITANMarker marker = new TITANMarker(MessageFormat.format("File `{0}'' does not have the `{1}'' extension", ppDirective.str, GlobalParser.TTCNIN_EXTENSION),
				ppDirective.line, -1, -1, IMarker.SEVERITY_WARNING, IMarker.PRIORITY_NORMAL);
		warnings.add(marker);
	}

	// check if the file is already loaded into an editor
	String code = null;
	if (EditorTracker.containsKey(includedFile)) {
		List<ISemanticTITANEditor> editors = EditorTracker.getEditor(includedFile);
		ISemanticTITANEditor editor = editors.get(0);
		IDocument document = editor.getDocument();
		code = document.get();
	}

	// create lexer and set it up
	Reader reader = null;
	CharStream charStream = null;
	Ttcn3Lexer lexer = null;
	int rootInt;
	if (code != null) {
		reader = new StringReader(code);
		charStream = new UnbufferedCharStream(reader);
		lexer = new Ttcn3Lexer(charStream);
		lexer.setTokenFactory(new CommonTokenFactory(true));
		rootInt = code.length();
	} else {
		try {
			InputStreamReader temp = new InputStreamReader(includedFile.getContents());
			if (!includedFile.getCharset().equals(temp.getEncoding())) {
				try {
					temp.close();
				} catch (IOException e) {
					ErrorReporter.logWarningExceptionStackTrace(e);
				}
				temp = new InputStreamReader(includedFile.getContents(), includedFile.getCharset());
			}
			reader = new BufferedReader(temp);
		} catch (CoreException e) {
			ErrorReporter.logExceptionStackTrace(e);
			return;
		} catch (UnsupportedEncodingException e) {
			ErrorReporter.logExceptionStackTrace(e);
			return;
		}

		charStream = new UnbufferedCharStream(reader);
		lexer = new Ttcn3Lexer(charStream);
		lexer.setTokenFactory(new CommonTokenFactory(true));

		lexerListener = new TitanListener();
		// remove ConsoleErrorListener
		lexer.removeErrorListeners();
		lexer.addErrorListener(lexerListener);

		IFileStore store;
		try {
			store = EFS.getStore(includedFile.getLocationURI());
		} catch (CoreException e) {
			ErrorReporter.logExceptionStackTrace(e);
			return;
		}
		IFileInfo fileInfo = store.fetchInfo();
		rootInt = (int) fileInfo.getLength();
	}

	lexer.setTokenFactory(new CommonTokenFactory(true));
	lexer.setTTCNPP();
	lexer.initRootInterval(rootInt);
	lexer.setActualFile(includedFile);

	// add the lexer to the stack of lexers
	tokenStreamStack.push(new TokenStreamData(lexer, includedFile, reader));
	if (parser != null) {
		parser.setActualFile(includedFile);
	}
	includedFiles.add(includedFile);
}
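The tokenStreamStack.push(...) call is what makes nested #include directives work: tokens are served from the lexer on top of the stack, and once an included file is exhausted the stack is popped so lexing resumes in the including file. A self-contained sketch of that idea follows; the NestedSource class and its use of plain ANTLR Lexer objects are assumptions for illustration, not the Titan TokenStreamData/PreprocessedTokenStream implementation:

import java.util.ArrayDeque;
import java.util.Deque;

import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;

// Serves tokens from a stack of lexers: the top lexer is the innermost #include.
public final class NestedSource {
	private final Deque<Lexer> stack = new ArrayDeque<>();

	// Push the lexer of the outermost file first, then one lexer per #include as it is seen.
	public void push(final Lexer lexer) {
		stack.push(lexer);
	}

	// Returns the next real token, popping lexers of included files that have reached EOF.
	public Token nextToken() {
		while (true) {
			final Token token = stack.peek().nextToken();
			if (token.getType() != Token.EOF || stack.size() == 1) {
				return token;  // normal token, or EOF of the outermost file
			}
			stack.pop();       // included file is exhausted, resume the includer
		}
	}
}

Pushing the included file's lexer, as processIncludeDirective does, means recursively included files simply stack up and unwind in the right order.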