Use of antlr.TokenStreamException in the Apache Groovy project, in the class GenericsUtils, method parseClassNodesFromString.
/**
 * Parses a comma-separated list of type names (a type-hint option string) into
 * resolved {@code ClassNode}s by feeding {@code "DummyNode<" + option + ">"}
 * through the Antlr Groovy grammar and reading back the generic type arguments.
 *
 * @param option          the raw option string, e.g. {@code "java.util.List<String>,Integer"}
 * @param sourceUnit      unit used both for resolution and for error reporting
 * @param compilationUnit current compilation unit, passed through to resolution
 * @param mn              the method the type hint is attached to (for error messages)
 * @param usage           AST node whose position is used when reporting parse errors
 * @return the resolved signature, or {@code null} when parsing fails (an
 *         {@code IncorrectTypeHintException} is added to the source unit) or
 *         when the option contains no generic arguments
 */
public static ClassNode[] parseClassNodesFromString(final String option, final SourceUnit sourceUnit, final CompilationUnit compilationUnit, final MethodNode mn, final ASTNode usage) {
    // Wrap the option in a dummy parameterized type so the grammar can parse
    // it through the classOrInterfaceType rule.
    GroovyLexer lexer = new GroovyLexer(new StringReader("DummyNode<" + option + ">"));
    final GroovyRecognizer rn = GroovyRecognizer.make(lexer);
    try {
        rn.classOrInterfaceType(true);
        final AtomicReference<ClassNode> ref = new AtomicReference<ClassNode>();
        // AntlrParserPlugin is subclassed only to gain access to the protected
        // makeTypeWithArguments(); buildAST is invoked once with dummy args.
        AntlrParserPlugin plugin = new AntlrParserPlugin() {
            @Override
            public ModuleNode buildAST(final SourceUnit sourceUnit, final ClassLoader classLoader, final Reduction cst) throws ParserException {
                ref.set(makeTypeWithArguments(rn.getAST()));
                return null;
            }
        };
        plugin.buildAST(null, null, null);
        ClassNode parsedNode = ref.get();
        // the returned node is DummyNode<Param1, Param2, Param3, ...>
        GenericsType[] parsedNodeGenericsTypes = parsedNode.getGenericsTypes();
        if (parsedNodeGenericsTypes == null) {
            return null;
        }
        ClassNode[] signature = new ClassNode[parsedNodeGenericsTypes.length];
        for (int i = 0; i < parsedNodeGenericsTypes.length; i++) {
            signature[i] = resolveClassNode(sourceUnit, compilationUnit, mn, usage, parsedNodeGenericsTypes[i].getType());
        }
        return signature;
    } catch (RecognitionException | TokenStreamException | ParserException e) {
        // All three failure modes mean the hint string could not be parsed;
        // they were previously handled by three byte-identical catch blocks.
        sourceUnit.addError(new IncorrectTypeHintException(mn, e, usage.getLineNumber(), usage.getColumnNumber()));
    }
    return null;
}
Use of antlr.TokenStreamException in the Apache Groovy project, in the class AntlrParserPlugin, method transformCSTIntoAST.
/**
 * Runs the Antlr lexer/parser over the given reader and stores the resulting
 * concrete syntax tree in the {@code ast} field. Syntax problems are recorded
 * on the source unit as fatal {@code SyntaxException}s rather than thrown;
 * token-stream failures are attached via {@code addException}.
 */
protected void transformCSTIntoAST(SourceUnit sourceUnit, Reader reader, SourceBuffer sourceBuffer) throws CompilationFailedException {
    ast = null;
    setController(sourceUnit);
    // TODO find a way to inject any GroovyLexer/GroovyRecognizer
    UnicodeEscapingReader escapingReader = new UnicodeEscapingReader(reader, sourceBuffer);
    UnicodeLexerSharedInputState sharedState = new UnicodeLexerSharedInputState(escapingReader);
    GroovyLexer lexer = new GroovyLexer(sharedState);
    escapingReader.setLexer(lexer);
    GroovyRecognizer recognizer = GroovyRecognizer.make(lexer);
    recognizer.setSourceBuffer(sourceBuffer);
    tokenNames = recognizer.getTokenNames();
    recognizer.setFilename(sourceUnit.getName());
    try {
        // start parsing at the compilationUnit rule
        recognizer.compilationUnit();
    } catch (TokenStreamRecognitionException wrapped) {
        // Unwrap the underlying recognition error and report it as fatal.
        RecognitionException cause = wrapped.recog;
        SyntaxException syntaxError = new SyntaxException(cause.getMessage(), cause, cause.getLine(), cause.getColumn());
        syntaxError.setFatal(true);
        sourceUnit.addError(syntaxError);
    } catch (RecognitionException cause) {
        SyntaxException syntaxError = new SyntaxException(cause.getMessage(), cause, cause.getLine(), cause.getColumn());
        syntaxError.setFatal(true);
        sourceUnit.addError(syntaxError);
    } catch (TokenStreamException streamError) {
        sourceUnit.addException(streamError);
    }
    // Always publish whatever (possibly partial) AST the recognizer produced.
    ast = recognizer.getAST();
}
Use of antlr.TokenStreamException in the SonarSource SonarQube project, in the class ValidWhenLexer, method nextToken.
/**
 * Scans and returns the next token from the input stream.
 *
 * NOTE(review): this method has the exact shape of an ANTLR 2-generated
 * CharScanner.nextToken loop (LA() lookahead dispatch, m*() rule methods,
 * _returnToken protocol) — it is presumably generated from a .g grammar and
 * hand edits here would be lost on regeneration; confirm before changing.
 *
 * @return the next token; an EOF-typed token at end of input
 * @throws TokenStreamException when a lexical or character-stream error occurs
 */
public Token nextToken() throws TokenStreamException {
Token theRetToken = null;
// Loop until a rule produces a non-null token (null means a SKIP token
// such as whitespace was consumed, so scanning restarts).
tryAgain: for (; ; ) {
Token _token = null;
int _ttype = Token.INVALID_TYPE;
resetText();
try {
// for char stream error handling
try {
// for lexical error handling
// Dispatch on the first lookahead character; each m*() rule method
// consumes input and leaves its result in _returnToken.
switch(LA(1)) {
case '\t':
case '\n':
case '\r':
case ' ':
{
mWS(true);
theRetToken = _returnToken;
break;
}
case '-':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
{
// '0' is deliberately absent here: it is handled in the default
// branch below to distinguish hex/octal literals.
mDECIMAL_LITERAL(true);
theRetToken = _returnToken;
break;
}
case '"':
case '\'':
{
mSTRING_LITERAL(true);
theRetToken = _returnToken;
break;
}
case '[':
{
mLBRACKET(true);
theRetToken = _returnToken;
break;
}
case ']':
{
mRBRACKET(true);
theRetToken = _returnToken;
break;
}
case '(':
{
mLPAREN(true);
theRetToken = _returnToken;
break;
}
case ')':
{
mRPAREN(true);
theRetToken = _returnToken;
break;
}
case '*':
{
mTHIS(true);
theRetToken = _returnToken;
break;
}
case '.':
case '_':
case 'a':
case 'b':
case 'c':
case 'd':
case 'e':
case 'f':
case 'g':
case 'h':
case 'i':
case 'j':
case 'k':
case 'l':
case 'm':
case 'n':
case 'o':
case 'p':
case 'q':
case 'r':
case 's':
case 't':
case 'u':
case 'v':
case 'w':
case 'x':
case 'y':
case 'z':
{
mIDENTIFIER(true);
theRetToken = _returnToken;
break;
}
case '=':
{
mEQUALSIGN(true);
theRetToken = _returnToken;
break;
}
case '!':
{
mNOTEQUALSIGN(true);
theRetToken = _returnToken;
break;
}
default:
// Cases needing two characters of lookahead (0x.., <=, >=) are
// tested before their one-character prefixes (0.., <, >).
if ((LA(1) == '0') && (LA(2) == 'x')) {
mHEX_LITERAL(true);
theRetToken = _returnToken;
} else if ((LA(1) == '<') && (LA(2) == '=')) {
mLESSEQUALSIGN(true);
theRetToken = _returnToken;
} else if ((LA(1) == '>') && (LA(2) == '=')) {
mGREATEREQUALSIGN(true);
theRetToken = _returnToken;
} else if ((LA(1) == '0') && (true)) {
mOCTAL_LITERAL(true);
theRetToken = _returnToken;
} else if ((LA(1) == '<') && (true)) {
mLESSTHANSIGN(true);
theRetToken = _returnToken;
} else if ((LA(1) == '>') && (true)) {
mGREATERTHANSIGN(true);
theRetToken = _returnToken;
} else {
if (LA(1) == EOF_CHAR) {
uponEOF();
_returnToken = makeToken(Token.EOF_TYPE);
} else {
// No rule can start with this character: lexical error.
throw new NoViableAltForCharException((char) LA(1), getFilename(), getLine(), getColumn());
}
}
}
// found SKIP token
if (_returnToken == null)
continue tryAgain;
// Re-classify the token type via the keyword/literals table before
// returning (e.g. an IDENTIFIER may actually be a keyword).
_ttype = _returnToken.getType();
_ttype = testLiteralsTable(_ttype);
_returnToken.setType(_ttype);
return _returnToken;
} catch (RecognitionException e) {
throw new TokenStreamRecognitionException(e);
}
} catch (CharStreamException cse) {
// I/O-level failures are distinguished from other stream errors.
if (cse instanceof CharStreamIOException) {
throw new TokenStreamIOException(((CharStreamIOException) cse).io);
} else {
throw new TokenStreamException(cse.getMessage());
}
}
}
}
Use of antlr.TokenStreamException in the Querydsl project, in the class JPAIntegrationBase, method query.
@Override
protected QueryHelper<?> query() {
    // Returns a helper whose parse() round-trips the current query metadata
    // through the JPQL serializer and executes it against the EntityManager.
    return new QueryHelper<Void>(templates) {
        @Override
        public void parse() throws RecognitionException, TokenStreamException {
            // Serialize the accumulated query metadata to a JPQL string.
            JPQLSerializer serializer = new JPQLSerializer(templates);
            serializer.serialize(getMetadata(), false, null);
            Query query = em.createQuery(serializer.toString());
            // Bind the constants/parameters collected during serialization.
            JPAUtil.setConstants(query, serializer.getConstantToLabel(), getMetadata().getParams());
            try {
                query.getResultList();
            } catch (Exception e) {
                // Fix: the original also called e.printStackTrace() here,
                // double-reporting the failure; the cause is preserved in
                // the rethrown exception, which is sufficient.
                throw new RuntimeException(e);
            }
        }
    };
}
Use of antlr.TokenStreamException in the groovy-core project, in the class AntlrParserPlugin, method transformCSTIntoAST.
/**
 * Lexes and parses the reader's contents with the Antlr Groovy grammar,
 * leaving the resulting CST in the {@code ast} field. Recognition failures
 * are recorded on the source unit as fatal syntax errors; token-stream
 * failures are attached with {@code addException}.
 */
protected void transformCSTIntoAST(SourceUnit sourceUnit, Reader reader, SourceBuffer sourceBuffer) throws CompilationFailedException {
    ast = null;
    setController(sourceUnit);
    // TODO find a way to inject any GroovyLexer/GroovyRecognizer
    UnicodeEscapingReader escapingReader = new UnicodeEscapingReader(reader, sourceBuffer);
    GroovyLexer lexer = new GroovyLexer(new UnicodeLexerSharedInputState(escapingReader));
    escapingReader.setLexer(lexer);
    GroovyRecognizer recognizer = GroovyRecognizer.make(lexer);
    recognizer.setSourceBuffer(sourceBuffer);
    tokenNames = recognizer.getTokenNames();
    recognizer.setFilename(sourceUnit.getName());
    try {
        // start parsing at the compilationUnit rule
        recognizer.compilationUnit();
    } catch (TokenStreamRecognitionException e) {
        // A recognition error wrapped by the token stream: unwrap and report.
        reportFatalSyntaxError(sourceUnit, e.recog);
    } catch (RecognitionException e) {
        reportFatalSyntaxError(sourceUnit, e);
    } catch (TokenStreamException e) {
        sourceUnit.addException(e);
    }
    // Publish whatever (possibly partial) AST the recognizer built.
    ast = recognizer.getAST();
}

// Converts a recognition error into a fatal SyntaxException on the unit.
private static void reportFatalSyntaxError(SourceUnit sourceUnit, RecognitionException cause) {
    SyntaxException syntaxError = new SyntaxException(cause.getMessage(), cause, cause.getLine(), cause.getColumn());
    syntaxError.setFatal(true);
    sourceUnit.addError(syntaxError);
}
Aggregations