use of de.be4.classicalb.core.parser.node.Token in project probparsers by bendisposto.
the class BParser method parse.
/**
* Parses the input string.
*
* @param input
* The {@link String} to be parsed
* @param debugOutput
* output debug messages on standard out?
* @param contentProvider
* A {@link IFileContentProvider} that is able to load content of
* referenced files during the parsing process. The content
* provider is used for referenced definition files for example.
* @return the root node of the AST
* @throws BCompoundException
* The {@link BCompoundException} class stores all
* {@link BException}s that occurred during the parsing process. The
* {@link BException} class stores the actual exception as a
* delegate and forwards all method calls to it, so it is safe
* for tools to just use this exception if they only want to
* extract an error message. If a tool needs to extract additional
* information, such as a source code position or the involved
* tokens or nodes, it needs to retrieve the delegate
* exception. The {@link BException} class offers the
* {@link BException#getCause()} method for this, which returns
* the delegate exception.
* <p>
* Internal exceptions:
* <ul>
* <li>{@link PreParseException}: This exception contains errors
* that occur during the preparsing. If possible, it supplies the
* token where the error occurred.</li>
* <li>{@link BLexerException}: If any error occurs in the
* generated or customized lexer, a {@link LexerException} is
* thrown. Usually the lexer classes just throw a
* {@link LexerException}, but this class unfortunately does not
* contain any explicit information about the source code
* position where the error occurred. Using aspect-oriented
* programming we intercept the throwing of these exceptions and
* replace them with our own exception, which provides the
* source code position of the last characters that were read
* from the input.</li>
* <li>{@link BParseException}: This exception is thrown in two
* situations: on the one hand, if the parser throws a
* {@link ParserException}, we convert it into a
* {@link BParseException}; on the other hand, it can be thrown
* if any error is found during the AST transformations after
* the parser has finished.</li>
* <li>{@link CheckException}: If any problem occurs while
* performing semantic checks, a {@link CheckException} is
* thrown. We provide one or more nodes that are involved in the
* problem. For example, if we find duplicate machine clauses,
* we will list all occurrences in the exception.</li>
* </ul>
*/
public Start parse(final String input, final boolean debugOutput, final IFileContentProvider contentProvider) throws BCompoundException {
    final Reader reader = new StringReader(input);
    try {
        // PreParsing
        final DefinitionTypes defTypes = preParsing(debugOutput, reader, contentProvider, directory);
        /*
         * The definition types are used in the lexer in order to replace an
         * identifier token by a definition call token. This is required if
         * the definition is a predicate, because an identifier cannot be
         * parsed as a predicate. For example, "... SELECT def THEN ..."
         * would lead to a parse error. The lexer will replace the
         * identifier token "def" by a TDefLiteralPredicate, which will be
         * accepted by the parser.
         */
        defTypes.addAll(definitions.getTypes());
        /*
         * Main parser
         */
        final BLexer lexer = new BLexer(new PushbackReader(reader, BLexer.PUSHBACK_BUFFER_SIZE), defTypes);
        lexer.setParseOptions(parseOptions);
        SabbleCCBParser parser = new SabbleCCBParser(lexer);
        final Start rootNode = parser.parse();
        final List<BException> bExceptionList = new ArrayList<>();
        /*
         * Collect the available definition declarations. This needs to be
         * done now because they are needed by the following transformations.
         */
        final DefinitionCollector collector = new DefinitionCollector(defTypes, this.definitions);
        collector.collectDefinitions(rootNode);
        List<CheckException> definitionsCollectorExceptions = collector.getExceptions();
        for (CheckException checkException : definitionsCollectorExceptions) {
            bExceptionList.add(new BException(getFileName(), checkException));
        }
        // perform AST transformations that can't be done by SableCC
        try {
            applyAstTransformations(rootNode);
        } catch (CheckException e) {
            throw new BCompoundException(new BException(getFileName(), e));
        }
        // perform some semantic checks which are not done in the parser
        List<CheckException> checkExceptions = performSemanticChecks(rootNode);
        for (CheckException checkException : checkExceptions) {
            bExceptionList.add(new BException(getFileName(), checkException));
        }
        if (!bExceptionList.isEmpty()) {
            throw new BCompoundException(bExceptionList);
        }
        return rootNode;
    } catch (final LexerException e) {
        throw new BCompoundException(new BException(getFileName(), e));
    } catch (final BParseException e) {
        throw new BCompoundException(new BException(getFileName(), e));
    } catch (final IOException e) {
        throw new BCompoundException(new BException(getFileName(), e));
    } catch (final PreParseException e) {
        throw new BCompoundException(new BException(getFileName(), e));
    } catch (final ParserException e) {
        final Token token = e.getToken();
        String msg = getImprovedErrorMessageBasedOnTheErrorToken(token);
        if (msg == null) {
            msg = e.getLocalizedMessage();
        }
        final String realMsg = e.getRealMsg();
        throw new BCompoundException(new BException(getFileName(), new BParseException(token, msg, realMsg, e)));
    } catch (BException e) {
        throw new BCompoundException(e);
    }
}
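The Javadoc above describes the intended error handling for callers: the BCompoundException message is sufficient for plain error reporting, while source positions and involved tokens or nodes require unwrapping the BException delegates via getCause(). The following is a minimal caller sketch along those lines, not code from the project: the class and method names are placeholders, the import paths are assumed from the package layout, and only the parse(String, boolean, IFileContentProvider) call and the exception classes are taken from the excerpt itself.

import de.be4.classicalb.core.parser.BParser;
import de.be4.classicalb.core.parser.IFileContentProvider;
import de.be4.classicalb.core.parser.exceptions.BCompoundException;
import de.be4.classicalb.core.parser.node.Start;

// Hypothetical caller sketch (not part of the project sources).
public class MachineParsingExample {
    public Start parseOrReport(final String input, final IFileContentProvider contentProvider) {
        final BParser bParser = new BParser();
        try {
            return bParser.parse(input, false, contentProvider);
        } catch (final BCompoundException e) {
            // For a plain error message the compound exception itself is enough.
            System.err.println(e.getMessage());
            // For source positions or the involved tokens/nodes, unwrap the
            // delegate of each contained BException via BException#getCause().
            return null;
        }
    }
}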
use of de.be4.classicalb.core.parser.node.Token in project probparsers by bendisposto.
the class PreLexer method collectRhs.
private void collectRhs() throws LexerException, IOException {
    if (state.equals(State.DEFINITIONS_RHS) || (previousState != null && previousState.equals(State.DEFINITIONS_RHS))) {
        if (rhsToken == null) {
            // starting a new definition rhs
            rhsToken = new TRhsBody("", -1, -1);
            rhsBuffer = new StringBuilder();
        } else {
            final State nextState = getNextState();
            // end of rhs reached?
            if (nextState != null) {
                // push current token back into reader
                try {
                    unread(token);
                } catch (IOException e) {
                    throw new IOException("Pushback buffer overflow on Token: " + token.getText());
                }
                // prepare rhs_body token to be the current one
                ((Token) rhsToken).setText(rhsBuffer.toString());
                token = rhsToken;
                rhsToken = null;
                rhsBuffer = null;
                state = nextState;
            } else {
                // first token after "==" sets start position
                if (rhsToken.getLine() == -1) {
                    rhsToken.setLine(token.getLine());
                    rhsToken.setPos(token.getPos());
                }
                rhsBuffer.append(token.getText());
                token = null;
            }
        }
    }
}
use of de.be4.classicalb.core.parser.node.Token in project probparsers by bendisposto.
the class EBLexer method filter.
@Override
protected void filter() throws LexerException, IOException {
    super.filter();
    // If the current token is an identifier whose entry in v is true, replace
    // it in the token stream by the sequence "( identifier = TRUE )", i.e. the
    // bare identifier is expanded into an explicit equality with TRUE.
    if (token != null && token instanceof TIdentifierLiteral && v.get(token.getText())) {
        Queue<IToken> ts = getNextList();
        List<IToken> toks = getTokenList();
        int l = token.getLine();
        int p = token.getPos();
        TLeftPar t1 = new TLeftPar(l, p);
        ts.add(t1);
        toks.add(t1);
        ts.add(token);
        toks.add(token);
        TEqual t2 = new TEqual("=", l, p);
        ts.add(t2);
        toks.add(t2);
        TTrue t3 = new TTrue("TRUE", l, p);
        ts.add(t3);
        toks.add(t3);
        TRightPar t4 = new TRightPar(l, p);
        ts.add(t4);
        toks.add(t4);
        token = null;
    }
}
use of de.be4.classicalb.core.parser.node.Token in project probparsers by bendisposto.
the class EventBLexer method endStringToken.
private void endStringToken() throws LexerException {
    try {
        /*
         * Push back the current token. We are going to insert our own string
         * token into the token stream just before the current token. Reset
         * the state so that the unread token can be recognized again in the
         * next lexer step.
         */
        unread(token);
        state = State.NORMAL;
        // create the text for the string token
        string.setText(createString());
    } catch (final IOException e) {
        throw new LexerException("IOException occurred: " + e.getLocalizedMessage());
    }
    token = string;
    string = null;
    stringBuffer = null;
}
use of de.be4.classicalb.core.parser.node.Token in project probparsers by bendisposto.
the class SyntaxErrorsDetectedOnTokenStreamTest method checkForDublicateAndInDefinitionsClause2.
@Test
public void checkForDublicateAndInDefinitionsClause2() throws Exception {
    String s = "MACHINE Definitions \n DEFINITIONS\n foo == \n \n 1=1 \n& & 2=2 \nEND";
    try {
        getTreeAsString(s);
        fail("Duplicate & was not detected.");
    } catch (BCompoundException e) {
        System.out.println(e.getMessage());
        // there is no token available, hence the position is in the text
        assertTrue(e.getMessage().contains("[6,6]"));
        assertTrue(e.getMessage().contains("& &"));
    }
}
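The getTreeAsString helper called above is not part of this excerpt. A minimal sketch of what such a helper might do, assuming it simply parses the machine source and renders the resulting AST, is shown below; the real test utility may differ, the extra content provider parameter is only added because the provider setup is not shown, and the rendering via toString() is an assumption.

// Hypothetical stand-in for the test helper used above; only the
// parse(String, boolean, IFileContentProvider) call is taken from the excerpt.
private String getTreeAsString(final String testMachine, final IFileContentProvider contentProvider) throws BCompoundException {
    final Start ast = new BParser().parse(testMachine, false, contentProvider);
    return ast.toString();
}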