Use of net.sourceforge.sqlexplorer.parsers.Tokenizer.Token in the project tdq-studio-se by Talend.
The class BasicQueryParser, method parse().
/* (non-JavaDoc)
 * @see net.sourceforge.sqlexplorer.parsers.QueryParser#parse()
 *
 * Splits the buffered SQL text into individual queries. When structured
 * comments are enabled in the plugin preferences, the text is first scanned
 * for comment tokens and any ${...} commands found in them are applied to the
 * buffer (which may rewrite the SQL) before the plain query split is done.
 */
public void parse() throws ParserException {
    if (sql == null)
        return;
    Preferences prefs = SQLExplorerPlugin.getDefault().getPluginPreferences();
    if (prefs.getBoolean(IConstants.ENABLE_STRUCTURED_COMMENTS)) {
        StringBuffer buffer = new StringBuffer(sql.toString());
        Tokenizer tokenizer = new Tokenizer(buffer);
        StructuredCommentParser structuredComments = new StructuredCommentParser(this, buffer);
        // Collect every comment token so the preprocessor can decide which of
        // them carry structured-comment commands.
        try {
            Token token;
            while ((token = tokenizer.nextToken()) != null) {
                if (token.getTokenType() == Tokenizer.TokenType.EOL_COMMENT
                        || token.getTokenType() == Tokenizer.TokenType.ML_COMMENT) {
                    structuredComments.addComment(token);
                }
            }
        } catch (StructuredCommentException ignored) {
            // Deliberately best-effort: a malformed structured comment must not
            // prevent the plain SQL from being parsed, so the error is dropped.
            // NOTE(review): consider logging this for diagnostics.
        }
        // Apply the structured-comment edits (these may rewrite 'buffer'), then
        // discard the tokenizer so it cannot be reused against the stale text.
        structuredComments.process();
        tokenizer.reset();
        tokenizer = null;
        sql = buffer;
    }
    _sQuerys = prepareSQL(sql.toString());
    _sNextQuery = doParse();
    sql = null;
}
Use of net.sourceforge.sqlexplorer.parsers.Tokenizer.Token in the project tdq-studio-se by Talend.
The class StructuredCommentParser, method createCommand().
/**
 * Attempts to create a AbstractCommand from a comment token.
 * A structured comment contains "${command} data" immediately inside the
 * two-character comment delimiters; anything else is silently treated as an
 * ordinary comment and ignored.
 * @param comment the comment to parse
 * @return the new AbstractCommand, or null if it is not a structured comment
 * @throws StructuredCommentException if the text inside "${...}" does not
 *         start with a recognised command word
 */
protected Command createCommand(Token comment) throws ParserException {
// Copy the comment text and strip the two-character comment opener
// ("--" for EOL comments, slash-star for multi-line comments).
StringBuffer sb = new StringBuffer(comment);
sb.delete(0, 2);
// Multi-line comments also carry a two-character closer - drop it too.
if (comment.getTokenType() == TokenType.ML_COMMENT)
sb.delete(sb.length() - 2, sb.length());
// Make sure it begins ${, but silently ignore it if not
// (pos is the index of the closing "}"; searching from 2 skips the "${")
int pos = sb.indexOf("}", 2);
if (sb.length() < 3 || !sb.substring(0, 2).equals("${") || pos < 0)
return null;
// Extract the command (ie the bit between "${" and "}") and the data (the bit after the "}")
String data = null;
if (pos < sb.length()) {
data = sb.substring(pos + 1).trim();
// Normalise whitespace-only data to null
if (data.length() == 0)
data = null;
}
sb = new StringBuffer(sb.substring(2, pos));
// ...and has a word as the first token
// (re-tokenize just the command portion that sat between "${" and "}")
Tokenizer tokenizer = new Tokenizer(sb);
Token token = tokenizer.nextToken();
if (token == null)
return null;
if (token.getTokenType() != TokenType.WORD)
throw new StructuredCommentException("Unexpected command in structured comment: " + token.toString(), comment);
// Create a new AbstractCommand
CommandType type;
try {
// I've kept the determination of CommandType outside of the constructor in case we want
// to instantiate different classes for the different commands.
type = CommandType.valueOf(token.toString().toUpperCase());
} catch (IllegalArgumentException e) {
throw new StructuredCommentException("Unrecognised structured comment command \"" + token.toString() + "\"", comment);
}
// Any remaining tokens (command arguments) are consumed by the command itself.
return type.createInstance(this, comment, tokenizer, data);
}
Use of net.sourceforge.sqlexplorer.parsers.Tokenizer.Token in the project tdq-studio-se by Talend.
The class AbstractSyntaxQueryParser, method lookAhead().
/**
 * Looks ahead a given number of places; returns null if there are not enough tokens.
 * A distance of 1 is the next token; the current token is left as it is.
 * @param distance number of tokens to look ahead (must be at least 1)
 * @return the token {@code distance} places ahead, or null if the input runs out first
 * @throws ParserException if the underlying tokenizer fails
 */
protected Tokenizer.Token lookAhead(int distance) throws ParserException {
    // If we already have the future token cached, use it.
    // BUG FIX: the original condition was "futureTokens.size() <= distance",
    // which consulted the cache exactly when it was too small (throwing
    // IndexOutOfBoundsException whenever size() < distance, e.g. an empty
    // cache with distance 1) and skipped it when it actually held enough
    // tokens. The cache is usable when it holds at least 'distance' entries.
    if (futureTokens.size() >= distance)
        return futureTokens.get(distance - 1);
    // Wind forward until we've got our token....
    Tokenizer.Token futureToken = null;
    for (int i = 0; i < distance; i++) {
        futureToken = nextToken(false);
        if (futureToken == null) {
            // Input exhausted after i successful reads; only unget those.
            distance = i;
            break;
        }
    }
    // ...and then wind back so the current position is unchanged
    while (distance > 0) {
        ungetToken();
        distance--;
    }
    return futureToken;
}
Use of net.sourceforge.sqlexplorer.parsers.Tokenizer.Token in the project tdq-studio-se by Talend.
The class AbstractSyntaxQueryParser, method parse().
/* (non-JavaDoc)
 * @see net.sourceforge.sqlexplorer.parsers.QueryParser#parse()
 */
public void parse() throws ParserException {
    if (enableStructuredComments) {
        // Hand every comment token to the structured-comment preprocessor so it
        // can apply any embedded commands directly to the buffer.
        StructuredCommentParser commentProcessor = new StructuredCommentParser(this, buffer);
        tokenizer.reset();
        for (Token t = tokenizer.nextToken(); t != null; t = tokenizer.nextToken()) {
            Tokenizer.TokenType kind = t.getTokenType();
            if (kind == Tokenizer.TokenType.EOL_COMMENT || kind == Tokenizer.TokenType.ML_COMMENT) {
                commentProcessor.addComment(t);
            }
        }
        // Apply the structured comments, then rewind for the real parse below.
        commentProcessor.process();
        tokenizer.reset();
    }
    // Do the parsing
    parseQueries();
    /*
     * Resetting the tokenizer matters when structured comments are enabled:
     * their commands can rewrite the SQL text, after which every cached
     * start/end location and starting line number in tokens and queries must
     * match the edited text. Rather than patching up any state the tokenizer
     * still holds - unnecessary once the text has been fully tokenized - we
     * null it out so it cannot be used again by accident.
     */
    tokenizer = null;
}
Aggregations