Use of net.sourceforge.sqlexplorer.parsers.scp.StructuredCommentParser in project tdq-studio-se by Talend.
The class BasicQueryParser, method parse().
/* (non-Javadoc)
 * @see net.sourceforge.sqlexplorer.parsers.QueryParser#parse()
 */
public void parse() throws ParserException {
    if (sql == null)
        return;
    Preferences prefs = SQLExplorerPlugin.getDefault().getPluginPreferences();
    if (prefs.getBoolean(IConstants.ENABLE_STRUCTURED_COMMENTS)) {
        StringBuffer buffer = new StringBuffer(sql.toString());
        Tokenizer tokenizer = new Tokenizer(buffer);
        StructuredCommentParser structuredComments = new StructuredCommentParser(this, buffer);
        // Collect every comment token so the structured comment parser can inspect it
        try {
            Token token;
            while ((token = tokenizer.nextToken()) != null) {
                if (token.getTokenType() == Tokenizer.TokenType.EOL_COMMENT || token.getTokenType() == Tokenizer.TokenType.ML_COMMENT) {
                    structuredComments.addComment(token);
                }
            }
        } catch (StructuredCommentException e) {
            // Malformed structured comments are ignored; processing continues with the comments collected so far
        }
        // Process the structured comments (which may rewrite the buffer), then discard the tokenizer
        structuredComments.process();
        tokenizer.reset();
        tokenizer = null;
        sql = buffer;
    }
    _sQuerys = prepareSQL(sql.toString());
    _sNextQuery = doParse();
    sql = null;
}
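To make the flow above easier to follow, here is a minimal, self-contained sketch of the same pattern: collect the comment tokens first, let a preprocessing step act on them (possibly rewriting the buffer), and only then parse the resulting text. The CommentPreprocessDemo class and its regex-based comment scan are hypothetical stand-ins for illustration only, not the real SQL Explorer Tokenizer or StructuredCommentParser.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical stand-in for the structured-comment preprocessing pattern shown above:
// scan the SQL for comment tokens, hand them to a preprocessing step that may rewrite
// the buffer, then parse the (possibly modified) text.
public class CommentPreprocessDemo {

    // Matches "-- ..." end-of-line comments and "/* ... */" multi-line comments.
    private static final Pattern COMMENT =
            Pattern.compile("--[^\\r\\n]*|/\\*.*?\\*/", Pattern.DOTALL);

    public static void main(String[] args) {
        StringBuffer buffer = new StringBuffer(
                "-- structured: fetchsize 100\nSELECT * FROM t;\n/* plain comment */\nSELECT 1;");

        // Step 1: collect every comment, mirroring the tokenizer loop in parse().
        List<String> comments = new ArrayList<>();
        Matcher m = COMMENT.matcher(buffer);
        while (m.find()) {
            comments.add(m.group());
        }

        // Step 2: "process" the comments. Here we merely strip them; the real
        // StructuredCommentParser may rewrite the buffer based on their contents.
        String rewritten = COMMENT.matcher(buffer).replaceAll("");

        // Step 3: split into statements, standing in for prepareSQL()/doParse().
        for (String statement : rewritten.split(";")) {
            if (!statement.trim().isEmpty()) {
                System.out.println("query: " + statement.trim());
            }
        }
        System.out.println("comments seen: " + comments);
    }
}

The key point mirrored from the snippet above is ordering: the comments are gathered before the preprocessing step runs, because that step may edit the buffer and invalidate any positions recorded during tokenization.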
Use of net.sourceforge.sqlexplorer.parsers.scp.StructuredCommentParser in project tdq-studio-se by Talend.
The class AbstractSyntaxQueryParser, method parse().
/* (non-Javadoc)
 * @see net.sourceforge.sqlexplorer.parsers.QueryParser#parse()
 */
public void parse() throws ParserException {
    if (enableStructuredComments) {
        StructuredCommentParser preprocessor = new StructuredCommentParser(this, buffer);
        // Collect every comment token so the structured comment preprocessor can inspect it
        Token token;
        tokenizer.reset();
        while ((token = tokenizer.nextToken()) != null) {
            if (token.getTokenType() == Tokenizer.TokenType.EOL_COMMENT || token.getTokenType() == Tokenizer.TokenType.ML_COMMENT) {
                preprocessor.addComment(token);
            }
        }
        // Process the structured comments (which may rewrite the buffer), then reset the tokenizer
        preprocessor.process();
        tokenizer.reset();
    }
    // Do the parsing
    parseQueries();
    /*
     * It is important to discard the tokenizer when structured comments are in use, because some
     * of the commands in structured comments can cause the SQL to be rewritten; when this
     * happens, the start and end locations in any tokens, and the starting line numbers in
     * queries and any tokens, must be updated to reflect the edits. While we *could* update
     * any state held by the tokenizer, this is unnecessary once the text has been fully
     * tokenized - setting the tokenizer to null simply ensures it cannot be used again
     * by accident.
     */
    tokenizer = null;
}
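The closing comment points at a subtle bookkeeping issue: once the preprocessor edits the buffer, previously recorded token offsets go stale, which is why the parser discards its tokenizer instead of patching it. Below is a minimal sketch of that offset bookkeeping, using a simplified hypothetical Token class rather than the real SQL Explorer one.

import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration of why token positions must be adjusted (or the tokenizer
// discarded) once the buffer has been edited: removing text in front of a token shifts it.
public class OffsetAdjustDemo {

    // Simplified token: just a half-open [start, end) character range into the buffer.
    static final class Token {
        int start;
        int end;
        Token(int start, int end) { this.start = start; this.end = end; }
    }

    // Shift every token that begins at or after fromOffset by delta characters.
    static void shiftTokens(List<Token> tokens, int fromOffset, int delta) {
        for (Token t : tokens) {
            if (t.start >= fromOffset) {
                t.start += delta;
                t.end += delta;
            }
        }
    }

    public static void main(String[] args) {
        StringBuilder sql = new StringBuilder("/* hint */ SELECT 1");
        List<Token> tokens = new ArrayList<>();
        tokens.add(new Token(11, 19)); // the "SELECT 1" token

        // Delete the leading comment plus the space after it (11 characters),
        // then shift every token that started after the deleted region.
        sql.delete(0, 11);
        shiftTokens(tokens, 11, -11);

        Token t = tokens.get(0);
        System.out.println(sql.substring(t.start, t.end)); // prints "SELECT 1"
    }
}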