Example 1 with ParsingException

Use of io.debezium.text.ParsingException in project debezium by debezium.

The class DdlParserSql2003, method parseColumnDefinition.

protected void parseColumnDefinition(Marker start, String columnName, TokenStream tokens, TableEditor table, ColumnEditor column, AtomicBoolean isPrimaryKey) {
    // Parse the data type, which must be at this location ...
    List<ParsingException> errors = new ArrayList<>();
    Marker dataTypeStart = tokens.mark();
    DataType dataType = dataTypeParser.parse(tokens, errors::addAll);
    if (dataType == null) {
        String dataTypeName = parseDomainName(start);
        if (dataTypeName != null)
            dataType = DataType.userDefinedType(dataTypeName);
    }
    if (dataType == null) {
        // No data type was found
        parsingFailed(dataTypeStart.position(), errors, "Unable to read the data type");
        return;
    }
    column.jdbcType(dataType.jdbcType());
    column.type(dataType.name(), dataType.expression());
    if (dataType.length() > -1)
        column.length((int) dataType.length());
    if (dataType.scale() > -1)
        column.scale(dataType.scale());
    if (tokens.matches("REFERENCES", "ARE")) {
        parseReferencesScopeCheck(start, columnName, tokens, column);
    }
    if (tokens.matches("DEFAULT")) {
        parseDefaultClause(start, column);
    } else if (tokens.matches("GENERATED")) {
        parseIdentityColumnSpec(start, column);
    }
    while (tokens.matchesAnyOf("NOT", "UNIQUE", "PRIMARY", "CHECK", "REFERENCES", "CONSTRAINT")) {
        parseColumnConstraintDefinition(start, column, isPrimaryKey);
    }
    if (tokens.canConsume("COLLATE")) {
        parseSchemaQualifiedName(start);
    }
}
Also used: ParsingException (io.debezium.text.ParsingException), ArrayList (java.util.ArrayList), Marker (io.debezium.text.TokenStream.Marker)
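
In this example the data-type parser does not throw on failure: it reports problems through the errors::addAll consumer, returns null, and leaves it to the caller to decide whether that is fatal (here via parsingFailed). Below is a minimal sketch of the same collect-then-decide pattern in isolation. The parse call mirrors the snippet; the DataTypeParser import location and the System.err reporting are assumptions for illustration.

import java.util.ArrayList;
import java.util.List;

import io.debezium.relational.ddl.DataType;
import io.debezium.relational.ddl.DataTypeParser;   // assumed package
import io.debezium.text.ParsingException;
import io.debezium.text.TokenStream;

public class DataTypeParsingSketch {

    // Same parse call as in the snippet above: failures are accumulated rather than thrown,
    // and a null result signals that no data type could be read.
    static DataType parseOrReport(DataTypeParser dataTypeParser, TokenStream tokens) {
        List<ParsingException> errors = new ArrayList<>();
        DataType dataType = dataTypeParser.parse(tokens, errors::addAll);
        if (dataType == null) {
            // Surface every accumulated problem, not just the first one ...
            errors.forEach(e -> System.err.println("Unable to read the data type: " + e.getMessage()));
        }
        return dataType;
    }
}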

Example 2 with ParsingException

Use of io.debezium.text.ParsingException in project debezium by debezium.

The class DdlTokenizer, method tokenize.

@Override
public void tokenize(CharacterStream input, Tokens tokens) throws ParsingException {
    tokens = adapt(input, tokens);
    int startIndex;
    int endIndex;
    while (input.hasNext()) {
        char c = input.next();
        switch(c) {
            case ' ':
            case '\t':
            case '\n':
            case '\r':
                // Just skip these whitespace characters ...
                break;
            // ==============================================================================================
            case '#':
                {
                    startIndex = input.index();
                    Position startPosition = input.position(startIndex);
                    // End-of-line comment ...
                    boolean foundLineTerminator = false;
                    while (input.hasNext()) {
                        c = input.next();
                        if (c == '\n' || c == '\r') {
                            foundLineTerminator = true;
                            break;
                        }
                    }
                    // the token won't include the '\n' or '\r' character(s)
                    endIndex = input.index();
                    // must point beyond last char
                    if (!foundLineTerminator)
                        ++endIndex;
                    if (c == '\r' && input.isNext('\n'))
                        input.next();
                    if (useComments) {
                        tokens.addToken(startPosition, startIndex, endIndex, COMMENT);
                    }
                    break;
                }
            // ==============================================================================================
            case '-':
                {
                    startIndex = input.index();
                    Position startPosition = input.position(startIndex);
                    if (input.isNext('-')) {
                        // -- END OF LINE comment ...
                        boolean foundLineTerminator = false;
                        while (input.hasNext()) {
                            c = input.next();
                            if (c == '\n' || c == '\r') {
                                foundLineTerminator = true;
                                break;
                            }
                        }
                        // the token won't include the '\n' or '\r' character(s)
                        endIndex = input.index();
                        // must point beyond last char
                        if (!foundLineTerminator)
                            ++endIndex;
                        if (c == '\r' && input.isNext('\n'))
                            input.next();
                        if (useComments) {
                            tokens.addToken(startPosition, startIndex, endIndex, COMMENT);
                        }
                    } else {
                        // just a regular dash ...
                        tokens.addToken(startPosition, startIndex, startIndex + 1, SYMBOL);
                    }
                    break;
                }
            // ==============================================================================================
            case '(':
            case ')':
            case '{':
            case '}':
            case '*':
            case ',':
            case ';':
            case '+':
            case '%':
            case '?':
            case '[':
            case ']':
            case '!':
            case '<':
            case '>':
            case '|':
            case '=':
            case ':':
                tokens.addToken(input.position(input.index()), input.index(), input.index() + 1, SYMBOL);
                break;
            case '.':
                tokens.addToken(input.position(input.index()), input.index(), input.index() + 1, DECIMAL);
                break;
            case '\"':
                startIndex = input.index();
                Position startingPosition = input.position(startIndex);
                boolean foundClosingQuote = false;
                while (input.hasNext()) {
                    c = input.next();
                    if ((c == '\\' || c == '"') && input.isNext('"')) {
                        // consume the '"' character since it is escaped
                        c = input.next();
                    } else if (c == '"') {
                        foundClosingQuote = true;
                        break;
                    }
                }
                if (!foundClosingQuote) {
                    String msg = "No matching double quote found after at line " + startingPosition.line() + ", column " + startingPosition.column();
                    throw new ParsingException(startingPosition, msg);
                }
                // beyond last character read
                endIndex = input.index() + 1;
                if (removeQuotes && endIndex - startIndex > 1) {
                    // At least one quoted character, so remove the quotes ...
                    startIndex += 1;
                    endIndex -= 1;
                }
                tokens.addToken(startingPosition, startIndex, endIndex, DOUBLE_QUOTED_STRING);
                break;
            // back-quote character
            case '`':
            // left single-quote character
            case '\u2018':
            // right single-quote character
            case '\u2019':
            // single-quote character
            case '\'':
                char quoteChar = c;
                startIndex = input.index();
                startingPosition = input.position(startIndex);
                foundClosingQuote = false;
                while (input.hasNext()) {
                    c = input.next();
                    if ((c == '\\' || c == quoteChar) && input.isNext(quoteChar)) {
                        // consume the character since it is escaped
                        c = input.next();
                    } else if (c == quoteChar) {
                        foundClosingQuote = true;
                        break;
                    }
                }
                if (!foundClosingQuote) {
                    String msg = "No matching single quote found after line " + startingPosition.line() + ", column " + startingPosition.column();
                    throw new ParsingException(startingPosition, msg);
                }
                // beyond last character read
                endIndex = input.index() + 1;
                if (removeQuotes && endIndex - startIndex > 1) {
                    // At least one quoted character, so remove the quotes ...
                    startIndex += 1;
                    endIndex -= 1;
                }
                tokens.addToken(startingPosition, startIndex, endIndex, SINGLE_QUOTED_STRING);
                break;
            case '/':
                startIndex = input.index();
                startingPosition = input.position(startIndex);
                if (input.isNext('/')) {
                    // End-of-line comment ...
                    boolean foundLineTerminator = false;
                    while (input.hasNext()) {
                        c = input.next();
                        if (c == '\n' || c == '\r') {
                            foundLineTerminator = true;
                            break;
                        }
                    }
                    // the token won't include the '\n' or '\r' character(s)
                    endIndex = input.index();
                    // must point beyond last char
                    if (!foundLineTerminator)
                        ++endIndex;
                    if (c == '\r' && input.isNext('\n'))
                        input.next();
                    if (useComments) {
                        tokens.addToken(startingPosition, startIndex, endIndex, COMMENT);
                    }
                } else if (input.isNext('*')) {
                    // Multi-line comment ...
                    while (input.hasNext() && !input.isNext('*', '/')) {
                        c = input.next();
                    }
                    // consume the '*'
                    if (input.hasNext())
                        input.next();
                    // consume the '/'
                    if (input.hasNext())
                        input.next();
                    // the token will include the '/' and '*' characters
                    endIndex = input.index() + 1;
                    if (useComments) {
                        tokens.addToken(startingPosition, startIndex, endIndex, COMMENT);
                    }
                } else {
                    // just a regular slash ...
                    tokens.addToken(startingPosition, startIndex, startIndex + 1, SYMBOL);
                }
                break;
            default:
                startIndex = input.index();
                Position startPosition = input.position(startIndex);
                // Read until another whitespace/symbol/decimal/slash/quote is found
                while (input.hasNext() && !(input.isNextWhitespace() || input.isNextAnyOf("/.-(){}*,;+%?[]!<>|=:'`\u2018\u2019\"\u2019"))) {
                    c = input.next();
                }
                // beyond last character that was included
                endIndex = input.index() + 1;
                tokens.addToken(startPosition, startIndex, endIndex, WORD);
        }
    }
}
Also used: Position (io.debezium.text.Position), ParsingException (io.debezium.text.ParsingException)
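
The tokenizer turns an unterminated quote into a ParsingException that carries the position of the opening quote. The sketch below shows a caller treating that as a soft failure; only the exception handling is taken from the snippet, while the TokenStream(content, tokenizer, caseSensitive) constructor, the start() call, and the DdlTokenizer(useComments) constructor and import location are assumptions about this framework.

import io.debezium.relational.ddl.DdlTokenizer;   // assumed package
import io.debezium.text.ParsingException;
import io.debezium.text.TokenStream;

public class TokenizeSketch {

    // Tokenize a DDL fragment up front and report unterminated quotes instead of failing hard.
    static boolean canTokenize(String ddl) {
        try {
            // 'true' would correspond to the useComments flag referenced in the tokenizer above
            TokenStream stream = new TokenStream(ddl, new DdlTokenizer(true), false);
            stream.start();   // drives tokenize(...), which may throw ParsingException
            return true;
        } catch (ParsingException e) {
            // e.g. "No matching double quote found after line 3, column 17"
            System.err.println("Cannot tokenize DDL: " + e.getMessage());
            return false;
        }
    }
}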

Example 3 with ParsingException

Use of io.debezium.text.ParsingException in project debezium by debezium.

The class MySqlDdlParser, method parseAlterTable.

protected void parseAlterTable(Marker start) {
    tokens.canConsume("IGNORE");
    tokens.consume("TABLE");
    TableId tableId = parseQualifiedTableName(start);
    TableEditor table = databaseTables.editTable(tableId);
    TableId oldTableId = null;
    if (table != null) {
        AtomicReference<TableId> newTableName = new AtomicReference<>(null);
        if (!tokens.matches(terminator()) && !tokens.matches("PARTITION")) {
            parseAlterSpecificationList(start, table, newTableName::set);
        }
        if (tokens.matches("PARTITION")) {
            parsePartitionOptions(start, table);
        }
        databaseTables.overwriteTable(table.create());
        if (newTableName.get() != null) {
            // the table was renamed ...
            Table renamed = databaseTables.renameTable(tableId, newTableName.get());
            if (renamed != null) {
                oldTableId = tableId;
                tableId = renamed.id();
            }
        }
    } else {
        Marker marker = tokens.mark();
        try {
            // We don't know about this table but we still have to parse the statement ...
            table = TableEditor.noOp(tableId);
            if (!tokens.matches(terminator()) && !tokens.matches("PARTITION")) {
                parseAlterSpecificationList(start, table, str -> {
                });
            }
            if (tokens.matches("PARTITION")) {
                parsePartitionOptions(start, table);
            }
            parseTableOptions(start, table);
            // do nothing further with this no-op table editor ...
        } catch (ParsingException e) {
            tokens.rewind(marker);
            consumeRemainingStatement(start);
        }
    }
    signalAlterTable(tableId, oldTableId, start);
}
Also used: TableId (io.debezium.relational.TableId), Table (io.debezium.relational.Table), ParsingException (io.debezium.text.ParsingException), AtomicReference (java.util.concurrent.atomic.AtomicReference), Marker (io.debezium.text.TokenStream.Marker), TableEditor (io.debezium.relational.TableEditor)
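
The unknown-table branch above shows a recovery idiom used throughout this parser: mark the stream, attempt an optimistic parse, and on ParsingException rewind to the mark and skip the remainder of the statement. Here is a minimal sketch of that idiom on its own, using only TokenStream operations that appear in these snippets (mark, rewind, canConsume, consume); the skipToTerminator helper is a hypothetical stand-in for consumeRemainingStatement.

import io.debezium.text.ParsingException;
import io.debezium.text.TokenStream;
import io.debezium.text.TokenStream.Marker;

public class RewindOnErrorSketch {

    // Try an optimistic parse; if it fails, rewind to the mark and skip to the end of the statement.
    static void parseWithRecovery(TokenStream tokens, Runnable optimisticParse) {
        Marker marker = tokens.mark();
        try {
            optimisticParse.run();
        } catch (ParsingException e) {
            tokens.rewind(marker);
            skipToTerminator(tokens);
        }
    }

    // Hypothetical stand-in for consumeRemainingStatement(...): consume tokens up to and including ';'.
    static void skipToTerminator(TokenStream tokens) {
        while (tokens.hasNext() && !tokens.canConsume(";")) {
            tokens.consume();
        }
    }
}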

Example 4 with ParsingException

Use of io.debezium.text.ParsingException in project debezium by debezium.

The class MySqlSchema, method applyDdl.

/**
 * Apply the supplied DDL statements to this database schema and record the history. If a {@code statementConsumer} is
 * supplied, then call it for each sub-sequence of the DDL statements that all apply to the same database.
 * <p>
 * Typically DDL statements are applied using a connection to a single database, and unless the statements use fully-qualified
 * names, the DDL statements apply to this database.
 *
 * @param source the current {@link SourceInfo#partition()} and {@link SourceInfo#offset() offset} at which these changes are
 *            found; may not be null
 * @param databaseName the name of the default database under which these statements are applied; may not be null
 * @param ddlStatements the {@code ;}-separated DDL statements; may be null or empty
 * @param statementConsumer the consumer that should be called with each sub-sequence of DDL statements that apply to
 *            a single database; may be null if no action is to be performed with the changes
 * @return {@code true} if changes were made to the database schema, or {@code false} if the DDL statements had no
 *         effect on the database schema
 */
public boolean applyDdl(SourceInfo source, String databaseName, String ddlStatements, DatabaseStatementStringConsumer statementConsumer) {
    Set<TableId> changes;
    if (ignoredQueryStatements.contains(ddlStatements))
        return false;
    try {
        this.ddlChanges.reset();
        this.ddlParser.setCurrentSchema(databaseName);
        this.ddlParser.parse(ddlStatements, tables);
    } catch (ParsingException e) {
        if (skipUnparseableDDL) {
            logger.warn("Ignoring unparseable DDL statement '{}': {}", ddlStatements);
        } else {
            throw e;
        }
    } finally {
        changes = tables.drainChanges();
        // Emit and record the statements unless we only store DDL for monitored tables and nothing changed ...
        if (!storeOnlyMonitoredTablesDdl || !changes.isEmpty()) {
            if (statementConsumer != null) {
                if (!ddlChanges.isEmpty() && ddlChanges.applyToMoreDatabasesThan(databaseName)) {
                    // We understood at least some of the DDL statements and can figure out to which database they apply.
                    // They also apply to more databases than 'databaseName', so we need to apply the DDL statements in
                    // the same order they were read for each _affected_ database, grouped together if multiple apply
                    // to the same _affected_ database...
                    ddlChanges.groupStatementStringsByDatabase((dbName, ddl) -> {
                        if (filters.databaseFilter().test(dbName) || dbName == null || "".equals(dbName)) {
                            if (dbName == null)
                                dbName = "";
                            statementConsumer.consume(dbName, ddlStatements);
                        }
                    });
                } else if (filters.databaseFilter().test(databaseName) || databaseName == null || "".equals(databaseName)) {
                    if (databaseName == null)
                        databaseName = "";
                    statementConsumer.consume(databaseName, ddlStatements);
                }
            }
            // Record the DDL statement(s) in the database history so the schema can be recovered later ...
            try {
                if (!storeOnlyMonitoredTablesDdl || changes.stream().anyMatch(filters().tableFilter()::test)) {
                    dbHistory.record(source.partition(), source.offset(), databaseName, ddlStatements);
                } else {
                    logger.debug("Changes for DDL '{}' were filtered and not recorded in database history", ddlStatements);
                }
            } catch (Throwable e) {
                throw new ConnectException("Error recording the DDL statement(s) in the database history " + dbHistory + ": " + ddlStatements, e);
            }
        }
    }
    // Figure out what changed ...
    changes.forEach(tableId -> {
        Table table = tables.forTable(tableId);
        if (table == null) {
            // removed
            tableSchemaByTableId.remove(tableId);
        } else {
            TableSchema schema = schemaBuilder.create(schemaPrefix, getEnvelopeSchemaName(table), table, filters.columnFilter(), filters.columnMappers());
            tableSchemaByTableId.put(tableId, schema);
        }
    });
    return true;
}
Also used: TableId (io.debezium.relational.TableId), Table (io.debezium.relational.Table), TableSchema (io.debezium.relational.TableSchema), ParsingException (io.debezium.text.ParsingException), ConnectException (org.apache.kafka.connect.errors.ConnectException)
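
The Javadoc above spells out the contract of applyDdl: apply the ;-separated statements under a default database and, if a consumer is supplied, call it once per affected database. Below is a minimal sketch of a call site, assuming the MySqlSchema and SourceInfo instances already exist, that their packages are as shown, and that DatabaseStatementStringConsumer can be written as a two-argument lambda (it is invoked with two strings inside applyDdl).

import io.debezium.connector.mysql.MySqlSchema;   // assumed package
import io.debezium.connector.mysql.SourceInfo;    // assumed package

public class ApplyDdlSketch {

    // Hypothetical call site: constructing MySqlSchema and SourceInfo is out of scope here.
    static void applySchemaChange(MySqlSchema schema, SourceInfo source) {
        String ddl = "CREATE TABLE inventory.customers (id INT PRIMARY KEY, name VARCHAR(255));";
        boolean changed = schema.applyDdl(source, "inventory", ddl, (dbName, statements) -> {
            // Invoked once per affected database with the statements that apply to it ...
            System.out.println("DDL for database '" + dbName + "': " + statements);
        });
        System.out.println("Schema changed: " + changed);
    }
}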

Example 5 with ParsingException

Use of io.debezium.text.ParsingException in project debezium by debezium.

The class MySqlDdlParser, method parseCreateDefinition.

/**
 * @param isAlterStatement whether this is an ALTER TABLE statement (as opposed to a CREATE TABLE statement)
 */
protected void parseCreateDefinition(Marker start, TableEditor table, boolean isAlterStatement) {
    // If the first token is a quoted identifier, then we know it is a column name ...
    Collection<ParsingException> errors = null;
    boolean quoted = isNextTokenQuotedIdentifier();
    Marker defnStart = tokens.mark();
    if (!quoted) {
        // The first token is not quoted so let's check for other expressions ...
        if (tokens.canConsume("CHECK")) {
            // Try to parse the constraints first ...
            consumeExpression(start);
            return;
        }
        if (tokens.canConsume("CONSTRAINT", TokenStream.ANY_VALUE, "PRIMARY", "KEY") || tokens.canConsume("CONSTRAINT", "PRIMARY", "KEY") || tokens.canConsume("PRIMARY", "KEY")) {
            try {
                if (tokens.canConsume("USING")) {
                    parseIndexType(start);
                }
                if (!tokens.matches('(')) {
                    // index name
                    tokens.consume();
                }
                List<String> pkColumnNames = parseIndexColumnNames(start);
                table.setPrimaryKeyNames(pkColumnNames);
                parseIndexOptions(start);
                // MySQL does not allow a primary key to have nullable columns, so let's make sure we model that correctly ...
                pkColumnNames.forEach(name -> {
                    Column c = table.columnWithName(name);
                    if (c != null && c.isOptional()) {
                        table.addColumn(c.edit().optional(false).create());
                    }
                });
                return;
            } catch (ParsingException e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            } catch (MultipleParsingExceptions e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            }
        }
        if (tokens.canConsume("CONSTRAINT", TokenStream.ANY_VALUE, "UNIQUE") || tokens.canConsume("CONSTRAINT", "UNIQUE") || tokens.canConsume("UNIQUE")) {
            tokens.canConsumeAnyOf("KEY", "INDEX");
            try {
                if (!tokens.matches('(')) {
                    if (!tokens.matches("USING")) {
                        // name of unique index ...
                        tokens.consume();
                    }
                    if (tokens.matches("USING")) {
                        parseIndexType(start);
                    }
                }
                List<String> uniqueKeyColumnNames = parseIndexColumnNames(start);
                if (table.primaryKeyColumnNames().isEmpty()) {
                    // this may eventually get overwritten by a real PK
                    table.setPrimaryKeyNames(uniqueKeyColumnNames);
                }
                parseIndexOptions(start);
                return;
            } catch (ParsingException e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            } catch (MultipleParsingExceptions e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            }
        }
        if (tokens.canConsume("CONSTRAINT", TokenStream.ANY_VALUE, "FOREIGN", "KEY") || tokens.canConsume("FOREIGN", "KEY")) {
            try {
                if (!tokens.matches('(')) {
                    // name of foreign key
                    tokens.consume();
                }
                parseIndexColumnNames(start);
                if (tokens.matches("REFERENCES")) {
                    parseReferenceDefinition(start);
                }
                return;
            } catch (ParsingException e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            } catch (MultipleParsingExceptions e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            }
        }
        if (tokens.canConsumeAnyOf("INDEX", "KEY")) {
            try {
                if (!tokens.matches('(')) {
                    if (!tokens.matches("USING")) {
                        // name of unique index ...
                        tokens.consume();
                    }
                    if (tokens.matches("USING")) {
                        parseIndexType(start);
                    }
                }
                parseIndexColumnNames(start);
                parseIndexOptions(start);
                return;
            } catch (ParsingException e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            } catch (MultipleParsingExceptions e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            }
        }
        if (tokens.canConsumeAnyOf("FULLTEXT", "SPATIAL")) {
            try {
                tokens.canConsumeAnyOf("INDEX", "KEY");
                if (!tokens.matches('(')) {
                    // name of unique index ...
                    tokens.consume();
                }
                parseIndexColumnNames(start);
                parseIndexOptions(start);
                return;
            } catch (ParsingException e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            } catch (MultipleParsingExceptions e) {
                // Invalid names, so rewind and continue
                errors = accumulateParsingFailure(e, errors);
                tokens.rewind(defnStart);
            }
        }
    }
    try {
        // What remains must be a column definition, whether or not the column name is quoted ...
        if (isAlterStatement && !quoted) {
            // optional for ALTER TABLE
            tokens.canConsume("COLUMN");
        }
        String columnName = parseColumnName();
        parseCreateColumn(start, table, columnName, null);
    } catch (ParsingException e) {
        if (errors != null) {
            errors = accumulateParsingFailure(e, errors);
            throw new MultipleParsingExceptions(errors);
        }
        throw e;
    } catch (MultipleParsingExceptions e) {
        if (errors != null) {
            errors = accumulateParsingFailure(e, errors);
            throw new MultipleParsingExceptions(errors);
        }
        throw e;
    }
}
Also used: MultipleParsingExceptions (io.debezium.text.MultipleParsingExceptions), Column (io.debezium.relational.Column), ParsingException (io.debezium.text.ParsingException), Marker (io.debezium.text.TokenStream.Marker)
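
Example 5 accumulates recoverable failures with accumulateParsingFailure and only escalates to a MultipleParsingExceptions once the final column-definition attempt also fails. The sketch below isolates that accumulate-then-escalate pattern using only the exception types shown on this page; the alternatives themselves are hypothetical Runnables, and in the real parser each failed alternative also rewinds the token stream before the next attempt.

import java.util.ArrayList;
import java.util.List;

import io.debezium.text.MultipleParsingExceptions;
import io.debezium.text.ParsingException;

public class AccumulateErrorsSketch {

    // Try each alternative in order; remember failures and only give up when every alternative fails.
    static void tryAlternatives(List<Runnable> alternatives) {
        List<ParsingException> errors = new ArrayList<>();
        for (Runnable alternative : alternatives) {
            try {
                alternative.run();
                return;   // the first successful alternative wins
            } catch (ParsingException e) {
                errors.add(e);   // remember the failure and fall through to the next alternative
            }
        }
        throw new MultipleParsingExceptions(errors);
    }
}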

Aggregations

ParsingException (io.debezium.text.ParsingException): 9 usages
Marker (io.debezium.text.TokenStream.Marker): 7 usages
TableId (io.debezium.relational.TableId): 3 usages
ArrayList (java.util.ArrayList): 3 usages
Table (io.debezium.relational.Table): 2 usages
TableEditor (io.debezium.relational.TableEditor): 2 usages
Column (io.debezium.relational.Column): 1 usage
TableSchema (io.debezium.relational.TableSchema): 1 usage
DataType (io.debezium.relational.ddl.DataType): 1 usage
MultipleParsingExceptions (io.debezium.text.MultipleParsingExceptions): 1 usage
Position (io.debezium.text.Position): 1 usage
LinkedList (java.util.LinkedList): 1 usage
List (java.util.List): 1 usage
AtomicReference (java.util.concurrent.atomic.AtomicReference): 1 usage
ConnectException (org.apache.kafka.connect.errors.ConnectException): 1 usage