Use of io.debezium.relational.TableId in project debezium by debezium.
Class MySqlDdlParser, method parseAlterTable:
protected void parseAlterTable(Marker start) {
    tokens.canConsume("IGNORE");
    tokens.consume("TABLE");
    TableId tableId = parseQualifiedTableName(start);
    TableEditor table = databaseTables.editTable(tableId);
    TableId oldTableId = null;
    if (table != null) {
        AtomicReference<TableId> newTableName = new AtomicReference<>(null);
        if (!tokens.matches(terminator()) && !tokens.matches("PARTITION")) {
            parseAlterSpecificationList(start, table, newTableName::set);
        }
        if (tokens.matches("PARTITION")) {
            parsePartitionOptions(start, table);
        }
        databaseTables.overwriteTable(table.create());
        if (newTableName.get() != null) {
            // the table was renamed ...
            Table renamed = databaseTables.renameTable(tableId, newTableName.get());
            if (renamed != null) {
                oldTableId = tableId;
                tableId = renamed.id();
            }
        }
    } else {
        Marker marker = tokens.mark();
        try {
            // We don't know about this table but we still have to parse the statement ...
            table = TableEditor.noOp(tableId);
            if (!tokens.matches(terminator()) && !tokens.matches("PARTITION")) {
                parseAlterSpecificationList(start, table, str -> {
                });
            }
            if (tokens.matches("PARTITION")) {
                parsePartitionOptions(start, table);
            }
            parseTableOptions(start, table);
            // do nothing with this
        } catch (ParsingException e) {
            tokens.rewind(marker);
            consumeRemainingStatement(start);
        }
    }
    signalAlterTable(tableId, oldTableId, start);
}
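The rename path above (the newTableName reference handed to parseAlterSpecificationList, then databaseTables.renameTable) is where TableId is handled most directly. Below is a minimal, hypothetical sketch that exercises it through the parser's public API; it assumes the legacy MySqlDdlParser no-argument constructor and the Tables/TableId calls already visible in these snippets, and the commented expectations are educated guesses rather than verified output.

import io.debezium.connector.mysql.MySqlDdlParser;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables;

public class AlterTableRenameSketch {
    public static void main(String[] args) {
        MySqlDdlParser parser = new MySqlDdlParser();
        Tables tables = new Tables();
        parser.setCurrentSchema("inventory"); // unqualified names resolve against this database

        parser.parse("CREATE TABLE customers (id INT NOT NULL PRIMARY KEY, name VARCHAR(255));", tables);
        parser.parse("ALTER TABLE customers RENAME TO clients;", tables);

        // After the rename is applied by parseAlterTable, the old TableId should no
        // longer resolve while the new one should.
        System.out.println(tables.forTable(new TableId("inventory", null, "customers"))); // expected: null
        System.out.println(tables.forTable(new TableId("inventory", null, "clients")));   // expected: the renamed table
    }
}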
Use of io.debezium.relational.TableId in project debezium by debezium.
Class MySqlDdlParser, method parseCreateIndex:
protected void parseCreateIndex(Marker start) {
    boolean unique = tokens.canConsume("UNIQUE");
    tokens.canConsumeAnyOf("FULLTEXT", "SPATIAL");
    tokens.consume("INDEX");
    // index name
    String indexName = tokens.consume();
    if (tokens.matches("USING")) {
        parseIndexType(start);
    }
    TableId tableId = null;
    if (tokens.canConsume("ON")) {
        // Usually this is required, but in some cases ON is not required
        tableId = parseQualifiedTableName(start);
    }
    if (unique && tableId != null) {
        // This is a unique index, and we can mark the index's columns as the primary key iff there is not already
        // a primary key on the table. (Should a PK be created later via an alter, then it will overwrite this.)
        TableEditor table = databaseTables.editTable(tableId);
        if (table != null && !table.hasPrimaryKey()) {
            List<String> names = parseIndexColumnNames(start);
            if (table.columns().stream().allMatch(Column::isRequired)) {
                databaseTables.overwriteTable(table.setPrimaryKeyNames(names).create());
            }
        }
    }
    // We don't care about any other statements or the rest of this statement ...
    consumeRemainingStatement(start);
    signalCreateIndex(indexName, tableId, start);
    debugParsed(start);
}
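To observe the primary-key promotion above from outside the parser, the hedged sketch below feeds two DDL statements through the same legacy parser and reads the resulting key columns back by TableId. It assumes the Tables/TableId API used in these snippets plus Table.primaryKeyColumnNames() for inspection; note that the promotion only happens when every column is NOT NULL, because of the allMatch(Column::isRequired) guard.

import io.debezium.connector.mysql.MySqlDdlParser;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables;

public class UniqueIndexAsKeySketch {
    public static void main(String[] args) {
        MySqlDdlParser parser = new MySqlDdlParser();
        Tables tables = new Tables();
        parser.setCurrentSchema("inventory");

        // No explicit primary key, but every column is NOT NULL, so the unique
        // index columns may be promoted to the primary key by parseCreateIndex.
        parser.parse("CREATE TABLE customers (email VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL);", tables);
        parser.parse("CREATE UNIQUE INDEX idx_email ON customers (email);", tables);

        Table table = tables.forTable(new TableId("inventory", null, "customers"));
        System.out.println("primary key columns: " + table.primaryKeyColumnNames()); // expected: [email]
    }
}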
Use of io.debezium.relational.TableId in project debezium by debezium.
Class MySqlSchema, method applyDdl:
/**
* Apply the supplied DDL statements to this database schema and record the history. If a {@code statementConsumer} is
* supplied, then call it for each sub-sequence of the DDL statements that all apply to the same database.
* <p>
* Typically DDL statements are applied using a connection to a single database, and unless the statements use fully-qualified
* names, the DDL statements apply to this database.
*
* @param source the current {@link SourceInfo#partition()} and {@link SourceInfo#offset() offset} at which these changes are
* found; may not be null
* @param databaseName the name of the default database under which these statements are applied; may not be null
* @param ddlStatements the {@code ;}-separated DDL statements; may be null or empty
* @param statementConsumer the consumer that should be called with each sub-sequence of DDL statements that apply to
* a single database; may be null if no action is to be performed with the changes
* @return {@code true} if changes were made to the database schema, or {@code false} if the DDL statements had no
* effect on the database schema
*/
public boolean applyDdl(SourceInfo source, String databaseName, String ddlStatements, DatabaseStatementStringConsumer statementConsumer) {
    Set<TableId> changes;
    if (ignoredQueryStatements.contains(ddlStatements)) return false;
    try {
        this.ddlChanges.reset();
        this.ddlParser.setCurrentSchema(databaseName);
        this.ddlParser.parse(ddlStatements, tables);
    } catch (ParsingException e) {
        if (skipUnparseableDDL) {
            logger.warn("Ignoring unparseable DDL statement '{}': {}", ddlStatements, e);
        } else {
            throw e;
        }
    } finally {
        changes = tables.drainChanges();
        // Unless we are storing only the DDL of monitored tables and nothing changed, pass the statements on and record them ...
        if (!storeOnlyMonitoredTablesDdl || !changes.isEmpty()) {
            if (statementConsumer != null) {
                if (!ddlChanges.isEmpty() && ddlChanges.applyToMoreDatabasesThan(databaseName)) {
                    // We understood at least some of the DDL statements and can figure out to which database they apply.
                    // They also apply to more databases than 'databaseName', so we need to apply the DDL statements in
                    // the same order they were read for each _affected_ database, grouped together if multiple apply
                    // to the same _affected_ database...
                    ddlChanges.groupStatementStringsByDatabase((dbName, ddl) -> {
                        if (filters.databaseFilter().test(dbName) || dbName == null || "".equals(dbName)) {
                            if (dbName == null) dbName = "";
                            statementConsumer.consume(dbName, ddl); // emit only the statements grouped for this database
                        }
                    });
                } else if (filters.databaseFilter().test(databaseName) || databaseName == null || "".equals(databaseName)) {
                    if (databaseName == null) databaseName = "";
                    statementConsumer.consume(databaseName, ddlStatements);
                }
            }
            // Record the DDL statements in the database history so that the schema can be recovered later ...
            try {
                if (!storeOnlyMonitoredTablesDdl || changes.stream().anyMatch(filters().tableFilter()::test)) {
                    dbHistory.record(source.partition(), source.offset(), databaseName, ddlStatements);
                } else {
                    logger.debug("Changes for DDL '{}' were filtered and not recorded in database history", ddlStatements);
                }
            } catch (Throwable e) {
                throw new ConnectException("Error recording the DDL statement(s) in the database history " + dbHistory + ": " + ddlStatements, e);
            }
        }
    }
    // Figure out what changed ...
    changes.forEach(tableId -> {
        Table table = tables.forTable(tableId);
        if (table == null) {
            // removed
            tableSchemaByTableId.remove(tableId);
        } else {
            TableSchema schema = schemaBuilder.create(schemaPrefix, getEnvelopeSchemaName(table), table, filters.columnFilter(), filters.columnMappers());
            tableSchemaByTableId.put(tableId, schema);
        }
    });
    return true;
}
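The final loop in applyDdl keeps tableSchemaByTableId in step with the parsed schema, which works because TableId has value-based equals and hashCode and can therefore act as a map key. The sketch below illustrates just that property; the String value is a stand-in for the real TableSchema and is not Debezium API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import io.debezium.relational.TableId;

public class TableIdAsMapKey {
    public static void main(String[] args) {
        // A String stands in for TableSchema here purely for illustration.
        Map<TableId, String> schemaByTableId = new ConcurrentHashMap<>();
        schemaByTableId.put(new TableId("inventory", null, "customers"), "schema-v1");

        // A separately constructed TableId naming the same table hits the same entry.
        TableId sameTable = new TableId("inventory", null, "customers");
        System.out.println(schemaByTableId.get(sameTable)); // schema-v1

        // When a table is dropped, applyDdl removes its entry the same way.
        schemaByTableId.remove(sameTable);
    }
}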
Use of io.debezium.relational.TableId in project debezium by debezium.
Class RecordsStreamProducer, method process:
private void process(ReplicationMessage message, Long lsn, BlockingConsumer<ChangeEvent> consumer) throws SQLException, InterruptedException {
    if (message == null) {
        // in some cases we can get null if PG gives us back a message earlier than the latest reported flushed LSN
        return;
    }
    TableId tableId = PostgresSchema.parse(message.getTable());
    assert tableId != null;
    // update the source info with the coordinates for this message
    long commitTimeNs = message.getCommitTime();
    long txId = message.getTransactionId();
    sourceInfo.update(lsn, commitTimeNs, txId);
    if (logger.isDebugEnabled()) {
        logger.debug("received new message at position {}\n{}", ReplicationConnection.format(lsn), message);
    }
    TableSchema tableSchema = tableSchemaFor(tableId);
    if (tableSchema == null) {
        return;
    }
    if (tableSchema.keySchema() == null) {
        logger.warn("ignoring message for table '{}' because it does not have a primary key defined", tableId);
    }
    ReplicationMessage.Operation operation = message.getOperation();
    switch (operation) {
        case INSERT: {
            Object[] row = columnValues(message.getNewTupleList(), tableId, true, message.hasTypeMetadata());
            generateCreateRecord(tableId, row, message.isLastEventForLsn(), consumer);
            break;
        }
        case UPDATE: {
            Object[] newRow = columnValues(message.getNewTupleList(), tableId, true, message.hasTypeMetadata());
            Object[] oldRow = columnValues(message.getOldTupleList(), tableId, false, message.hasTypeMetadata());
            generateUpdateRecord(tableId, oldRow, newRow, message.isLastEventForLsn(), consumer);
            break;
        }
        case DELETE: {
            Object[] row = columnValues(message.getOldTupleList(), tableId, false, message.hasTypeMetadata());
            generateDeleteRecord(tableId, row, message.isLastEventForLsn(), consumer);
            break;
        }
        default: {
            logger.warn("unknown message operation: " + operation);
        }
    }
}
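For PostgreSQL the table named in a replication message is schema-qualified, so the TableId built for it conventionally fills the schema slot and leaves the catalog null, the mirror image of the MySQL case in the next snippet. The helper below is a hypothetical illustration of that convention and of the assumed default schema, not the implementation of PostgresSchema.parse(...).

import io.debezium.relational.TableId;

public class PostgresTableIdSketch {
    // Hypothetical helper: turn a schema-qualified relation name into a TableId,
    // leaving the catalog null in the Postgres style.
    static TableId fromSchemaQualifiedName(String name) {
        String[] parts = name.split("\\.", 2);
        return parts.length == 2
                ? new TableId(null, parts[0], parts[1])   // e.g. "public.customers"
                : new TableId(null, "public", parts[0]);  // default schema assumed here
    }

    public static void main(String[] args) {
        System.out.println(fromSchemaQualifiedName("public.customers"));
        System.out.println(fromSchemaQualifiedName("orders"));
    }
}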
Use of io.debezium.relational.TableId in project debezium by debezium.
Class BinlogReader, method handleUpdateTableMetadata:
/**
* Handle a change in the table metadata.
* <p>
* This method should be called whenever we consume a TABLE_MAP event, and every transaction in the log should include one
* of these for each table affected by the transaction. Each table map event includes a monotonically-increasing numeric
* identifier, and this identifier is used within subsequent events within the same transaction. This table identifier can
* change when:
* <ol>
* <li>the table structure is modified (e.g., via an {@code ALTER TABLE ...} command); or</li>
* <li>MySQL rotates to a new binary log file, even if the table structure does not change.</li>
* </ol>
*
* @param event the update event; never null
*/
protected void handleUpdateTableMetadata(Event event) {
    TableMapEventData metadata = unwrapData(event);
    long tableNumber = metadata.getTableId();
    String databaseName = metadata.getDatabase();
    String tableName = metadata.getTable();
    TableId tableId = new TableId(databaseName, null, tableName);
    if (recordMakers.assign(tableNumber, tableId)) {
        logger.debug("Received update table metadata event: {}", event);
    } else {
        informAboutUnknownTableIfRequired(event, tableId, "update table metadata");
    }
}
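handleUpdateTableMetadata binds the transient table number from a TABLE_MAP event to a stable TableId so that the row events that follow in the same transaction can be resolved. The class below is only an illustrative stand-in for that bookkeeping; recordMakers.assign(...) is the real mechanism, and the map here exists purely to show the idea.

import java.util.HashMap;
import java.util.Map;

import io.debezium.relational.TableId;

public class TableNumberBinding {
    private final Map<Long, TableId> tableIdsByNumber = new HashMap<>();

    // Called for each TABLE_MAP event, mirroring what recordMakers.assign(...) tracks.
    void onTableMapEvent(long tableNumber, String database, String table) {
        // MySQL has no separate schema level, so the database name goes in the catalog slot.
        tableIdsByNumber.put(tableNumber, new TableId(database, null, table));
    }

    // Later row events carry only the table number and are resolved through the binding.
    TableId tableIdFor(long tableNumber) {
        return tableIdsByNumber.get(tableNumber);
    }

    public static void main(String[] args) {
        TableNumberBinding binding = new TableNumberBinding();
        binding.onTableMapEvent(1042L, "inventory", "customers");
        System.out.println(binding.tableIdFor(1042L));
    }
}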