Use of io.debezium.relational.Column in project debezium by debezium.
The class JdbcConnection, method readSchema.
/**
 * Create definitions for each table in the database, given the catalog name, schema pattern, table filter, and
 * column filter.
 *
 * @param tables the set of table definitions to be modified; may not be null
 * @param databaseCatalog the name of the catalog, which is typically the database name; may be null if all accessible
 *            databases are to be processed
 * @param schemaNamePattern the pattern used to match database schema names, which may be "" to match only those tables with
 *            no schema or null to process all accessible tables regardless of database schema name
 * @param tableFilter used to determine which tables are to be processed; may be null if all accessible tables are to be
 *            processed
 * @param columnFilter used to determine which columns should be included as fields in its table's definition; may
 *            be null if all columns for all tables are to be included
 * @param removeTablesNotFoundInJdbc {@code true} if this method should remove from {@code tables} any definitions for tables
 *            that are not found in the database metadata, or {@code false} if such tables should be left untouched
 * @throws SQLException if an error occurs while accessing the database metadata
 */
public void readSchema(Tables tables, String databaseCatalog, String schemaNamePattern,
                       TableNameFilter tableFilter, ColumnNameFilter columnFilter,
                       boolean removeTablesNotFoundInJdbc) throws SQLException {
    // Before we make any changes, get the copy of the set of table IDs ...
    Set<TableId> tableIdsBefore = new HashSet<>(tables.tableIds());

    // Read the metadata for the table columns ...
    DatabaseMetaData metadata = connection().getMetaData();

    // Find views, as they cannot be snapshotted ...
    final Set<TableId> viewIds = new HashSet<>();
    try (final ResultSet rs = metadata.getTables(databaseCatalog, schemaNamePattern, null, new String[]{ "VIEW" })) {
        while (rs.next()) {
            final String catalogName = rs.getString(1);
            final String schemaName = rs.getString(2);
            final String tableName = rs.getString(3);
            viewIds.add(new TableId(catalogName, schemaName, tableName));
        }
    }

    ConcurrentMap<TableId, List<Column>> columnsByTable = new ConcurrentHashMap<>();
    try (ResultSet rs = metadata.getColumns(databaseCatalog, schemaNamePattern, null, null)) {
        while (rs.next()) {
            String catalogName = rs.getString(1);
            String schemaName = rs.getString(2);
            String tableName = rs.getString(3);
            TableId tableId = new TableId(catalogName, schemaName, tableName);
            if (viewIds.contains(tableId)) {
                continue;
            }
            if (tableFilter == null || tableFilter.matches(catalogName, schemaName, tableName)) {
                List<Column> cols = columnsByTable.computeIfAbsent(tableId, name -> new ArrayList<>());
                String columnName = rs.getString(4);
                if (columnFilter == null || columnFilter.matches(catalogName, schemaName, tableName, columnName)) {
                    ColumnEditor column = Column.editor().name(columnName);
                    column.jdbcType(rs.getInt(5));
                    column.type(rs.getString(6));
                    column.length(rs.getInt(7));
                    column.scale(rs.getInt(9));
                    column.optional(isNullable(rs.getInt(11)));
                    column.position(rs.getInt(17));
                    column.autoIncremented("YES".equalsIgnoreCase(rs.getString(23)));
                    String autogenerated = null;
                    try {
                        autogenerated = rs.getString(24);
                    } catch (SQLException e) {
                        // ignore, some drivers don't have this index - e.g. Postgres
                    }
                    column.generated("YES".equalsIgnoreCase(autogenerated));
                    column.nativeType(resolveNativeType(column.typeName()));
                    cols.add(column.create());
                }
            }
        }
    }

    // Read the metadata for the primary keys ...
    for (TableId id : columnsByTable.keySet()) {
        // First get the primary key information, which must be done for *each* table ...
        List<String> pkColumnNames = null;
        try (ResultSet rs = metadata.getPrimaryKeys(id.catalog(), id.schema(), id.table())) {
            while (rs.next()) {
                if (pkColumnNames == null) {
                    pkColumnNames = new ArrayList<>();
                }
                String columnName = rs.getString(4);
                int columnIndex = rs.getInt(5);
                Collect.set(pkColumnNames, columnIndex - 1, columnName, null);
            }
        }
        // Then define the table ...
        List<Column> columns = columnsByTable.get(id);
        Collections.sort(columns);
        // JDBC does not expose character sets
        String defaultCharsetName = null;
        tables.overwriteTable(id, columns, pkColumnNames, defaultCharsetName);
    }

    if (removeTablesNotFoundInJdbc) {
        // Remove any definitions for tables that were not found in the database metadata ...
        tableIdsBefore.removeAll(columnsByTable.keySet());
        tableIdsBefore.forEach(tables::removeTable);
    }
}
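
As a usage illustration, here is a minimal, hypothetical invocation of readSchema. The catalog name, the filter lambda, and the already-configured connection instance are assumptions made for the sketch, not part of the Debezium sources above.

// Hypothetical caller: read every table in the "inventory" catalog,
// skipping tables whose names start with "tmp_", keeping all columns,
// and dropping definitions for tables no longer present in the database.
Tables tables = new Tables();
connection.readSchema(tables,
                      "inventory",   // databaseCatalog
                      null,          // schemaNamePattern: all schemas
                      (catalog, schema, table) -> !table.startsWith("tmp_"), // tableFilter
                      null,          // columnFilter: include all columns
                      true);         // removeTablesNotFoundInJdbc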
Use of io.debezium.relational.Column in project debezium by debezium.
The class DdlParser, method parseColumnsInSelectClause.
/**
 * Parse the column information in the SELECT clause. This method stops before consuming the FROM clause.
 *
 * @param start the start of the statement
 * @return the map of resolved Columns keyed by the column alias (or name) used in the SELECT statement; never null but
 *         possibly empty if we couldn't parse the SELECT clause correctly
 */
protected Map<String, Column> parseColumnsInSelectClause(Marker start) {
    // Parse the column names ...
    Map<String, String> tableAliasByColumnAlias = new LinkedHashMap<>();
    Map<String, String> columnNameByAliases = new LinkedHashMap<>();
    parseColumnName(start, tableAliasByColumnAlias, columnNameByAliases);
    while (tokens.canConsume(',')) {
        parseColumnName(start, tableAliasByColumnAlias, columnNameByAliases);
    }

    // Parse the FROM clause, but we'll back up to the start of this before we return ...
    Marker startOfFrom = tokens.mark();
    Map<String, Column> columnsByName = new LinkedHashMap<>();
    Map<String, Table> fromTablesByAlias = parseSelectFromClause(start);
    Table singleTable = fromTablesByAlias.size() == 1 ? fromTablesByAlias.values().stream().findFirst().get() : null;
    tableAliasByColumnAlias.forEach((columnAlias, tableAlias) -> {
        // Resolve the alias into the actual column name in the referenced table ...
        String columnName = columnNameByAliases.getOrDefault(columnAlias, columnAlias);
        Column column = null;
        if (tableAlias == null) {
            // The column was not qualified with a table, so there should be a single table ...
            column = singleTable == null ? null : singleTable.columnWithName(columnName);
        } else {
            // The column was qualified with a table, so look it up ...
            Table table = fromTablesByAlias.get(tableAlias);
            column = table == null ? null : table.columnWithName(columnName);
        }
        if (column == null) {
            // Check to see whether the column name contains a constant value, in which case we need to create an
            // artificial column ...
            column = createColumnFromConstant(columnAlias, columnName);
        }
        // column may be null
        columnsByName.put(columnAlias, column);
    });
    tokens.rewind(startOfFrom);
    return columnsByName;
}
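
To make the alias resolution concrete, here is a hypothetical illustration of what the method returns; the statement, table, and column names are invented for the example:

// For a statement such as
//   SELECT t.id AS ident, name FROM my_table t
// with the parser positioned at the start of the column list,
// parseColumnsInSelectClause rewinds to just before FROM and returns roughly:
//   "ident" -> the Column named "id" in my_table   (alias resolved, table-qualified via "t")
//   "name"  -> the Column named "name" in my_table (unqualified; resolved against the single FROM table)
// If a select item is a constant, createColumnFromConstant supplies an artificial
// Column, and a map value may still be null when nothing can be resolved.
Map<String, Column> selectColumns = parseColumnsInSelectClause(tokens.mark());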
Use of io.debezium.relational.Column in project debezium by debezium.
The class ColumnMappers, method mapperFor.
/**
* Get the value mapping function for the given column.
*
* @param tableId the identifier of the table to which the column belongs; may not be null
* @param column the column; may not be null
* @return the mapping function, or null if there is no mapping function
*/
public ColumnMapper mapperFor(TableId tableId, Column column) {
    ColumnId id = new ColumnId(tableId, column.name());
    Optional<MapperRule> matchingRule = rules.stream().filter(rule -> rule.matches(id)).findFirst();
    if (matchingRule.isPresent()) {
        return matchingRule.get().mapper;
    }
    return null;
}
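
A short, hypothetical usage sketch; the mappers, tableId, and column variables are assumed to be in scope:

// Look up the mapper for a column and handle the no-rule case explicitly.
ColumnMapper mapper = mappers.mapperFor(tableId, column);
if (mapper != null) {
    // a configured rule (e.g. a masking or truncation rule) applies to this column's values
} else {
    // no rule matched this fully-qualified column; values pass through unchanged
}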
Use of io.debezium.relational.Column in project debezium by debezium.
The class MySqlDdlParserTest, method shouldParseCreateTableStatementWithCollate.
@Test
@FixFor("DBZ-474")
public void shouldParseCreateTableStatementWithCollate() {
    String ddl = "CREATE TABLE c1 (pk INT PRIMARY KEY, v1 CHAR(36) NOT NULL COLLATE utf8_unicode_ci);";
    parser.parse(ddl, tables);
    assertThat(tables.size()).isEqualTo(1);
    Table table = tables.forTable(new TableId(null, null, "c1"));
    assertThat(table).isNotNull();
    assertColumn(table, "v1", "CHAR", Types.CHAR, 36, -1, false, false, false);
    Column column = table.columnWithName("v1");
    assertThat(column.typeUsesCharset()).isTrue();
}
Use of io.debezium.relational.Column in project debezium by debezium.
The class MySqlDdlParser, method parseCreateIndex.
protected void parseCreateIndex(Marker start) {
    boolean unique = tokens.canConsume("UNIQUE");
    tokens.canConsumeAnyOf("FULLTEXT", "SPATIAL");
    tokens.consume("INDEX");
    // index name
    String indexName = tokens.consume();
    if (tokens.matches("USING")) {
        parseIndexType(start);
    }
    TableId tableId = null;
    if (tokens.canConsume("ON")) {
        // Usually this is required, but in some cases ON is not required
        tableId = parseQualifiedTableName(start);
    }
    if (unique && tableId != null) {
        // This is a unique index, and we can mark the index's columns as the primary key iff there is not already
        // a primary key on the table. (Should a PK be created later via an alter, then it will overwrite this.)
        TableEditor table = databaseTables.editTable(tableId);
        if (table != null && !table.hasPrimaryKey()) {
            List<String> names = parseIndexColumnNames(start);
            if (table.columns().stream().allMatch(Column::isRequired)) {
                databaseTables.overwriteTable(table.setPrimaryKeyNames(names).create());
            }
        }
    }
    // We don't care about any other statements or the rest of this statement ...
    consumeRemainingStatement(start);
    signalCreateIndex(indexName, tableId, start);
    debugParsed(start);
}
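
A hypothetical DDL fragment, written in the style of the parser test above, showing the path where a unique index is promoted to the primary key; the table and index names are invented for the sketch:

// t1 has no explicit primary key and all of its columns are NOT NULL,
// so the unique index columns become the table's primary key.
String ddl = "CREATE TABLE t1 (c1 INT NOT NULL, c2 INT NOT NULL);"
           + "CREATE UNIQUE INDEX idx1 ON t1 (c1);";
parser.parse(ddl, tables);
Table table = tables.forTable(new TableId(null, null, "t1"));
assertThat(table.primaryKeyColumnNames()).isEqualTo(Collections.singletonList("c1"));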