Example 11 with TableSchema

Use of io.debezium.relational.TableSchema in project debezium by debezium.

Class RecordsSnapshotProducer, method generateReadRecord: wraps one snapshot row in a read envelope and publishes it as the current source record.

protected void generateReadRecord(TableId tableId, Object[] rowData) {
    if (rowData == null || rowData.length == 0) {
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(rowData);
    Struct value = tableSchema.valueFromColumnData(rowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    sourceInfo.update(clock().currentTimeInMicros());
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
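    // wrap the row in a read ("r") envelope and expose it as the current record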
    currentRecord.set(new SourceRecord(partition, offset, topicName, null, keySchema, key, envelope.schema(), envelope.read(value, sourceInfo.source(), clock().currentTimeInMillis())));
}
Also used: TableSchema(io.debezium.relational.TableSchema), Schema(org.apache.kafka.connect.data.Schema), Envelope(io.debezium.data.Envelope), SourceRecord(org.apache.kafka.connect.source.SourceRecord), Struct(org.apache.kafka.connect.data.Struct)
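
The getEnvelopeSchema() call above returns Debezium's standard change-event envelope. Below is a minimal, self-contained sketch of building such an envelope and wrapping a row as a read event; the row and source schemas here are hypothetical stand-ins for tableSchema.valueSchema() and the connector's source-info schema, and the long-timestamp overload of read() matches the 0.x-era API used above.

import io.debezium.data.Envelope;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class EnvelopeReadSketch {
    public static void main(String[] args) {
        // hypothetical row schema standing in for tableSchema.valueSchema()
        Schema rowSchema = SchemaBuilder.struct().name("server1.public.users.Value")
                .field("id", Schema.INT32_SCHEMA)
                .field("name", Schema.STRING_SCHEMA)
                .build();
        // hypothetical source-info schema standing in for sourceInfo.source().schema()
        Schema sourceSchema = SchemaBuilder.struct().name("source")
                .field("lsn", Schema.OPTIONAL_INT64_SCHEMA)
                .build();
        Envelope envelope = Envelope.defineSchema()
                .withName("server1.public.users.Envelope")
                .withRecord(rowSchema)
                .withSource(sourceSchema)
                .build();
        Struct row = new Struct(rowSchema).put("id", 1).put("name", "alice");
        Struct source = new Struct(sourceSchema).put("lsn", 42L);
        // read() wraps the row as the "after" image with op = "r" (snapshot read)
        Struct value = envelope.read(row, source, System.currentTimeMillis());
        System.out.println(value);
    }
}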

Example 12 with TableSchema

Use of io.debezium.relational.TableSchema in project debezium by debezium.

Class RecordsStreamProducer, method tableSchemaFor: resolves the schema for a table, refreshing it from the database on a cache miss.

private TableSchema tableSchemaFor(TableId tableId) throws SQLException {
    PostgresSchema schema = schema();
    if (schema.isFilteredOut(tableId)) {
        logger.debug("table '{}' is filtered out, ignoring", tableId);
        return null;
    }
    TableSchema tableSchema = schema.schemaFor(tableId);
    if (tableSchema != null) {
        return tableSchema;
    }
    // no schema registered yet, which means this is a newly created table; refresh our schema to get its definition
    try (final PostgresConnection connection = taskContext.createConnection()) {
        schema.refresh(connection, tableId);
    }
    tableSchema = schema.schemaFor(tableId);
    if (tableSchema == null) {
        logger.warn("cannot load schema for table '{}'", tableId);
        return null;
    } else {
        logger.debug("refreshed DB schema to include table '{}'", tableId);
        return tableSchema;
    }
}
Also used: TableSchema(io.debezium.relational.TableSchema), PostgresConnection(io.debezium.connector.postgresql.connection.PostgresConnection)
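
tableSchemaFor() is a refresh-on-miss lookup: a cache miss is treated as a possibly newly created table and triggers exactly one schema refresh before giving up. A generic sketch of the same pattern, with hypothetical names (SchemaCache, loader) rather than Debezium's types:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class SchemaCache<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> loader; // e.g. reads a table definition from the database

    public SchemaCache(Function<K, V> loader) {
        this.loader = loader;
    }

    public V schemaFor(K id) {
        V cached = cache.get(id);
        if (cached != null) {
            return cached; // fast path: definition already known
        }
        // miss: the object may have been created after the last refresh,
        // so reload its definition once before giving up
        V refreshed = loader.apply(id);
        if (refreshed != null) {
            cache.put(id, refreshed);
        }
        return refreshed; // still null if the object truly does not exist
    }
}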

Example 13 with TableSchema

Use of io.debezium.relational.TableSchema in project debezium by debezium.

Class RecordsStreamProducer, method generateDeleteRecord: emits a delete event and, when configured, a tombstone for the deleted key.

protected void generateDeleteRecord(TableId tableId, Object[] oldRowData, boolean isLastEventForLsn, BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (oldRowData == null || oldRowData.length == 0) {
        logger.warn("no values found for table '{}' from update message at '{}';skipping record", tableId, sourceInfo);
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(oldRowData);
    Struct value = tableSchema.valueFromColumnData(oldRowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    // create the regular delete record
    ChangeEvent changeEvent = new ChangeEvent(new SourceRecord(partition, offset, topicName, null, keySchema, key, envelope.schema(), envelope.delete(value, sourceInfo.source(), clock().currentTimeInMillis())), isLastEventForLsn);
    if (logger.isDebugEnabled()) {
        logger.debug("sending delete event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
    }
    recordConsumer.accept(changeEvent);
    // And send a tombstone event (null value) for the old key so it can be removed from the Kafka log eventually...
    if (taskContext.config().isEmitTombstoneOnDelete()) {
        changeEvent = new ChangeEvent(new SourceRecord(partition, offset, topicName, null, keySchema, key, null, null), isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending tombstone event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
    }
}
Also used: TableSchema(io.debezium.relational.TableSchema), Schema(org.apache.kafka.connect.data.Schema), Envelope(io.debezium.data.Envelope), SourceRecord(org.apache.kafka.connect.source.SourceRecord), Struct(org.apache.kafka.connect.data.Struct)
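
The tombstone only has an effect on topics configured for log compaction, where the broker eventually keeps just the latest record per key and removes keys whose latest record is a tombstone. A minimal sketch of creating such a topic with the Kafka admin client, assuming a broker at localhost:9092 and a hypothetical topic name:

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class CompactedTopicSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // cleanup.policy=compact keeps the latest record per key; a key whose
            // latest record is a tombstone disappears entirely after compaction
            NewTopic topic = new NewTopic("server1.public.users", 1, (short) 1)
                    .configs(Map.of("cleanup.policy", "compact"));
            admin.createTopics(List.of(topic)).all().get();
        }
    }
}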

Example 14 with TableSchema

Use of io.debezium.relational.TableSchema in project debezium by debezium.

Class RecordsStreamProducer, method generateUpdateRecord: emits a single update event, or a delete/tombstone/create sequence when the primary key has changed.

protected void generateUpdateRecord(TableId tableId, Object[] oldRowData, Object[] newRowData, boolean isLastEventForLsn, BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (newRowData == null || newRowData.length == 0) {
        logger.warn("no values found for table '{}' from update message at '{}';skipping record", tableId, sourceInfo);
        return;
    }
    Schema oldKeySchema = null;
    Struct oldValue = null;
    Object oldKey = null;
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    if (oldRowData != null && oldRowData.length > 0) {
        oldKey = tableSchema.keyFromColumnData(oldRowData);
        oldKeySchema = tableSchema.keySchema();
        oldValue = tableSchema.valueFromColumnData(oldRowData);
    }
    Object newKey = tableSchema.keyFromColumnData(newRowData);
    Struct newValue = tableSchema.valueFromColumnData(newRowData);
    Schema newKeySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    Struct source = sourceInfo.source();
    if (oldKey != null && !Objects.equals(oldKey, newKey)) {
        // the primary key has changed, so we need to send a DELETE followed by a CREATE
        // then send a delete event for the old key ...
        ChangeEvent changeEvent = new ChangeEvent(new SourceRecord(partition, offset, topicName, null, oldKeySchema, oldKey, envelope.schema(), envelope.delete(oldValue, source, clock().currentTimeInMillis())), isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending delete event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
        if (taskContext.config().isEmitTombstoneOnDelete()) {
            // send a tombstone event (null value) for the old key so it can be removed from the Kafka log eventually...
            changeEvent = new ChangeEvent(new SourceRecord(partition, offset, topicName, null, oldKeySchema, oldKey, null, null), isLastEventForLsn);
            if (logger.isDebugEnabled()) {
                logger.debug("sending tombstone event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
            }
            recordConsumer.accept(changeEvent);
        }
        // then send a create event for the new key...
        changeEvent = new ChangeEvent(new SourceRecord(partition, offset, topicName, null, newKeySchema, newKey, envelope.schema(), envelope.create(newValue, source, clock().currentTimeInMillis())), isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending create event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
    } else {
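        // the primary key is unchanged, so a single update event suffices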
        SourceRecord record = new SourceRecord(partition, offset, topicName, null, newKeySchema, newKey, envelope.schema(), envelope.update(oldValue, newValue, source, clock().currentTimeInMillis()));
        recordConsumer.accept(new ChangeEvent(record, isLastEventForLsn));
    }
}
Also used: TableSchema(io.debezium.relational.TableSchema), Schema(org.apache.kafka.connect.data.Schema), Envelope(io.debezium.data.Envelope), SourceRecord(org.apache.kafka.connect.source.SourceRecord), Struct(org.apache.kafka.connect.data.Struct)
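
The branching above means one database UPDATE can fan out into up to three Kafka records. A standalone sketch of the splitting rule (hypothetical names, not Debezium's ChangeEvent): an update that changes the primary key becomes DELETE(old key), optionally TOMBSTONE(old key), then CREATE(new key); otherwise it stays a single UPDATE.

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

public class UpdateSplitSketch {
    static List<String> split(Object oldKey, Object newKey, boolean emitTombstone) {
        List<String> events = new ArrayList<>();
        if (oldKey != null && !Objects.equals(oldKey, newKey)) {
            // key changed: downstream consumers keyed by primary key must see
            // the old key disappear and the new key appear
            events.add("DELETE " + oldKey);
            if (emitTombstone) {
                events.add("TOMBSTONE " + oldKey);
            }
            events.add("CREATE " + newKey);
        } else {
            events.add("UPDATE " + newKey);
        }
        return events;
    }

    public static void main(String[] args) {
        System.out.println(split(1, 2, true)); // [DELETE 1, TOMBSTONE 1, CREATE 2]
        System.out.println(split(1, 1, true)); // [UPDATE 1]
    }
}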

Example 15 with TableSchema

Use of io.debezium.relational.TableSchema in project debezium by debezium.

Class PostgresSchemaIT, method assertColumnsExcluded: asserts that filtered-out columns are absent from a table's value schema.

protected void assertColumnsExcluded(String... columnNames) {
    Arrays.stream(columnNames).forEach(fqColumnName -> {
        int lastDotIdx = fqColumnName.lastIndexOf(".");
        // column names are expected to be fully qualified (schema.table.column)
        String fullyQualifiedTableName = fqColumnName.substring(0, lastDotIdx);
        String columnName = fqColumnName.substring(lastDotIdx + 1);
        TableSchema tableSchema = schemaFor(fullyQualifiedTableName);
        assertNotNull(fullyQualifiedTableName + " not included", tableSchema);
        Schema valueSchema = tableSchema.valueSchema();
        assertNotNull(fullyQualifiedTableName + ".Value schema not included", valueSchema);
        assertNull(columnName + " not excluded", valueSchema.field(columnName));
    });
}
Also used: TableSchema(io.debezium.relational.TableSchema), Schema(org.apache.kafka.connect.data.Schema), Point(io.debezium.data.geometry.Point)
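
A hedged usage sketch: the helper expects fully qualified schema.table.column names, so a test that filters out a column could verify the exclusion like this (table and column names are hypothetical, and the exact filter property depends on the Debezium version):

@Test
public void shouldNotExposeExcludedColumns() throws Exception {
    // hypothetical setup: start the connector/schema with a column filter
    // that drops public.persons.ssn and public.persons.salary
    assertColumnsExcluded("public.persons.ssn", "public.persons.salary");
}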

Aggregations

TableSchema (io.debezium.relational.TableSchema): 17
Schema (org.apache.kafka.connect.data.Schema): 8
Envelope (io.debezium.data.Envelope): 5
Struct (org.apache.kafka.connect.data.Struct): 5
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 5
Table (io.debezium.relational.Table): 4
TableId (io.debezium.relational.TableId): 3
PostgresConnection (io.debezium.connector.postgresql.connection.PostgresConnection): 2
ReplicationMessage (io.debezium.connector.postgresql.connection.ReplicationMessage): 1
Point (io.debezium.data.geometry.Point): 1
BlockingConsumer (io.debezium.function.BlockingConsumer): 1
ParsingException (io.debezium.text.ParsingException): 1
BitSet (java.util.BitSet): 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1
ConnectException (org.apache.kafka.connect.errors.ConnectException): 1
Test (org.junit.Test): 1