Use of io.debezium.relational.TableSchema in project debezium by debezium.
Class RecordsSnapshotProducer, method generateReadRecord:
protected void generateReadRecord(TableId tableId, Object[] rowData) {
    if (rowData.length == 0) {
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(rowData);
    Struct value = tableSchema.valueFromColumnData(rowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    sourceInfo.update(clock().currentTimeInMicros());
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    currentRecord.set(new SourceRecord(partition, offset, topicName, null, keySchema, key,
            envelope.schema(), envelope.read(value, sourceInfo.source(), clock().currentTimeInMillis())));
}
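The value written above is a Debezium envelope whose "op" field is "r" for snapshot reads. As a consumer-side illustration (not part of the snippet above), a hypothetical helper for unpacking such a record could look like this, assuming the standard envelope field names:

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

// Hypothetical helper: unpack a record produced by generateReadRecord.
// The field names ("op", "after", "source", "ts_ms") are the standard Debezium envelope fields.
static void printReadEvent(SourceRecord record) {
    Struct envelope = (Struct) record.value();
    String op = envelope.getString("op");         // "r" for snapshot reads
    Struct after = envelope.getStruct("after");   // the row image built by valueFromColumnData
    Struct source = envelope.getStruct("source"); // sourceInfo.source()
    Long tsMs = envelope.getInt64("ts_ms");       // clock().currentTimeInMillis()
    System.out.printf("op=%s after=%s source=%s ts_ms=%d%n", op, after, source, tsMs);
}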
Use of io.debezium.relational.TableSchema in project debezium by debezium.
Class RecordsStreamProducer, method tableSchemaFor:
private TableSchema tableSchemaFor(TableId tableId) throws SQLException {
    PostgresSchema schema = schema();
    if (schema.isFilteredOut(tableId)) {
        logger.debug("table '{}' is filtered out, ignoring", tableId);
        return null;
    }
    TableSchema tableSchema = schema.schemaFor(tableId);
    if (tableSchema != null) {
        return tableSchema;
    }
    // no schema was found, which means this is a newly created table;
    // refresh our schema to get the definition for this table
    try (final PostgresConnection connection = taskContext.createConnection()) {
        schema.refresh(connection, tableId);
    }
    tableSchema = schema.schemaFor(tableId);
    if (tableSchema == null) {
        logger.warn("cannot load schema for table '{}'", tableId);
        return null;
    } else {
        logger.debug("refreshed DB schema to include table '{}'", tableId);
        return tableSchema;
    }
}
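Note that tableSchemaFor returns null both when the table is filtered out and when its schema cannot be loaded even after a refresh, so callers are expected to skip the event in that case. A minimal sketch of such a caller (hypothetical; the process method and its arguments are assumptions) might be:

// Hypothetical caller inside RecordsStreamProducer: a null TableSchema means
// the event should be ignored rather than treated as an error.
private void process(TableId tableId, Object[] row) throws SQLException {
    TableSchema tableSchema = tableSchemaFor(tableId);
    if (tableSchema == null) {
        return; // filtered out, or unknown even after refreshing the schema
    }
    Object key = tableSchema.keyFromColumnData(row);
    Struct value = tableSchema.valueFromColumnData(row);
    // ... build and emit the change event as in the producer methods shown here
}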
Use of io.debezium.relational.TableSchema in project debezium by debezium.
Class RecordsStreamProducer, method generateDeleteRecord:
protected void generateDeleteRecord(TableId tableId, Object[] oldRowData, boolean isLastEventForLsn,
                                    BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (oldRowData == null || oldRowData.length == 0) {
        logger.warn("no values found for table '{}' from delete message at '{}'; skipping record", tableId, sourceInfo);
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(oldRowData);
    Struct value = tableSchema.valueFromColumnData(oldRowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    // create the regular delete record
    ChangeEvent changeEvent = new ChangeEvent(
            new SourceRecord(partition, offset, topicName, null, keySchema, key,
                    envelope.schema(), envelope.delete(value, sourceInfo.source(), clock().currentTimeInMillis())),
            isLastEventForLsn);
    if (logger.isDebugEnabled()) {
        logger.debug("sending delete event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
    }
    recordConsumer.accept(changeEvent);
    // and send a tombstone event (null value) for the old key so it can be removed from the Kafka log eventually...
    if (taskContext.config().isEmitTombstoneOnDelete()) {
        changeEvent = new ChangeEvent(
                new SourceRecord(partition, offset, topicName, null, keySchema, key, null, null),
                isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending tombstone event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
    }
}
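The tombstone is simply the same key with a null value schema and a null value; its emission is governed by isEmitTombstoneOnDelete(), which in the PostgreSQL connector corresponds to the tombstones.on.delete property. A small, hypothetical consumer-side check to tell the two records apart:

// Hypothetical checks for records produced by generateDeleteRecord: the delete event
// carries an envelope with "op" = "d", while the tombstone that may follow it has no value at all.
static boolean isTombstone(SourceRecord record) {
    return record.value() == null;
}

static boolean isDeleteEnvelope(SourceRecord record) {
    return record.value() != null && "d".equals(((Struct) record.value()).getString("op"));
}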
Use of io.debezium.relational.TableSchema in project debezium by debezium.
Class RecordsStreamProducer, method generateUpdateRecord:
protected void generateUpdateRecord(TableId tableId, Object[] oldRowData, Object[] newRowData,
                                    boolean isLastEventForLsn, BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (newRowData == null || newRowData.length == 0) {
        logger.warn("no values found for table '{}' from update message at '{}'; skipping record", tableId, sourceInfo);
        return;
    }
    Schema oldKeySchema = null;
    Struct oldValue = null;
    Object oldKey = null;
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    if (oldRowData != null && oldRowData.length > 0) {
        oldKey = tableSchema.keyFromColumnData(oldRowData);
        oldKeySchema = tableSchema.keySchema();
        oldValue = tableSchema.valueFromColumnData(oldRowData);
    }
    Object newKey = tableSchema.keyFromColumnData(newRowData);
    Struct newValue = tableSchema.valueFromColumnData(newRowData);
    Schema newKeySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    Struct source = sourceInfo.source();
    if (oldKey != null && !Objects.equals(oldKey, newKey)) {
        // the primary key has changed, so we need to send a DELETE followed by a CREATE
        // first send a delete event for the old key ...
        ChangeEvent changeEvent = new ChangeEvent(
                new SourceRecord(partition, offset, topicName, null, oldKeySchema, oldKey,
                        envelope.schema(), envelope.delete(oldValue, source, clock().currentTimeInMillis())),
                isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending delete event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
        if (taskContext.config().isEmitTombstoneOnDelete()) {
            // send a tombstone event (null value) for the old key so it can be removed from the Kafka log eventually...
            changeEvent = new ChangeEvent(
                    new SourceRecord(partition, offset, topicName, null, oldKeySchema, oldKey, null, null),
                    isLastEventForLsn);
            if (logger.isDebugEnabled()) {
                logger.debug("sending tombstone event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
            }
            recordConsumer.accept(changeEvent);
        }
        // then send a create event for the new key...
        changeEvent = new ChangeEvent(
                new SourceRecord(partition, offset, topicName, null, newKeySchema, newKey,
                        envelope.schema(), envelope.create(newValue, source, clock().currentTimeInMillis())),
                isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending create event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
    } else {
        SourceRecord record = new SourceRecord(partition, offset, topicName, null, newKeySchema, newKey,
                envelope.schema(), envelope.update(oldValue, newValue, source, clock().currentTimeInMillis()));
        recordConsumer.accept(new ChangeEvent(record, isLastEventForLsn));
    }
}
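A row update therefore fans out differently depending on whether the primary key changed: a key-preserving update yields a single update envelope with before/after images, while a key change yields a delete (plus an optional tombstone) followed by a create. A small hypothetical helper makes that expected fan-out explicit:

import java.util.Objects;

// Hypothetical illustration of the branching above: how many records a single
// UPDATE produces, given the old and new keys and the tombstone setting.
static int expectedEventCount(Object oldKey, Object newKey, boolean emitTombstoneOnDelete) {
    boolean keyChanged = oldKey != null && !Objects.equals(oldKey, newKey);
    if (!keyChanged) {
        return 1;                             // single update envelope ("op" = "u")
    }
    return emitTombstoneOnDelete ? 3 : 2;     // delete, optional tombstone, then create
}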
Use of io.debezium.relational.TableSchema in project debezium by debezium.
Class PostgresSchemaIT, method assertColumnsExcluded:
protected void assertColumnsExcluded(String... columnNames) {
    Arrays.stream(columnNames).forEach(fqColumnName -> {
        int lastDotIdx = fqColumnName.lastIndexOf(".");
        String fullyQualifiedTableName = fqColumnName.substring(0, lastDotIdx);
        String columnName = lastDotIdx > 0 ? fqColumnName.substring(lastDotIdx + 1) : fqColumnName;
        TableSchema tableSchema = schemaFor(fullyQualifiedTableName);
        assertNotNull(fullyQualifiedTableName + " not included", tableSchema);
        Schema valueSchema = tableSchema.valueSchema();
        assertNotNull(fullyQualifiedTableName + ".Value schema not included", valueSchema);
        assertNull(columnName + " not excluded;", valueSchema.field(columnName));
    });
}
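A mirror-image helper for asserting that columns are present would follow the same fully-qualified "schema.table.column" convention; the sketch below is hypothetical (the test class may name or structure such a helper differently):

// Hypothetical counterpart to assertColumnsExcluded: assert the listed columns are present.
protected void assertColumnsIncluded(String... columnNames) {
    Arrays.stream(columnNames).forEach(fqColumnName -> {
        int lastDotIdx = fqColumnName.lastIndexOf(".");
        String fullyQualifiedTableName = fqColumnName.substring(0, lastDotIdx);
        String columnName = lastDotIdx > 0 ? fqColumnName.substring(lastDotIdx + 1) : fqColumnName;
        TableSchema tableSchema = schemaFor(fullyQualifiedTableName);
        assertNotNull(fullyQualifiedTableName + " not included", tableSchema);
        Schema valueSchema = tableSchema.valueSchema();
        assertNotNull(fullyQualifiedTableName + ".Value schema not included", valueSchema);
        assertNotNull(columnName + " not included", valueSchema.field(columnName));
    });
}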