Use of io.debezium.data.Envelope in project debezium by debezium.
From class RecordsStreamProducer, method generateCreateRecord:
protected void generateCreateRecord(TableId tableId, Object[] rowData, boolean isLastEventForLsn, BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (rowData == null || rowData.length == 0) {
        logger.warn("no new values found for table '{}' from create message at '{}'; skipping record", tableId, sourceInfo);
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(rowData);
    Struct value = tableSchema.valueFromColumnData(rowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    // Wrap the new row image in a 'create' envelope (op = "c") and emit it.
    SourceRecord record = new SourceRecord(partition, offset, topicName, null, keySchema, key,
            envelope.schema(), envelope.create(value, sourceInfo.source(), clock().currentTimeInMillis()));
    if (logger.isDebugEnabled()) {
        logger.debug("sending create event '{}' to topic '{}'", record, topicName);
    }
    recordConsumer.accept(new ChangeEvent(record, isLastEventForLsn));
}
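A minimal sketch of how an envelope like the one returned by tableSchema.getEnvelopeSchema() is defined and how envelope.create(...) wraps a row image. The row and source schemas below are hypothetical stand-ins for what Debezium derives from TableSchema and SourceInfo; note that this era of the Envelope API takes the timestamp as a long, matching the clock().currentTimeInMillis() calls above, while later Debezium versions take an Instant.

import io.debezium.data.Envelope;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class CreateEnvelopeSketch {
    public static void main(String[] args) {
        // Hypothetical row and source schemas; Debezium builds these from the table definition.
        Schema rowSchema = SchemaBuilder.struct().name("server.public.customers.Value")
                .field("id", Schema.INT32_SCHEMA)
                .field("name", Schema.STRING_SCHEMA)
                .build();
        Schema sourceSchema = SchemaBuilder.struct().name("io.debezium.connector.postgresql.Source")
                .field("lsn", Schema.OPTIONAL_INT64_SCHEMA)
                .build();
        Envelope envelope = Envelope.defineSchema()
                .withName("server.public.customers.Envelope")
                .withRecord(rowSchema)
                .withSource(sourceSchema)
                .build();
        Struct row = new Struct(rowSchema).put("id", 1).put("name", "alice");
        Struct source = new Struct(sourceSchema).put("lsn", 42L);
        // create() puts the row image into 'after', sets 'op' to "c" and 'ts_ms' to the
        // timestamp; 'before' stays null for an insert.
        Struct payload = envelope.create(row, source, System.currentTimeMillis());
        System.out.println(payload);
    }
}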
Use of io.debezium.data.Envelope in project debezium by debezium.
From class RecordsSnapshotProducer, method generateReadRecord:
protected void generateReadRecord(TableId tableId, Object[] rowData) {
    if (rowData.length == 0) {
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(rowData);
    Struct value = tableSchema.valueFromColumnData(rowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    sourceInfo.update(clock().currentTimeInMicros());
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    currentRecord.set(new SourceRecord(partition, offset, topicName, null, keySchema, key,
            envelope.schema(), envelope.read(value, sourceInfo.source(), clock().currentTimeInMillis())));
}
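The snapshot path differs from the streamed create above only in the operation code: envelope.read(...) marks the payload with op "r" rather than "c". A consumer-side sketch using the public field-name and operation constants on io.debezium.data.Envelope (the helper class itself is hypothetical):

import io.debezium.data.Envelope;
import org.apache.kafka.connect.data.Struct;

public class EnvelopeInspector {
    public static String describe(Struct payload) {
        Envelope.Operation op = Envelope.Operation.forCode(payload.getString(Envelope.FieldName.OPERATION));
        Struct after = payload.getStruct(Envelope.FieldName.AFTER);
        switch (op) {
            case READ:
                return "snapshot read: " + after;   // produced by envelope.read(...)
            case CREATE:
                return "insert: " + after;          // produced by envelope.create(...)
            default:
                return op + ": " + after;
        }
    }
}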
Use of io.debezium.data.Envelope in project debezium by debezium.
From class RecordsStreamProducer, method generateDeleteRecord:
protected void generateDeleteRecord(TableId tableId, Object[] oldRowData, boolean isLastEventForLsn, BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (oldRowData == null || oldRowData.length == 0) {
        logger.warn("no values found for table '{}' from delete message at '{}'; skipping record", tableId, sourceInfo);
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(oldRowData);
    Struct value = tableSchema.valueFromColumnData(oldRowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    // Create the regular delete record ...
    ChangeEvent changeEvent = new ChangeEvent(
            new SourceRecord(partition, offset, topicName, null, keySchema, key, envelope.schema(),
                    envelope.delete(value, sourceInfo.source(), clock().currentTimeInMillis())),
            isLastEventForLsn);
    if (logger.isDebugEnabled()) {
        logger.debug("sending delete event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
    }
    recordConsumer.accept(changeEvent);
    // And send a tombstone event (null value) for the old key so it can be removed from the Kafka log eventually ...
    if (taskContext.config().isEmitTombstoneOnDelete()) {
        changeEvent = new ChangeEvent(new SourceRecord(partition, offset, topicName, null, keySchema, key, null, null), isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending tombstone event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
    }
}
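Downstream, the two records emitted here are distinguished purely by their value: the delete record carries a full envelope with op "d", while the optional tombstone reuses the same key with a null value and null value schema, which is what lets a log-compacted Kafka topic eventually discard every record for that key. A hypothetical consumer-side handler sketch:

import org.apache.kafka.connect.source.SourceRecord;

public class DeleteHandlerSketch {
    // Hypothetical handler; not part of Debezium's API.
    public static void handle(SourceRecord record) {
        if (record.value() == null) {
            System.out.println("tombstone for key " + record.key());     // compaction marker
        } else {
            System.out.println("delete event for key " + record.key()); // full "d" envelope
        }
    }
}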
Use of io.debezium.data.Envelope in project debezium by debezium.
From class RecordsStreamProducer, method generateUpdateRecord:
protected void generateUpdateRecord(TableId tableId, Object[] oldRowData, Object[] newRowData, boolean isLastEventForLsn, BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (newRowData == null || newRowData.length == 0) {
        logger.warn("no values found for table '{}' from update message at '{}'; skipping record", tableId, sourceInfo);
        return;
    }
    Schema oldKeySchema = null;
    Struct oldValue = null;
    Object oldKey = null;
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    if (oldRowData != null && oldRowData.length > 0) {
        oldKey = tableSchema.keyFromColumnData(oldRowData);
        oldKeySchema = tableSchema.keySchema();
        oldValue = tableSchema.valueFromColumnData(oldRowData);
    }
    Object newKey = tableSchema.keyFromColumnData(newRowData);
    Struct newValue = tableSchema.valueFromColumnData(newRowData);
    Schema newKeySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    Struct source = sourceInfo.source();
    if (oldKey != null && !Objects.equals(oldKey, newKey)) {
        // The primary key has changed, so send a DELETE for the old key followed by a CREATE for the new key.
        // First send the delete event for the old key ...
        ChangeEvent changeEvent = new ChangeEvent(
                new SourceRecord(partition, offset, topicName, null, oldKeySchema, oldKey, envelope.schema(),
                        envelope.delete(oldValue, source, clock().currentTimeInMillis())),
                isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending delete event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
        if (taskContext.config().isEmitTombstoneOnDelete()) {
            // Send a tombstone event (null value) for the old key so it can be removed from the Kafka log eventually ...
            changeEvent = new ChangeEvent(new SourceRecord(partition, offset, topicName, null, oldKeySchema, oldKey, null, null), isLastEventForLsn);
            if (logger.isDebugEnabled()) {
                logger.debug("sending tombstone event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
            }
            recordConsumer.accept(changeEvent);
        }
        // Then send a create event for the new key ...
        changeEvent = new ChangeEvent(
                new SourceRecord(partition, offset, topicName, null, newKeySchema, newKey, envelope.schema(),
                        envelope.create(newValue, source, clock().currentTimeInMillis())),
                isLastEventForLsn);
        if (logger.isDebugEnabled()) {
            logger.debug("sending create event '{}' to topic '{}'", changeEvent.getRecord(), topicName);
        }
        recordConsumer.accept(changeEvent);
    } else {
        SourceRecord record = new SourceRecord(partition, offset, topicName, null, newKeySchema, newKey, envelope.schema(),
                envelope.update(oldValue, newValue, source, clock().currentTimeInMillis()));
        recordConsumer.accept(new ChangeEvent(record, isLastEventForLsn));
    }
}
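In the common case the key is unchanged and the else branch emits a single record built by envelope.update(...), which carries both row images; when the primary key does change, no single key fits both images, so the method decomposes the update into delete(oldKey), an optional tombstone, and create(newKey), as above. A minimal sketch of the single-record case, reusing the hypothetical schemas from the create sketch earlier:

import io.debezium.data.Envelope;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class UpdateEnvelopeSketch {
    public static void main(String[] args) {
        Schema rowSchema = SchemaBuilder.struct().name("server.public.customers.Value")
                .field("id", Schema.INT32_SCHEMA)
                .field("name", Schema.STRING_SCHEMA)
                .build();
        Schema sourceSchema = SchemaBuilder.struct().name("io.debezium.connector.postgresql.Source")
                .field("lsn", Schema.OPTIONAL_INT64_SCHEMA)
                .build();
        Envelope envelope = Envelope.defineSchema()
                .withName("server.public.customers.Envelope")
                .withRecord(rowSchema)
                .withSource(sourceSchema)
                .build();
        Struct source = new Struct(sourceSchema).put("lsn", 43L);
        Struct before = new Struct(rowSchema).put("id", 1).put("name", "alice");
        Struct after = new Struct(rowSchema).put("id", 1).put("name", "alicia");
        // update() fills both 'before' and 'after' and sets 'op' to "u".
        Struct payload = envelope.update(before, after, source, System.currentTimeMillis());
        System.out.println(payload);
    }
}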
Use of io.debezium.data.Envelope in project debezium by debezium.
From class RecordMakers, method assign:
/**
 * Assign the given table number to the table with the specified {@link TableId table ID}.
 *
 * @param tableNumber the table number found in binlog events
 * @param id the identifier for the corresponding table
 * @return {@code true} if the assignment was successful, or {@code false} if the table is currently excluded in the
 *         connector's configuration
 */
public boolean assign(long tableNumber, TableId id) {
    Long existingTableNumber = tableNumbersByTableId.get(id);
    if (existingTableNumber != null && existingTableNumber.longValue() == tableNumber && convertersByTableNumber.containsKey(tableNumber)) {
        // This is the exact same table number for the same table, so do nothing ...
        return true;
    }
    TableSchema tableSchema = schema.schemaFor(id);
    if (tableSchema == null) {
        return false;
    }
    String topicName = topicSelector.getTopic(id);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    // Generate this table's insert, update, and delete converters ...
    Integer partitionNum = null;
    Converter converter = new Converter() {

        @Override
        public int read(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts,
                        BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key,
                        envelope.schema(), envelope.read(value, origin, ts));
                consumer.accept(record);
                return 1;
            }
            return 0;
        }

        @Override
        public int insert(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts,
                          BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key,
                        envelope.schema(), envelope.create(value, origin, ts));
                consumer.accept(record);
                return 1;
            }
            return 0;
        }

        @Override
        public int update(SourceInfo source, Object[] before, Object[] after, int rowNumber, int numberOfRows, BitSet includedColumns,
                          long ts, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            int count = 0;
            Object key = tableSchema.keyFromColumnData(after);
            Struct valueAfter = tableSchema.valueFromColumnData(after);
            if (valueAfter != null || key != null) {
                Object oldKey = tableSchema.keyFromColumnData(before);
                Struct valueBefore = tableSchema.valueFromColumnData(before);
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                if (key != null && !Objects.equals(key, oldKey)) {
                    // The key has changed, so we need to deal with both the new key and the old key.
                    // Consumers may push the events into a system that won't allow both records to exist at the same time,
                    // so we first want to send the delete event for the old key ...
                    SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, oldKey,
                            envelope.schema(), envelope.delete(valueBefore, origin, ts));
                    consumer.accept(record);
                    ++count;
                    if (emitTombstoneOnDelete) {
                        // Next send a tombstone event for the old key ...
                        record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, oldKey, null, null);
                        consumer.accept(record);
                        ++count;
                    }
                    // And finally send the create event ...
                    record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key,
                            envelope.schema(), envelope.create(valueAfter, origin, ts));
                    consumer.accept(record);
                    ++count;
                } else {
                    // The key has not changed, so a simple update is fine ...
                    SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key,
                            envelope.schema(), envelope.update(valueBefore, valueAfter, origin, ts));
                    consumer.accept(record);
                    ++count;
                }
            }
            return count;
        }

        @Override
        public int delete(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts,
                          BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            int count = 0;
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                // Send a delete message ...
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key,
                        envelope.schema(), envelope.delete(value, origin, ts));
                consumer.accept(record);
                ++count;
                // And send a tombstone ...
                if (emitTombstoneOnDelete) {
                    record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, null, null);
                    consumer.accept(record);
                    ++count;
                }
            }
            return count;
        }

        @Override
        public String toString() {
            return "RecordMaker.Converter(" + id + ")";
        }
    };
    convertersByTableNumber.put(tableNumber, converter);
    Long previousTableNumber = tableNumbersByTableId.put(id, tableNumber);
    tableIdsByTableNumber.put(tableNumber, id);
    if (previousTableNumber != null) {
        assert previousTableNumber.longValue() != tableNumber;
        convertersByTableNumber.remove(previousTableNumber);
    }
    return true;
}
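A sketch of how assign() is typically driven: the MySQL binlog's TABLE_MAP event maps a numeric table id to a table name, and assign() installs the read/insert/update/delete converters for it; a false return means the table is excluded by the connector configuration, so its subsequent row events can be skipped. The surrounding class, method, and field names here are hypothetical.

import java.util.HashSet;
import java.util.Set;
import io.debezium.connector.mysql.RecordMakers;
import io.debezium.relational.TableId;

public class AssignUsageSketch {
    private final Set<Long> excludedTableNumbers = new HashSet<>();
    private final RecordMakers recordMakers;

    public AssignUsageSketch(RecordMakers recordMakers) {
        this.recordMakers = recordMakers; // built by the connector from its live state
    }

    // Hypothetical handler for a binlog TABLE_MAP event.
    void onTableMap(long tableNumber, String databaseName, String tableName) {
        TableId id = new TableId(databaseName, null, tableName); // MySQL has no schema level
        if (!recordMakers.assign(tableNumber, id)) {
            // Excluded by the connector's table filters; skip later row events for this number.
            excludedTableNumbers.add(tableNumber);
        }
    }
}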