Use of io.debezium.relational.TableSchema in project debezium by debezium.
From the class MySqlSchemaTest, method assertTableIncluded:
protected void assertTableIncluded(String fullyQualifiedTableName) {
    TableId tableId = TableId.parse(fullyQualifiedTableName);
    assertThat(mysql.tables().forTable(tableId)).isNotNull();
    TableSchema tableSchema = mysql.schemaFor(tableId);
    assertThat(tableSchema).isNotNull();
    assertThat(tableSchema.keySchema().name()).isEqualTo(SchemaNameAdjuster.validFullname(SERVER_NAME + "." + fullyQualifiedTableName + ".Key"));
    assertThat(tableSchema.valueSchema().name()).isEqualTo(SchemaNameAdjuster.validFullname(SERVER_NAME + "." + fullyQualifiedTableName + ".Value"));
}
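The assertions above encode the naming convention Debezium applies to per-table key and value schemas: the logical server name, the fully qualified table name, and a ".Key" or ".Value" suffix, passed through SchemaNameAdjuster so the result is a legal Connect/Avro fullname. Below is a minimal, self-contained sketch of just that naming step; the server and table names are made up for illustration, and the io.debezium.util.SchemaNameAdjuster import path and its pass-through behavior for already-valid names are assumptions about this version of the codebase, not taken from the test.

import io.debezium.util.SchemaNameAdjuster;

public class SchemaNameExample {

    public static void main(String[] args) {
        // Illustrative values; not taken from the test above.
        String serverName = "myserver";
        String fullyQualifiedTableName = "connector_test.products";

        // validFullname(...) adjusts the proposed name into a valid Connect/Avro fullname;
        // for plain alphanumeric segments like these it is expected to come back unchanged.
        String keyName = SchemaNameAdjuster.validFullname(serverName + "." + fullyQualifiedTableName + ".Key");
        String valueName = SchemaNameAdjuster.validFullname(serverName + "." + fullyQualifiedTableName + ".Value");

        System.out.println(keyName);   // expected: myserver.connector_test.products.Key
        System.out.println(valueName); // expected: myserver.connector_test.products.Value
    }
}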
Use of io.debezium.relational.TableSchema in project debezium by debezium.
From the class RecordMakers, method assign:
/**
 * Assign the given table number to the table with the specified {@link TableId table ID}.
 *
 * @param tableNumber the table number found in binlog events
 * @param id the identifier for the corresponding table
 * @return {@code true} if the assignment was successful, or {@code false} if the table is currently excluded in the
 *         connector's configuration
 */
public boolean assign(long tableNumber, TableId id) {
    Long existingTableNumber = tableNumbersByTableId.get(id);
    if (existingTableNumber != null && existingTableNumber.longValue() == tableNumber && convertersByTableNumber.containsKey(tableNumber)) {
        // This is the exact same table number for the same table, so do nothing ...
        return true;
    }
    TableSchema tableSchema = schema.schemaFor(id);
    if (tableSchema == null) {
        return false;
    }
    String topicName = topicSelector.getTopic(id);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    // Generate this table's insert, update, and delete converters ...
    Integer partitionNum = null;
    Converter converter = new Converter() {

        @Override
        public int read(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts,
                        BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum,
                        keySchema, key, envelope.schema(), envelope.read(value, origin, ts));
                consumer.accept(record);
                return 1;
            }
            return 0;
        }

        @Override
        public int insert(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts,
                          BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum,
                        keySchema, key, envelope.schema(), envelope.create(value, origin, ts));
                consumer.accept(record);
                return 1;
            }
            return 0;
        }

        @Override
        public int update(SourceInfo source, Object[] before, Object[] after, int rowNumber, int numberOfRows, BitSet includedColumns, long ts,
                          BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            int count = 0;
            Object key = tableSchema.keyFromColumnData(after);
            Struct valueAfter = tableSchema.valueFromColumnData(after);
            if (valueAfter != null || key != null) {
                Object oldKey = tableSchema.keyFromColumnData(before);
                Struct valueBefore = tableSchema.valueFromColumnData(before);
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                if (key != null && !Objects.equals(key, oldKey)) {
                    // The key has changed, so we need to deal with both the new key and the old key.
                    // Consumers may push the events into a system that won't allow both records to exist at the same time,
                    // so we first want to send the delete event for the old key ...
                    SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum,
                            keySchema, oldKey, envelope.schema(), envelope.delete(valueBefore, origin, ts));
                    consumer.accept(record);
                    ++count;
                    if (emitTombstoneOnDelete) {
                        // Next send a tombstone event for the old key ...
                        record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, oldKey, null, null);
                        consumer.accept(record);
                        ++count;
                    }
                    // And finally send the create event ...
                    record = new SourceRecord(partition, offset, topicName, partitionNum,
                            keySchema, key, envelope.schema(), envelope.create(valueAfter, origin, ts));
                    consumer.accept(record);
                    ++count;
                } else {
                    // The key has not changed, so a simple update is fine ...
                    SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum,
                            keySchema, key, envelope.schema(), envelope.update(valueBefore, valueAfter, origin, ts));
                    consumer.accept(record);
                    ++count;
                }
            }
            return count;
        }

        @Override
        public int delete(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts,
                          BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            int count = 0;
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                // Send a delete message ...
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum,
                        keySchema, key, envelope.schema(), envelope.delete(value, origin, ts));
                consumer.accept(record);
                ++count;
                // And send a tombstone ...
                if (emitTombstoneOnDelete) {
                    record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, null, null);
                    consumer.accept(record);
                    ++count;
                }
            }
            return count;
        }

        @Override
        public String toString() {
            return "RecordMaker.Converter(" + id + ")";
        }
    };
    convertersByTableNumber.put(tableNumber, converter);
    Long previousTableNumber = tableNumbersByTableId.put(id, tableNumber);
    tableIdsByTableNumber.put(tableNumber, id);
    if (previousTableNumber != null) {
        assert previousTableNumber.longValue() != tableNumber;
        convertersByTableNumber.remove(previousTableNumber);
    }
    return true;
}
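The delete handling above (and the key-change branch in update) follows the Kafka log-compaction convention: the delete event carrying the old value may be followed by a tombstone record with the same key but a null value schema and null value, so that a compacted topic can eventually discard every record for that key. Below is a minimal, self-contained sketch of just that tombstone construction; the key schema, key fields, partition and offset maps, and topic name are hypothetical stand-ins for what RecordMakers obtains from TableSchema and SourceInfo, not values from the code above.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

public class TombstoneSketch {

    public static void main(String[] args) {
        // Hypothetical key schema and key; in RecordMakers these come from
        // tableSchema.keySchema() and tableSchema.keyFromColumnData(row).
        Schema keySchema = SchemaBuilder.struct().name("example.Key").field("id", Schema.INT32_SCHEMA).build();
        Struct key = new Struct(keySchema).put("id", 1001);

        // Hypothetical source partition/offset; in RecordMakers these come from
        // source.partition() and source.offsetForRow(rowNumber, numberOfRows).
        Map<String, ?> partition = Collections.singletonMap("server", "myserver");
        Map<String, ?> offset = Collections.singletonMap("pos", 12345L);

        // The tombstone that follows a delete: same key as the delete record, but null value
        // schema and null value, so a log-compacted topic can drop every record for this key.
        SourceRecord tombstone = new SourceRecord(partition, offset, "myserver.db.table", null,
                keySchema, key, null, null);
        System.out.println(tombstone);
    }
}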