Example 1 with BlockingConsumer

Use of io.debezium.function.BlockingConsumer in project debezium by debezium.
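For context, io.debezium.function.BlockingConsumer is Debezium's variant of java.util.function.Consumer whose accept method is allowed to block and therefore declares InterruptedException; that is what lets the converters and readers below push records into bounded queues. A minimal sketch of the interface shape, paraphrased rather than copied from the Debezium sources:

@FunctionalInterface
public interface BlockingConsumer<T> {

    /**
     * Consume a value, possibly blocking (for example while writing to a bounded queue).
     *
     * @param value the value to consume
     * @throws InterruptedException if the calling thread is interrupted while blocked
     */
    void accept(T value) throws InterruptedException;
}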

In the class RecordMakers, the method assign:

/**
 * Assign the given table number to the table with the specified {@link TableId table ID}.
 *
 * @param tableNumber the table number found in binlog events
 * @param id the identifier for the corresponding table
 * @return {@code true} if the assignment was successful, or {@code false} if the table is currently excluded in the
 *         connector's configuration
 */
public boolean assign(long tableNumber, TableId id) {
    Long existingTableNumber = tableNumbersByTableId.get(id);
    if (existingTableNumber != null && existingTableNumber.longValue() == tableNumber && convertersByTableNumber.containsKey(tableNumber)) {
        // This is the exact same table number for the same table, so do nothing ...
        return true;
    }
    TableSchema tableSchema = schema.schemaFor(id);
    if (tableSchema == null)
        return false;
    String topicName = topicSelector.getTopic(id);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    // Generate this table's insert, update, and delete converters ...
    Integer partitionNum = null;
    Converter converter = new Converter() {

        @Override
        public int read(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, envelope.schema(), envelope.read(value, origin, ts));
                consumer.accept(record);
                return 1;
            }
            return 0;
        }

        @Override
        public int insert(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, envelope.schema(), envelope.create(value, origin, ts));
                consumer.accept(record);
                return 1;
            }
            return 0;
        }

        @Override
        public int update(SourceInfo source, Object[] before, Object[] after, int rowNumber, int numberOfRows, BitSet includedColumns, long ts, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            int count = 0;
            Object key = tableSchema.keyFromColumnData(after);
            Struct valueAfter = tableSchema.valueFromColumnData(after);
            if (valueAfter != null || key != null) {
                Object oldKey = tableSchema.keyFromColumnData(before);
                Struct valueBefore = tableSchema.valueFromColumnData(before);
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                if (key != null && !Objects.equals(key, oldKey)) {
                    // The key has changed, so we need to deal with both the new key and old key.
                    // Consumers may push the events into a system that won't allow both records to exist at the same time,
                    // so we first want to send the delete event for the old key...
                    SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, oldKey, envelope.schema(), envelope.delete(valueBefore, origin, ts));
                    consumer.accept(record);
                    ++count;
                    if (emitTombstoneOnDelete) {
                        // Next send a tombstone event for the old key ...
                        record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, oldKey, null, null);
                        consumer.accept(record);
                        ++count;
                    }
                    // And finally send the create event ...
                    record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, envelope.schema(), envelope.create(valueAfter, origin, ts));
                    consumer.accept(record);
                    ++count;
                } else {
                    // The key has not changed, so a simple update is fine ...
                    SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, envelope.schema(), envelope.update(valueBefore, valueAfter, origin, ts));
                    consumer.accept(record);
                    ++count;
                }
            }
            return count;
        }

        @Override
        public int delete(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns, long ts, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
            int count = 0;
            Object key = tableSchema.keyFromColumnData(row);
            Struct value = tableSchema.valueFromColumnData(row);
            if (value != null || key != null) {
                Schema keySchema = tableSchema.keySchema();
                Map<String, ?> partition = source.partition();
                Map<String, ?> offset = source.offsetForRow(rowNumber, numberOfRows);
                Struct origin = source.struct(id);
                // Send a delete message ...
                SourceRecord record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, envelope.schema(), envelope.delete(value, origin, ts));
                consumer.accept(record);
                ++count;
                // And send a tombstone ...
                if (emitTombstoneOnDelete) {
                    record = new SourceRecord(partition, offset, topicName, partitionNum, keySchema, key, null, null);
                    consumer.accept(record);
                    ++count;
                }
            }
            return count;
        }

        @Override
        public String toString() {
            return "RecordMaker.Converter(" + id + ")";
        }
    };
    convertersByTableNumber.put(tableNumber, converter);
    Long previousTableNumber = tableNumbersByTableId.put(id, tableNumber);
    tableIdsByTableNumber.put(tableNumber, id);
    if (previousTableNumber != null) {
        assert previousTableNumber.longValue() != tableNumber;
        convertersByTableNumber.remove(previousTableNumber);
    }
    return true;
}
Also used: BlockingConsumer (io.debezium.function.BlockingConsumer), TableSchema (io.debezium.relational.TableSchema), Schema (org.apache.kafka.connect.data.Schema), BitSet (java.util.BitSet), Envelope (io.debezium.data.Envelope), SourceRecord (org.apache.kafka.connect.source.SourceRecord), Struct (org.apache.kafka.connect.data.Struct), AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
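The converters generated in assign never decide where records end up; each one simply hands every SourceRecord to the BlockingConsumer passed into read, insert, update, or delete. A common way to satisfy that parameter is a bounded queue whose blocking put doubles as the consumer, which gives the producer natural back-pressure. The following is a hypothetical sketch of that pattern; the class name and queue size are illustrative and not part of Debezium:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.apache.kafka.connect.source.SourceRecord;

import io.debezium.function.BlockingConsumer;

public class RecordBuffer {

    // Bounded queue: accept() blocks when it is full, throttling the producer.
    private final BlockingQueue<SourceRecord> records = new ArrayBlockingQueue<>(8192);

    // BlockingQueue.put also throws InterruptedException, so the method reference
    // matches BlockingConsumer.accept without any adapter.
    public BlockingConsumer<SourceRecord> consumer() {
        return records::put;
    }

    // Drained from the connector's poll() loop.
    public SourceRecord take() throws InterruptedException {
        return records.take();
    }
}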

Example 2 with BlockingConsumer

Use of io.debezium.function.BlockingConsumer in project debezium by debezium.

In the class BinlogReader, the method handleEvent:

protected void handleEvent(Event event) {
    if (event == null)
        return;
    // Update the source offset info. Note that the client returns the value in *milliseconds*, even though the binlog
    // contains only *seconds* precision ...
    EventHeader eventHeader = event.getHeader();
    source.setBinlogTimestampSeconds(eventHeader.getTimestamp() / 1000L); // client returns milliseconds, but only second precision
    source.setBinlogServerId(eventHeader.getServerId());
    EventType eventType = eventHeader.getEventType();
    if (eventType == EventType.ROTATE) {
        EventData eventData = event.getData();
        RotateEventData rotateEventData;
        if (eventData instanceof EventDeserializer.EventDataWrapper) {
            rotateEventData = (RotateEventData) ((EventDeserializer.EventDataWrapper) eventData).getInternal();
        } else {
            rotateEventData = (RotateEventData) eventData;
        }
        source.setBinlogStartPoint(rotateEventData.getBinlogFilename(), rotateEventData.getBinlogPosition());
    } else if (eventHeader instanceof EventHeaderV4) {
        EventHeaderV4 trackableEventHeader = (EventHeaderV4) eventHeader;
        source.setEventPosition(trackableEventHeader.getPosition(), trackableEventHeader.getEventLength());
    }
    // If there is a handler for this event, forward the event to it ...
    try {
        // Forward the event to the handler ...
        eventHandlers.getOrDefault(eventType, this::ignoreEvent).accept(event);
        // Generate heartbeat message if the time is right
        heartbeat.heartbeat((BlockingConsumer<SourceRecord>) this::enqueueRecord);
        // Capture that we've completed another event ...
        source.completeEvent();
        if (skipEvent) {
            // We're in the mode of skipping events and we just skipped this one, so decrement our skip count ...
            --initialEventsToSkip;
            skipEvent = initialEventsToSkip > 0;
        }
    } catch (RuntimeException e) {
        // There was an error in the event handler, so propagate the failure to Kafka Connect ...
        logReaderState();
        failed(e, "Error processing binlog event");
        // Do not stop the client, since Kafka Connect should stop the connector on its own
        // (and doing it here may cause problems the second time it is stopped).
        // We can clear the listeners though so that we ignore all future events ...
        eventHandlers.clear();
        logger.info("Error processing binlog event, and propagating to Kafka Connect so it stops this connector. Future binlog events read before connector is shutdown will be ignored.");
    } catch (InterruptedException e) {
        // Most likely because this reader was stopped and our thread was interrupted ...
        Thread.interrupted();
        eventHandlers.clear();
        logger.info("Stopped processing binlog events due to thread interruption");
    }
}
Also used: BlockingConsumer (io.debezium.function.BlockingConsumer), EventType (com.github.shyiko.mysql.binlog.event.EventType), EventHeaderV4 (com.github.shyiko.mysql.binlog.event.EventHeaderV4), EventHeader (com.github.shyiko.mysql.binlog.event.EventHeader), RotateEventData (com.github.shyiko.mysql.binlog.event.RotateEventData), UpdateRowsEventData (com.github.shyiko.mysql.binlog.event.UpdateRowsEventData), TableMapEventData (com.github.shyiko.mysql.binlog.event.TableMapEventData), WriteRowsEventData (com.github.shyiko.mysql.binlog.event.WriteRowsEventData), QueryEventData (com.github.shyiko.mysql.binlog.event.QueryEventData), EventData (com.github.shyiko.mysql.binlog.event.EventData), GtidEventData (com.github.shyiko.mysql.binlog.event.GtidEventData), DeleteRowsEventData (com.github.shyiko.mysql.binlog.event.DeleteRowsEventData)
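The cast in handleEvent works because a method reference only has to match the shape of the functional interface: any method that takes a SourceRecord and may throw InterruptedException is a valid BlockingConsumer<SourceRecord>. A minimal hypothetical sketch of that compatibility (the class and the empty enqueueRecord body are illustrative, not the actual BinlogReader code):

import org.apache.kafka.connect.source.SourceRecord;

import io.debezium.function.BlockingConsumer;

class ConsumerReferenceSketch {

    // Same shape as BlockingConsumer<SourceRecord>.accept: one SourceRecord in,
    // void out, InterruptedException permitted.
    void enqueueRecord(SourceRecord record) throws InterruptedException {
        // buffer the record for the connector's poll() loop (omitted in this sketch)
    }

    BlockingConsumer<SourceRecord> asConsumer() {
        // No adapter needed; the method reference satisfies the functional interface.
        return this::enqueueRecord;
    }
}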

Aggregations

BlockingConsumer (io.debezium.function.BlockingConsumer) 2
DeleteRowsEventData (com.github.shyiko.mysql.binlog.event.DeleteRowsEventData) 1
EventData (com.github.shyiko.mysql.binlog.event.EventData) 1
EventHeader (com.github.shyiko.mysql.binlog.event.EventHeader) 1
EventHeaderV4 (com.github.shyiko.mysql.binlog.event.EventHeaderV4) 1
EventType (com.github.shyiko.mysql.binlog.event.EventType) 1
GtidEventData (com.github.shyiko.mysql.binlog.event.GtidEventData) 1
QueryEventData (com.github.shyiko.mysql.binlog.event.QueryEventData) 1
RotateEventData (com.github.shyiko.mysql.binlog.event.RotateEventData) 1
TableMapEventData (com.github.shyiko.mysql.binlog.event.TableMapEventData) 1
UpdateRowsEventData (com.github.shyiko.mysql.binlog.event.UpdateRowsEventData) 1
WriteRowsEventData (com.github.shyiko.mysql.binlog.event.WriteRowsEventData) 1
Envelope (io.debezium.data.Envelope) 1
TableSchema (io.debezium.relational.TableSchema) 1
BitSet (java.util.BitSet) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
Schema (org.apache.kafka.connect.data.Schema) 1
Struct (org.apache.kafka.connect.data.Struct) 1
SourceRecord (org.apache.kafka.connect.source.SourceRecord) 1