Use of com.github.shyiko.mysql.binlog.event.EventData in project debezium by debezium.
The class BinlogReader, method handleEvent.
protected void handleEvent(Event event) {
    if (event == null) return;

    // Update the source offset info. Note that the client returns the value in *milliseconds*, even though the binlog
    // contains only *seconds* precision ...
    EventHeader eventHeader = event.getHeader();
    source.setBinlogTimestampSeconds(eventHeader.getTimestamp() / 1000L); // client returns milliseconds, but only second precision
    source.setBinlogServerId(eventHeader.getServerId());

    EventType eventType = eventHeader.getEventType();
    if (eventType == EventType.ROTATE) {
        EventData eventData = event.getData();
        RotateEventData rotateEventData;
        if (eventData instanceof EventDeserializer.EventDataWrapper) {
            // When custom event data deserializers are registered, the client wraps the data;
            // getInternal() returns the internally-deserialized RotateEventData.
            rotateEventData = (RotateEventData) ((EventDeserializer.EventDataWrapper) eventData).getInternal();
        } else {
            rotateEventData = (RotateEventData) eventData;
        }
        source.setBinlogStartPoint(rotateEventData.getBinlogFilename(), rotateEventData.getBinlogPosition());
    } else if (eventHeader instanceof EventHeaderV4) {
        EventHeaderV4 trackableEventHeader = (EventHeaderV4) eventHeader;
        source.setEventPosition(trackableEventHeader.getPosition(), trackableEventHeader.getEventLength());
    }
    // If there is a handler for this event, forward the event to it ...
    try {
        // Forward the event to the handler ...
        eventHandlers.getOrDefault(eventType, this::ignoreEvent).accept(event);

        // Generate heartbeat message if the time is right
        heartbeat.heartbeat((BlockingConsumer<SourceRecord>) this::enqueueRecord);

        // Capture that we've completed another event ...
        source.completeEvent();

        if (skipEvent) {
            // We're in the mode of skipping events and we just skipped this one, so decrement our skip count ...
            --initialEventsToSkip;
            skipEvent = initialEventsToSkip > 0;
        }
    } catch (RuntimeException e) {
        // There was an error in the event handler, so propagate the failure to Kafka Connect ...
        logReaderState();
        failed(e, "Error processing binlog event");
        // Do not stop the client, since Kafka Connect should stop the connector on its own
        // (and doing it here may cause problems the second time it is stopped).
        // We can clear the listeners, though, so that we ignore all future events ...
        eventHandlers.clear();
        logger.info("Error processing binlog event, and propagating to Kafka Connect so it stops this connector. Future binlog events read before the connector is shut down will be ignored.");
    } catch (InterruptedException e) {
        // Most likely because this reader was stopped and our thread was interrupted ...
        Thread.interrupted(); // clear the interrupt status so the thread can exit cleanly
        eventHandlers.clear();
        logger.info("Stopped processing binlog events due to thread interruption");
    }
}
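
Since this page tracks uses of EventData: the unwrap-then-cast pattern in the ROTATE branch generalizes to other event types. Below is a minimal sketch of such a helper, assuming the same mysql-binlog-connector-java types; the class name EventDataUtil and the Class-token signature are illustrative, not part of Debezium.

import com.github.shyiko.mysql.binlog.event.Event;
import com.github.shyiko.mysql.binlog.event.EventData;
import com.github.shyiko.mysql.binlog.event.deserialization.EventDeserializer;

final class EventDataUtil {

    private EventDataUtil() {
    }

    // Unwraps EventDeserializer.EventDataWrapper (present when custom event data
    // deserializers are registered with the client) and casts to the expected subtype.
    static <T extends EventData> T unwrapData(Event event, Class<T> type) {
        EventData data = event.getData();
        if (data instanceof EventDeserializer.EventDataWrapper) {
            data = ((EventDeserializer.EventDataWrapper) data).getInternal();
        }
        return type.cast(data);
    }
}

With this helper, the ROTATE branch above reduces to RotateEventData rotateEventData = EventDataUtil.unwrapData(event, RotateEventData.class);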
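
For context, here is a minimal sketch of how a reader like this might be wired to the binlog client and how the eventHandlers dispatch map consulted by getOrDefault(...) could be populated. The field names, the chosen event types, and the use of plain java.util.function.Consumer (Debezium's handlers are blocking consumers that may throw InterruptedException, hence the catch block above) are simplifying assumptions.

import java.util.EnumMap;
import java.util.Map;
import java.util.function.Consumer;

import com.github.shyiko.mysql.binlog.BinaryLogClient;
import com.github.shyiko.mysql.binlog.event.Event;
import com.github.shyiko.mysql.binlog.event.EventType;

public class BinlogReaderSketch {

    // Dispatch table consulted by handleEvent: one handler per event type.
    private final Map<EventType, Consumer<Event>> eventHandlers = new EnumMap<>(EventType.class);

    private final BinaryLogClient client;

    public BinlogReaderSketch(String host, int port, String user, String password) {
        client = new BinaryLogClient(host, port, user, password);

        // Register handlers for the event types we care about; anything else
        // falls through to ignoreEvent via getOrDefault(...).
        eventHandlers.put(EventType.WRITE_ROWS, this::handleInsert);
        eventHandlers.put(EventType.EXT_WRITE_ROWS, this::handleInsert);

        // handleEvent(Event) matches BinaryLogClient.EventListener.onEvent(Event),
        // so a method reference suffices to receive every deserialized event.
        client.registerEventListener(this::handleEvent);
    }

    protected void handleEvent(Event event) {
        if (event == null) return;
        EventType eventType = event.getHeader().getEventType();
        eventHandlers.getOrDefault(eventType, this::ignoreEvent).accept(event);
    }

    private void handleInsert(Event event) {
        System.out.println("insert: " + event);
    }

    private void ignoreEvent(Event event) {
        // Deliberately do nothing; unhandled event types are skipped.
    }
}

Calling client.connect() starts streaming; from then on every deserialized event flows through handleEvent.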