Use of io.debezium.DebeziumException in project debezium-server-batch by memiiso.
The class AbstractChangeConsumer, method handleBatch.
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, DebeziumEngine.RecordCommitter<ChangeEvent<Object, Object>> committer)
        throws InterruptedException {
    LOGGER.trace("Received {} events", records.size());
    Instant start = Instant.now();
    Map<String, List<DebeziumEvent>> events = records.stream()
            .map((ChangeEvent<Object, Object> e) -> {
                try {
                    return new DebeziumEvent(e.destination(),
                            // valDeserializer.deserialize(e.destination(), getBytes(e.value())),
                            getPayload(e.destination(), e.value()),
                            e.key() == null ? null : keyDeserializer.deserialize(e.destination(), getBytes(e.key())),
                            mapper.readTree(getBytes(e.value())).get("schema"),
                            e.key() == null ? null : mapper.readTree(getBytes(e.key())).get("schema"));
                } catch (IOException ex) {
                    throw new DebeziumException(ex);
                }
            })
            .collect(Collectors.groupingBy(DebeziumEvent::destination));
    long numUploadedEvents = 0;
    for (Map.Entry<String, List<DebeziumEvent>> destinationEvents : events.entrySet()) {
        // Group this destination's events by their value schema. If the batch contains
        // schema change events, this guarantees a uniform schema within each upload.
        Map<JsonNode, List<DebeziumEvent>> eventsGroupedBySchema = destinationEvents.getValue().stream()
                .collect(Collectors.groupingBy(DebeziumEvent::valueSchema));
        LOGGER.debug("Batch got {} records with {} different schemas", destinationEvents.getValue().size(), eventsGroupedBySchema.keySet().size());
        for (List<DebeziumEvent> schemaEvents : eventsGroupedBySchema.values()) {
            numUploadedEvents += this.uploadDestination(destinationEvents.getKey(), schemaEvents);
        }
    }
    // Mark every event as processed so that its offset is saved to file periodically.
    for (ChangeEvent<Object, Object> record : records) {
        LOGGER.trace("Processed event '{}'", record);
        committer.markProcessed(record);
    }
    committer.markBatchFinished();
    this.logConsumerProgress(numUploadedEvents);
    LOGGER.debug("Received:{} Processed:{} events", records.size(), numUploadedEvents);
    batchSizeWait.waitMs(numUploadedEvents, (int) Duration.between(start, Instant.now()).toMillis());
}
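For context, handleBatch is the callback the embedded Debezium engine invokes for each batch of change events. The following is a minimal sketch, assuming the standard io.debezium.engine API, of how such a consumer is wired into DebeziumEngine; the connector properties are illustrative placeholders.

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;
import java.util.Properties;

public class EngineWiring {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("name", "engine"); // placeholder configuration
        props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        // ... plus database.hostname, database.user, etc. for the chosen connector
        try (DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying((records, committer) -> {
                    // Same contract as handleBatch above: mark each record, then the batch.
                    for (ChangeEvent<String, String> record : records) {
                        committer.markProcessed(record);
                    }
                    committer.markBatchFinished();
                })
                .build()) {
            engine.run(); // blocks until the engine is stopped
        }
    }
}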
Use of io.debezium.DebeziumException in project debezium-server-batch by memiiso.
The class BatchBigqueryChangeConsumer, method uploadDestination.
@Override
public long uploadDestination(String destination, List<DebeziumEvent> data) {
    try {
        Instant start = Instant.now();
        final long numRecords;
        TableId tableId = getTableId(destination);
        DebeziumBigqueryEvent sampleEvent = new DebeziumBigqueryEvent(data.get(0));
        Schema schema = sampleEvent.getBigQuerySchema(castDeletedField);
        Clustering clustering = sampleEvent.getBigQueryClustering();
        // Configuration for a BigQuery load operation. A load configuration can be used
        // to load data into a table with a {@link com.google.cloud.WriteChannel}.
        WriteChannelConfiguration.Builder wCCBuilder = WriteChannelConfiguration
                .newBuilder(tableId, FormatOptions.json())
                .setWriteDisposition(JobInfo.WriteDisposition.valueOf(writeDisposition))
                .setClustering(clustering)
                .setTimePartitioning(timePartitioning)
                .setSchemaUpdateOptions(schemaUpdateOptions)
                .setCreateDisposition(JobInfo.CreateDisposition.valueOf(createDisposition))
                .setMaxBadRecords(0);
        if (schema != null) {
            LOGGER.trace("Setting schema to: {}", schema);
            wCCBuilder.setSchema(schema);
        }
        // WriteChannel implementation used to stream data into the BigQuery table.
        try (TableDataWriteChannel writer = bqClient.writer(wCCBuilder.build())) {
            // Construct a stream that writes bytes to the given channel.
            try (OutputStream stream = Channels.newOutputStream(writer)) {
                for (DebeziumEvent e : data) {
                    final JsonNode valNode = e.value();
                    if (valNode == null) {
                        LOGGER.warn("Null value received, skipping the entry! destination:{} key:{}", destination, getString(e.key()));
                        continue;
                    }
                    final String valData = mapper.writeValueAsString(valNode) + System.lineSeparator();
                    stream.write(valData.getBytes(StandardCharsets.UTF_8));
                }
            }
            Job job = writer.getJob().waitFor();
            JobStatistics.LoadStatistics jobStatistics = job.getStatistics();
            numRecords = jobStatistics.getOutputRows();
            if (job.isDone()) {
                LOGGER.debug("Data successfully loaded to {}. rows: {}, jobStatistics: {}", tableId, numRecords, jobStatistics);
            } else {
                throw new DebeziumException("BigQuery was unable to load into the table:" + tableId + "."
                        + "\nError:" + job.getStatus().getError()
                        + "\nBadRecords:" + jobStatistics.getBadRecords()
                        + "\nJobStatistics:" + jobStatistics);
            }
        }
        LOGGER.debug("Uploaded {} rows to:{}, upload time:{}, clusteredFields:{}", numRecords, tableId,
                Duration.between(start, Instant.now()).truncatedTo(ChronoUnit.SECONDS), clustering);
        return numRecords;
    } catch (BigQueryException | InterruptedException | IOException e) {
        throw new DebeziumException(e);
    }
}
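The method above wraps the standard google-cloud-bigquery load pattern: stream newline-delimited JSON through a write channel, then wait for the load job. A stripped-down sketch of that pattern, with placeholder dataset and table names and an illustrative one-row payload:

import com.google.cloud.bigquery.*;
import java.io.OutputStream;
import java.nio.channels.Channels;
import java.nio.charset.StandardCharsets;

public class BigQueryJsonLoad {
    public static void main(String[] args) throws Exception {
        BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService();
        TableId tableId = TableId.of("my_dataset", "my_table"); // placeholders
        WriteChannelConfiguration config = WriteChannelConfiguration
                .newBuilder(tableId, FormatOptions.json())
                .setWriteDisposition(JobInfo.WriteDisposition.WRITE_APPEND)
                .build();
        try (TableDataWriteChannel writer = bigquery.writer(config)) {
            // Closing the stream closes the channel, which starts the load job.
            try (OutputStream stream = Channels.newOutputStream(writer)) {
                stream.write("{\"id\": 1, \"name\": \"a\"}\n".getBytes(StandardCharsets.UTF_8));
            }
            Job job = writer.getJob().waitFor();
            if (job.getStatus().getError() != null) {
                throw new RuntimeException("Load failed: " + job.getStatus().getError());
            }
        }
    }
}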
Use of io.debezium.DebeziumException in project debezium-server-batch by memiiso.
The class StreamBigqueryChangeConsumer, method uploadDestination.
@Override
public long uploadDestination(String destination, List<DebeziumEvent> data) {
    long numRecords = data.size();
    Table table = getTable(destination, data.get(0));
    DataWriter writer = jsonStreamWriters.computeIfAbsent(destination, k -> getDataWriter(table));
    try {
        writer.append(new AppendContext(data, castDeletedField));
        writer.waitAppend();
    } catch (DescriptorValidationException | IOException e) {
        throw new DebeziumException("Failed to append data to stream " + writer.streamWriter.getStreamName(), e);
    }
    LOGGER.debug("Appended {} records to {} successfully.", numRecords, destination);
    return numRecords;
}
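The DataWriter and AppendContext types here are project-specific, but they build on the BigQuery Storage Write API. A hedged sketch of the underlying pattern, a JsonStreamWriter appending to a table's default stream; the project, dataset, table, and schema are illustrative placeholders:

import com.google.api.core.ApiFuture;
import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
import com.google.cloud.bigquery.storage.v1.TableFieldSchema;
import com.google.cloud.bigquery.storage.v1.TableName;
import com.google.cloud.bigquery.storage.v1.TableSchema;
import org.json.JSONArray;
import org.json.JSONObject;

public class StorageWriteSketch {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.of("my-project", "my_dataset", "my_table"); // placeholders
        TableSchema schema = TableSchema.newBuilder() // illustrative single-column schema
                .addFields(TableFieldSchema.newBuilder()
                        .setName("id")
                        .setType(TableFieldSchema.Type.INT64)
                        .setMode(TableFieldSchema.Mode.NULLABLE)
                        .build())
                .build();
        try (JsonStreamWriter writer = JsonStreamWriter.newBuilder(table.toString(), schema).build()) {
            JSONArray rows = new JSONArray();
            rows.put(new JSONObject().put("id", 1));
            ApiFuture<AppendRowsResponse> future = writer.append(rows);
            future.get(); // block until the append is acknowledged, like waitAppend() above
        }
    }
}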
Use of io.debezium.DebeziumException in project debezium by debezium.
The class MySqlJdbcContext, method readDatabaseCollations.
/**
 * Read the MySQL default character sets for existing databases.
 *
 * @return the map of database names with their default character sets; never null
 */
protected Map<String, DatabaseLocales> readDatabaseCollations() {
    logger.debug("Reading default database charsets");
    try {
        start();
        return jdbc.connect().queryAndMap(
                "SELECT schema_name, default_character_set_name, default_collation_name FROM information_schema.schemata",
                rs -> {
                    final Map<String, DatabaseLocales> charsets = new HashMap<>();
                    while (rs.next()) {
                        String dbName = rs.getString(1);
                        String charset = rs.getString(2);
                        String collation = rs.getString(3);
                        if (dbName != null && (charset != null || collation != null)) {
                            charsets.put(dbName, new DatabaseLocales(charset, collation));
                            logger.debug("\t{} = {}, {}", Strings.pad(dbName, 45, ' '),
                                    Strings.pad(charset, 45, ' '), Strings.pad(collation, 45, ' '));
                        }
                    }
                    return charsets;
                });
    } catch (SQLException e) {
        throw new DebeziumException("Error reading default database charsets: " + e.getMessage(), e);
    }
}
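queryAndMap is Debezium's JdbcConnection helper that runs a statement and hands the ResultSet to a mapping callback. For reference, the equivalent query in plain JDBC; the connection URL and credentials are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.HashMap;
import java.util.Map;

public class CharsetQuery {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306", "user", "password");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "SELECT schema_name, default_character_set_name, default_collation_name FROM information_schema.schemata")) {
            Map<String, String[]> charsets = new HashMap<>();
            while (rs.next()) {
                // database name -> { default charset, default collation }
                charsets.put(rs.getString(1), new String[]{ rs.getString(2), rs.getString(3) });
            }
            charsets.forEach((db, locales) -> System.out.println(db + " = " + locales[0] + ", " + locales[1]));
        }
    }
}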
Use of io.debezium.DebeziumException in project debezium by debezium.
The class MySqlSchema, method getValueConverters.
private static MySqlValueConverters getValueConverters(MySqlConnectorConfig configuration) {
    // Use MySQL-specific converters and schemas for values ...
    TemporalPrecisionMode timePrecisionMode = configuration.getTemporalPrecisionMode();
    DecimalMode decimalMode = configuration.getDecimalMode();
    String bigIntUnsignedHandlingModeStr = configuration.getConfig().getString(MySqlConnectorConfig.BIGINT_UNSIGNED_HANDLING_MODE);
    BigIntUnsignedHandlingMode bigIntUnsignedHandlingMode = BigIntUnsignedHandlingMode.parse(bigIntUnsignedHandlingModeStr);
    BigIntUnsignedMode bigIntUnsignedMode = bigIntUnsignedHandlingMode.asBigIntUnsignedMode();
    final boolean timeAdjusterEnabled = configuration.getConfig().getBoolean(MySqlConnectorConfig.ENABLE_TIME_ADJUSTER);
    // TODO With the MySQL connector rewrite, the error handling should also report binlog coordinates.
    return new MySqlValueConverters(decimalMode, timePrecisionMode, bigIntUnsignedMode,
            configuration.binaryHandlingMode(),
            timeAdjusterEnabled ? MySqlValueConverters::adjustTemporal : x -> x,
            (message, exception) -> {
                // Parsing-error handler: fail, warn, or silently skip depending on configuration.
                if (configuration.getEventProcessingFailureHandlingMode() == EventProcessingFailureHandlingMode.FAIL) {
                    throw new DebeziumException(message, exception);
                } else if (configuration.getEventProcessingFailureHandlingMode() == EventProcessingFailureHandlingMode.WARN) {
                    logger.warn(message, exception);
                }
            });
}
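The last constructor argument is a parsing-error handler whose behavior depends on the configured failure-handling mode. A self-contained illustration of that injection pattern follows; the names are illustrative, not Debezium's API.

import java.util.function.BiConsumer;

public class FailureHandlingSketch {
    enum Mode { FAIL, WARN, SKIP } // illustrative stand-in for EventProcessingFailureHandlingMode

    // Build a (message, exception) callback whose behavior depends on the mode,
    // mirroring how getValueConverters() above injects its handler.
    static BiConsumer<String, Throwable> handlerFor(Mode mode) {
        return (message, exception) -> {
            if (mode == Mode.FAIL) {
                throw new RuntimeException(message, exception);
            } else if (mode == Mode.WARN) {
                System.err.println("WARN: " + message);
            }
            // SKIP: swallow the error silently
        };
    }

    public static void main(String[] args) {
        handlerFor(Mode.WARN).accept("Invalid value", new IllegalArgumentException("bad temporal value"));
    }
}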