Use of io.debezium.document.Document in project debezium by debezium.
The class ConnectorOutputTest, method runConnector.
/**
* Run the connector described by the supplied test specification.
*
* @param spec the test specification
* @param callback the function that should be called when the connector is stopped
*/
protected void runConnector(TestSpecification spec, CompletionCallback callback) {
    PreviousContext preRunContext = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
    final Configuration environmentConfig = Configuration.copy(spec.environment()).build();
    final Configuration connectorConfig = spec.config();
    String[] ignorableFieldNames = environmentConfig.getString(ENV_IGNORE_FIELDS, "").split(",");
    final Set<String> ignorableFields = Arrays.stream(ignorableFieldNames).map(String::trim).collect(Collectors.toSet());
    String[] globallyIgnorableFieldNames = globallyIgnorableFieldNames();
    if (globallyIgnorableFieldNames != null && globallyIgnorableFieldNames.length != 0) {
        ignorableFields.addAll(Arrays.stream(globallyIgnorableFieldNames).map(String::trim).collect(Collectors.toSet()));
    }
    final SchemaAndValueConverter keyConverter = new SchemaAndValueConverter(environmentConfig, true);
    final SchemaAndValueConverter valueConverter = new SchemaAndValueConverter(environmentConfig, false);
    final TestData testData = spec.testData();
    // Get any special comparators ...
    final Map<String, RecordValueComparator> comparatorsByFieldName = new HashMap<>();
    addValueComparatorsByFieldPath(comparatorsByFieldName::put);
    final Map<String, RecordValueComparator> comparatorsBySchemaName = new HashMap<>();
    addValueComparatorsBySchemaName(comparatorsBySchemaName::put);
    RuntimeException runError = null;
    CompletionResult problem = new CompletionResult(callback);
    try {
        // Set up the test data ...
        final PreviewIterator<Document> expectedRecords = Iterators.preview(testData.read());
        final Consumer<Document> recorder = testData::write;
        // We need something that will measure the amount of time since our consumer last saw a record ...
        TimeSince timeSinceLastRecord = Threads.timeSince(Clock.SYSTEM);
        // We'll keep the last 10 expected and actual records so that there is some context if they don't match ...
        Queue<SourceRecord> actualRecordHistory = fixedSizeQueue(10);
        Queue<SourceRecord> expectedRecordHistory = fixedSizeQueue(10);
        // Define what happens for each record ...
        ConsumerCompletion result = new ConsumerCompletion();
        Consumer<SourceRecord> consumer = (actualRecord) -> {
            PreviousContext prev = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
            try {
                Testing.debug("actual record: " + SchemaUtil.asString(actualRecord));
                timeSinceLastRecord.reset();
                // Record the actual in the history ...
                actualRecordHistory.add(actualRecord);
                // And possibly hand it to the test's recorder ...
                try {
                    Document jsonRecord = serializeSourceRecord(actualRecord, keyConverter, valueConverter);
                    if (jsonRecord != null) {
                        recorder.accept(jsonRecord);
                    }
                } catch (IOException e) {
                    String msg = "Error converting SourceRecord to JSON";
                    Testing.debug(msg);
                    throw new ConnectException(msg, e);
                }
                if (expectedRecords != null) {
                    // Get the test's next expected record ...
                    if (!expectedRecords.hasNext()) {
                        // We received an actual record but don't have or expect one ...
                        String msg = "Source record found but nothing expected";
                        result.error();
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    }
                    Document expected = expectedRecords.next();
                    if (isEndCommand(expected)) {
                        result.error();
                        String msg = "Source record was found but not expected: " + SchemaUtil.asString(actualRecord);
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    } else if (isCommand(expected)) {
                        Testing.debug("applying command: " + SchemaUtil.asString(expected));
                        applyCommand(expected, result);
                    } else {
                        try {
                            // Otherwise, build a record from the expected and add it to the history ...
                            SourceRecord expectedRecord = rehydrateSourceRecord(expected, keyConverter, valueConverter);
                            expectedRecordHistory.add(expectedRecord);
                            Testing.debug("expected record: " + SchemaUtil.asString(expectedRecord));
                            // And compare the records ...
                            try {
                                assertSourceRecordMatch(actualRecord, expectedRecord, ignorableFields::contains, comparatorsByFieldName, comparatorsBySchemaName);
                            } catch (AssertionError e) {
                                result.error();
                                String msg = "Source record with key " + SchemaUtil.asString(actualRecord.key()) + " did not match expected record: " + e.getMessage();
                                Testing.debug(msg);
                                throw new MismatchRecordException(e, msg, actualRecordHistory, expectedRecordHistory);
                            }
                        } catch (IOException e) {
                            result.exception();
                            String msg = "Error converting JSON to SourceRecord";
                            Testing.debug(msg);
                            throw new ConnectException(msg, e);
                        }
                    }
                    if (!expectedRecords.hasNext()) {
                        // We expect no more records, so stop the connector ...
                        result.stop();
                        String msg = "Stopping connector after no more expected records found";
                        Testing.debug(msg);
                        throw new StopConnectorException(msg);
                    }
                    // Peek at the next record to see if it is a command ...
                    Document nextExpectedRecord = expectedRecords.peek();
                    if (isCommand(nextExpectedRecord)) {
                        // Consume it and apply it ...
                        applyCommand(expectedRecords.next(), result);
                    }
                }
            } finally {
                prev.restore();
            }
        };
        // Set up the configuration for the engine to include the connector configuration and apply as defaults
        // the environment and engine parameters ...
        Configuration engineConfig = Configuration.copy(connectorConfig)
                .withDefault(environmentConfig)
                .withDefault(EmbeddedEngine.ENGINE_NAME, spec.name())
                .withDefault(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH)
                .withDefault(EmbeddedEngine.OFFSET_FLUSH_INTERVAL_MS, 0)
                .build();
        // Create the engine ...
        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(engineConfig)
                .notifying(consumer)
                .using(this.getClass().getClassLoader())
                .using(problem)
                .build();
        long connectorTimeoutInSeconds = environmentConfig.getLong(ENV_CONNECTOR_TIMEOUT_IN_SECONDS, 10);
        // Get ready to run the connector one or more times ...
        do {
            // Each time, create a thread that will stop our connector if we don't get enough results ...
            Thread timeoutThread = Threads.timeout(spec.name() + "-connector-output", connectorTimeoutInSeconds, TimeUnit.SECONDS, timeSinceLastRecord, engine::stop);
            // But plan to stop our timeout thread as soon as the connector completes ...
            result.uponCompletion(timeoutThread::interrupt);
            timeoutThread.start();
            // Run the connector and block until the connector is stopped by the timeout thread or until
            // an exception is thrown within the connector (perhaps by the consumer) ...
            Testing.debug("Starting connector");
            result.reset();
            engine.run();
        } while (result.get() == ExecutionResult.RESTART_REQUESTED);
    } catch (IOException e) {
        runError = new RuntimeException("Error reading test data: " + e.getMessage(), e);
    } catch (RuntimeException t) {
        runError = t;
    } finally {
        // And clean up everything ...
        try {
            testData.close();
        } catch (IOException e) {
            // Record the close failure only if it would not mask an earlier error ...
            if (runError == null) {
                runError = new RuntimeException("Error closing test data: " + e.getMessage(), e);
            }
        } finally {
            try {
                keyConverter.close();
            } finally {
                try {
                    valueConverter.close();
                } finally {
                    preRunContext.restore();
                }
            }
        }
    }
    if (runError != null) {
        throw runError;
    }
    if (problem.hasError()) {
        Throwable error = problem.error();
        if (error instanceof AssertionError) {
            fail(problem.message());
        } else if (error instanceof MismatchRecordException) {
            MismatchRecordException mismatch = (MismatchRecordException) error;
            LinkedList<SourceRecord> actualHistory = mismatch.getActualRecords();
            LinkedList<SourceRecord> expectedHistory = mismatch.getExpectedRecords();
            Testing.print("");
            Testing.print("FAILURE in connector integration test '" + spec.name() + "' in class " + getClass());
            Testing.print("   actual record:   " + SchemaUtil.asString(actualHistory.getLast()));
            Testing.print("   expected record: " + SchemaUtil.asString(expectedHistory.getLast()));
            Testing.print(mismatch.getMessage());
            Testing.print("");
            AssertionError cause = mismatch.getError();
            if (cause != null) {
                throw cause;
            }
            fail(problem.message());
        } else if (error instanceof RuntimeException) {
            throw (RuntimeException) error;
        } else {
            throw new RuntimeException(error);
        }
    }
}
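The record histories above come from fixedSizeQueue(10), so only the ten most recent actual and expected records are kept for context in a mismatch report. The sketch below illustrates the idea of such a bounded queue; it is a stand-in under stated assumptions, not Debezium's own helper, and the class name FixedSizeQueueSketch is hypothetical.

import java.util.LinkedList;
import java.util.Queue;

// Hypothetical sketch of a bounded history queue: adding past the capacity
// evicts the oldest element, so at most the N most recent records are kept.
public class FixedSizeQueueSketch {
    public static <T> Queue<T> fixedSizeQueue(int maxSize) {
        return new LinkedList<T>() {
            @Override
            public boolean add(T element) {
                while (size() >= maxSize) {
                    // Evict the oldest entry to make room ...
                    remove();
                }
                return super.add(element);
            }
        };
    }
}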
Use of io.debezium.document.Document in project debezium by debezium.
The class ConnectorOutputTest, method serializeSourceRecord.
/**
* Serialize the source record to document form.
*
* @param record the record; may not be null
* @param keyConverter the converter for the record key's schema and payload
* @param valueConverter the converter for the record value's schema and payload
* @return the document form of the source record; never null
* @throws IOException if there is an error converting the key or value
*/
private Document serializeSourceRecord(SourceRecord record, SchemaAndValueConverter keyConverter, SchemaAndValueConverter valueConverter) throws IOException {
    Document keyAndSchema = keyConverter.serialize(record.topic(), record.keySchema(), record.key());
    Document valueAndSchema = valueConverter.serialize(record.topic(), record.valueSchema(), record.value());
    Document sourcePartition = Document.create().putAll(record.sourcePartition());
    Document sourceOffset = Document.create().putAll(record.sourceOffset());
    Document parent = Document.create();
    parent.set("sourcePartition", sourcePartition);
    parent.set("sourceOffset", sourceOffset);
    parent.set("topic", record.topic());
    parent.set("kafkaPartition", record.kafkaPartition());
    parent.set("keySchema", keyAndSchema.getDocument("schema"));
    parent.set("key", keyAndSchema.getDocument("payload"));
    parent.set("valueSchema", valueAndSchema.getDocument("schema"));
    parent.set("value", valueAndSchema.getDocument("payload"));
    return parent;
}
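For reference, the document built by this method can be read back with the io.debezium.document accessors; the field names below mirror the parent.set(...) calls above. This is a minimal sketch that assumes a record and both converters are already in scope:

// Round-trip sketch: serialize a SourceRecord, then pull the parts back out.
Document doc = serializeSourceRecord(record, keyConverter, valueConverter);
String topic = doc.getString("topic");
Integer kafkaPartition = doc.getInteger("kafkaPartition");
Document keySchema = doc.getDocument("keySchema");
Document key = doc.getDocument("key");
Document valueSchema = doc.getDocument("valueSchema");
Document value = doc.getDocument("value");
Document sourceOffset = doc.getDocument("sourceOffset");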
Use of io.debezium.document.Document in project debezium by debezium.
The class MySqlTaskContextTest, method historyRecord.
protected HistoryRecord historyRecord(String serverName, String binlogFilename, int position, String gtids, int event, int row, boolean snapshot) {
    Document source = Document.create(SourceInfo.SERVER_NAME_KEY, serverName);
    Document pos = Document.create(SourceInfo.BINLOG_FILENAME_OFFSET_KEY, binlogFilename, SourceInfo.BINLOG_POSITION_OFFSET_KEY, position);
    if (row >= 0) {
        pos = pos.set(SourceInfo.BINLOG_ROW_IN_EVENT_OFFSET_KEY, row);
    }
    if (event >= 0) {
        pos = pos.set(SourceInfo.EVENTS_TO_SKIP_OFFSET_KEY, event);
    }
    if (gtids != null && gtids.trim().length() != 0) {
        pos = pos.set(SourceInfo.GTID_SET_KEY, gtids);
    }
    if (snapshot) {
        pos = pos.set(SourceInfo.SNAPSHOT_KEY, true);
    }
    return new HistoryRecord(Document.create(HistoryRecord.Fields.SOURCE, source, HistoryRecord.Fields.POSITION, pos));
}
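A hedged usage sketch of this helper follows. The server name, binlog filename, and position are invented; the negative event/row arguments and null GTID string exercise the branches that omit those keys; and it assumes HistoryRecord exposes its underlying document via document():

// Illustrative call with made-up values: no GTIDs, no event/row offsets, not a snapshot.
HistoryRecord record = historyRecord("my-server", "mysql-bin.000003", 154, null, -1, -1, false);
// The record nests two documents keyed by the HistoryRecord.Fields constants:
// the source (server name) and the position (binlog filename and position).
Document source = record.document().getDocument(HistoryRecord.Fields.SOURCE);
Document position = record.document().getDocument(HistoryRecord.Fields.POSITION);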
Use of io.debezium.document.Document in project debezium by debezium.
The class NonStreamingWal2JsonMessageDecoder, method processMessage.
@Override
public void processMessage(ByteBuffer buffer, ReplicationMessageProcessor processor, TypeRegistry typeRegistry) throws SQLException, InterruptedException {
try {
if (!buffer.hasArray()) {
throw new IllegalStateException("Invalid buffer received from PG server during streaming replication");
}
final byte[] source = buffer.array();
final byte[] content = Arrays.copyOfRange(source, buffer.arrayOffset(), source.length);
final Document message = DocumentReader.floatNumbersAsTextReader().read(content);
LOGGER.debug("Message arrived for decoding {}", message);
final long txId = message.getLong("xid");
final String timestamp = message.getString("timestamp");
final long commitTime = dateTime.systemTimestamp(timestamp);
final Array changes = message.getArray("change");
Iterator<Entry> it = changes.iterator();
while (it.hasNext()) {
Value value = it.next().getValue();
processor.process(new Wal2JsonReplicationMessage(txId, commitTime, value.asDocument(), containsMetadata, !it.hasNext(), typeRegistry));
}
} catch (final IOException e) {
throw new ConnectException(e);
}
}
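The decoder expects one complete wal2json transaction document per message: a top-level xid and timestamp plus a change array with one entry per row-level operation. A minimal hand-written payload in that shape (all values invented for illustration) parses the same way:

// Hand-written wal2json-style payload; the values are invented.
String json = "{\"xid\": 563, \"timestamp\": \"2018-01-09 12:00:00.000000+00\","
        + " \"change\": [{\"kind\": \"insert\", \"schema\": \"public\", \"table\": \"t\"}]}";
Document message = DocumentReader.floatNumbersAsTextReader().read(json);
long txId = message.getLong("xid");
String timestamp = message.getString("timestamp");
Array changes = message.getArray("change");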
Use of io.debezium.document.Document in project debezium by debezium.
The class StreamingWal2JsonMessageDecoder, method doProcessMessage.
private void doProcessMessage(ReplicationMessageProcessor processor, TypeRegistry typeRegistry, byte[] content, boolean lastMessage) throws IOException, SQLException, InterruptedException {
    final Document change = DocumentReader.floatNumbersAsTextReader().read(content);
    LOGGER.trace("Change arrived for decoding {}", change);
    processor.process(new Wal2JsonReplicationMessage(txId, commitTime, change, containsMetadata, lastMessage, typeRegistry));
}