Use of io.debezium.document.Array in project debezium by debezium.
The class Wal2JsonReplicationMessage, method transform:
private List<ReplicationMessage.Column> transform(final Document data, final String nameField, final String typeField, final String valueField, final String optionalsField) {
    final Array columnNames = data.getArray(nameField);
    final Array columnTypes = data.getArray(typeField);
    final Array columnValues = data.getArray(valueField);
    final Array columnOptionals = data.getArray(optionalsField);
    if (columnNames.size() != columnTypes.size() || columnNames.size() != columnValues.size()) {
        throw new ConnectException("Column related arrays do not have the same size");
    }
    final List<ReplicationMessage.Column> columns = new ArrayList<>(columnNames.size());
    for (int i = 0; i < columnNames.size(); i++) {
        final String columnName = columnNames.get(i).asString();
        final String columnTypeName = columnTypes.get(i).asString();
        // The optionals array may be absent from the message, so guard against null
        final boolean columnOptional = columnOptionals != null ? columnOptionals.get(i).asBoolean() : false;
        final Value rawValue = columnValues.get(i);
        final PostgresType columnType = typeRegistry.get(parseType(columnName, columnTypeName));
        // Defer the actual value conversion until the column value is requested
        columns.add(new AbstractReplicationMessageColumn(columnName, columnType, columnTypeName, columnOptional, true) {
            @Override
            public Object getValue(PgConnectionSupplier connection, boolean includeUnknownDatatypes) {
                return Wal2JsonReplicationMessage.this.getValue(columnName, columnType, columnTypeName, rawValue, connection, includeUnknownDatatypes);
            }
        });
    }
    return columns;
}
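The three arrays here are the parallel "columnnames", "columntypes", and "columnvalues" lists that wal2json emits per change, walked in lock step by index; that is what makes the size check above mandatory, since a mismatch would otherwise surface as an out-of-bounds error mid-row. A minimal, self-contained sketch of the same pattern using only the io.debezium.document API (the JSON literal and class name are illustrative; DocumentReader.defaultReader() is the library's standard JSON reader):

import java.io.IOException;
import io.debezium.document.Array;
import io.debezium.document.Document;
import io.debezium.document.DocumentReader;

public class ParallelArraysDemo {
    public static void main(String[] args) throws IOException {
        // A wal2json-style change fragment; the key names match those passed to transform(...)
        String json = "{ \"columnnames\": [\"id\", \"name\"],"
                + " \"columntypes\": [\"integer\", \"text\"],"
                + " \"columnvalues\": [1, \"hello\"] }";
        Document data = DocumentReader.defaultReader().read(json);

        Array names = data.getArray("columnnames");
        Array types = data.getArray("columntypes");
        Array values = data.getArray("columnvalues");

        // Walk the three arrays in lock step, as transform(...) does
        for (int i = 0; i < names.size(); i++) {
            System.out.printf("%s (%s) = %s%n",
                    names.get(i).asString(),
                    types.get(i).asString(),
                    values.get(i));
        }
    }
}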
Use of io.debezium.document.Array in project debezium by debezium.
The class NonStreamingWal2JsonMessageDecoder, method processMessage:
@Override
public void processMessage(ByteBuffer buffer, ReplicationMessageProcessor processor, TypeRegistry typeRegistry) throws SQLException, InterruptedException {
    try {
        if (!buffer.hasArray()) {
            throw new IllegalStateException("Invalid buffer received from PG server during streaming replication");
        }
        final byte[] source = buffer.array();
        final byte[] content = Arrays.copyOfRange(source, buffer.arrayOffset(), source.length);
        // Read floating-point numbers as text to avoid losing precision
        final Document message = DocumentReader.floatNumbersAsTextReader().read(content);
        LOGGER.debug("Message arrived for decoding {}", message);
        final long txId = message.getLong("xid");
        final String timestamp = message.getString("timestamp");
        final long commitTime = dateTime.systemTimestamp(timestamp);
        final Array changes = message.getArray("change");
        Iterator<Entry> it = changes.iterator();
        while (it.hasNext()) {
            Value value = it.next().getValue();
            // The last change in the array also marks the end of the transaction
            processor.process(new Wal2JsonReplicationMessage(txId, commitTime, value.asDocument(), containsMetadata, !it.hasNext(), typeRegistry));
        }
    } catch (final IOException e) {
        throw new ConnectException(e);
    }
}
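Each buffer is expected to hold one complete wal2json transaction: a document with "xid", "timestamp", and a "change" array, where the last change entry doubles as the commit marker. A short sketch of that iteration with a hand-written payload standing in for the server bytes (the payload content is illustrative and the timestamp parsing from the original is omitted):

import java.io.IOException;
import java.util.Iterator;
import io.debezium.document.Array;
import io.debezium.document.Document;
import io.debezium.document.DocumentReader;
import io.debezium.document.Value;

public class ChangeArrayDemo {
    public static void main(String[] args) throws IOException {
        String payload = "{ \"xid\": 1234, \"timestamp\": \"2018-01-01 00:00:00.000000+00\","
                + " \"change\": [ { \"kind\": \"insert\", \"table\": \"t1\" },"
                + " { \"kind\": \"update\", \"table\": \"t2\" } ] }";
        Document message = DocumentReader.floatNumbersAsTextReader().read(payload);

        long txId = message.getLong("xid");
        Array changes = message.getArray("change");

        Iterator<Array.Entry> it = changes.iterator();
        while (it.hasNext()) {
            Value value = it.next().getValue();
            boolean lastInTx = !it.hasNext(); // same trick the decoder uses to flag the commit
            System.out.println("xid=" + txId + " change=" + value.asDocument() + " last=" + lastInTx);
        }
    }
}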
Use of io.debezium.document.Array in project debezium by debezium.
The class SimpleSourceConnectorOutputTest, method appendCommand:
protected void appendCommand(Path results, Document command) throws IOException {
    assertThat(command).isNotNull();
    assertThat(Files.exists(results)).isTrue();
    // Read the existing array of documents, append the command, and rewrite the whole file
    Array arrayOfDocuments = readResults(results.toFile());
    arrayOfDocuments.add(command);
    try (OutputStream stream = new FileOutputStream(results.toFile())) {
        ArrayWriter.prettyWriter().write(arrayOfDocuments, stream);
    }
    if (Testing.Debug.isEnabled()) {
        String content = IoUtil.read(results.toFile());
        Testing.debug("expected results file '" + results + "' after appending command:");
        Testing.debug(content);
    }
}
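The append is a read-modify-write of the whole file: readResults (a test helper not shown in this excerpt) parses the file into an Array, the command document is appended, and ArrayWriter.prettyWriter() serializes everything back. A minimal round-trip sketch of that idea; ArrayReader.defaultReader() is assumed here as the reading counterpart of the ArrayWriter used above:

import java.io.IOException;
import io.debezium.document.Array;
import io.debezium.document.ArrayReader;
import io.debezium.document.ArrayWriter;
import io.debezium.document.Document;

public class AppendDemo {
    public static void main(String[] args) throws IOException {
        // Round trip: parse an array, append a document, serialize it back
        Array commands = ArrayReader.defaultReader().readArray("[ { \"cmd\": \"stop\" } ]");
        commands.add(Document.create("cmd", "restart"));
        System.out.println(ArrayWriter.prettyWriter().write(commands));
    }
}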
Use of io.debezium.document.Array in project debezium by debezium.
The class SimpleSourceConnectorOutputTest, method assertExpectedRecords:
protected void assertExpectedRecords(Path path, int batchCount, int recordsPerBatch) throws IOException {
    assertThat(Files.exists(path)).isTrue();
    if (Testing.Debug.isEnabled()) {
        String content = IoUtil.read(path.toFile());
        Testing.debug("expected results file '" + path + "':");
        Testing.debug(content);
    }
    Array expected = readResults(path.toFile());
    int expectedId = 0;
    int expectedBatch = 1;
    int expectedRecord = 0;
    Iterator<Array.Entry> docs = expected.iterator();
    while (docs.hasNext()) {
        Document doc = docs.next().getValue().asDocument();
        if (doc.has(CONTROL_KEY)) {
            // This is a command so skip ...
            continue;
        }
        ++expectedId;
        ++expectedRecord;
        if (expectedRecord > recordsPerBatch) {
            ++expectedBatch;
            expectedRecord = 1;
        }
        // Verify the source partition and offset
        Document sourcePartition = doc.getDocument("sourcePartition");
        assertThat(sourcePartition.getString("source")).isEqualTo("simple");
        Document offset = doc.getDocument("sourceOffset");
        assertThat(offset.getInteger("id")).isEqualTo(expectedId);
        assertThat(doc.getString("topic")).isEqualTo(TOPIC_NAME);
        assertThat(doc.getInteger("kafkaPartition")).isEqualTo(1);
        // Verify the key and value
        Document key = doc.getDocument("key");
        assertThat(key.getInteger("id")).isEqualTo(expectedId);
        Document value = doc.getDocument("value");
        assertThat(value.getInteger("batch")).isEqualTo(expectedBatch);
        assertThat(value.getInteger("record")).isEqualTo(expectedRecord);
        // Verify the key and value schemas
        Document keySchema = doc.getDocument("keySchema");
        assertThat(keySchema.getString("name")).isEqualTo("simple.key");
        assertThat(keySchema.getString("type")).isEqualToIgnoringCase(Schema.Type.STRUCT.name());
        assertThat(keySchema.getBoolean("optional")).isEqualTo(false);
        Array keySchemaFields = keySchema.getArray("fields");
        Document keyIdField = keySchemaFields.get(0).asDocument();
        assertRequiredFieldSchema(keyIdField, "id", Schema.Type.INT32);
        Document valueSchema = doc.getDocument("valueSchema");
        assertThat(valueSchema.getString("name")).isEqualTo("simple.value");
        assertThat(valueSchema.getString("type")).isEqualToIgnoringCase(Schema.Type.STRUCT.name());
        assertThat(valueSchema.getBoolean("optional")).isEqualTo(false);
        Array valueSchemaFields = valueSchema.getArray("fields");
        Document batchField = valueSchemaFields.get(0).asDocument();
        assertRequiredFieldSchema(batchField, "batch", Schema.Type.INT32);
        Document recordField = valueSchemaFields.get(1).asDocument();
        assertRequiredFieldSchema(recordField, "record", Schema.Type.INT32);
    }
    assertThat(expectedBatch).isEqualTo(batchCount);
    assertThat(expectedId).isEqualTo(batchCount * recordsPerBatch);
}
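The assertRequiredFieldSchema helper is invoked above but not included in this excerpt. A hypothetical reconstruction inferred purely from its call sites; the key names "field", "type", and "optional" are assumptions based on how the surrounding assertions read Connect field schemas, not the actual Debezium helper:

// Hypothetical reconstruction; key names are assumed, not taken from the source
protected void assertRequiredFieldSchema(Document fieldSchema, String fieldName, Schema.Type type) {
    assertThat(fieldSchema.getString("field")).isEqualTo(fieldName);
    assertThat(fieldSchema.getString("type")).isEqualToIgnoringCase(type.name());
    assertThat(fieldSchema.getBoolean("optional")).isEqualTo(false);
}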