Use of io.debezium.document.Document in project debezium by debezium.
The class StreamingWal2JsonMessageDecoder, method processMessage:
@Override
public void processMessage(ByteBuffer buffer, ReplicationMessageProcessor processor, TypeRegistry typeRegistry) throws SQLException, InterruptedException {
    try {
        if (!buffer.hasArray()) {
            throw new IllegalStateException("Invalid buffer received from PG server during streaming replication");
        }
        final byte[] source = buffer.array();
        // Extend the array by two as we might need to append two chars; set them to space by default
        final byte[] content = Arrays.copyOfRange(source, buffer.arrayOffset(), source.length + 2);
        final int lastPos = content.length - 1;
        content[lastPos - 1] = SPACE;
        content[lastPos] = SPACE;
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Chunk arrived from database {}", new String(content));
        }
        if (!messageInProgress) {
            // We received the beginning of a transaction
            if (getLastNonWhiteChar(content) != RIGHT_BRACE) {
                // Chunks are enabled and we have an unfinished message, so we need to append the closing chars
                content[lastPos - 1] = RIGHT_BRACKET;
                content[lastPos] = RIGHT_BRACE;
            }
            final Document message = DocumentReader.defaultReader().read(content);
            txId = message.getLong("xid");
            timestamp = message.getString("timestamp");
            commitTime = dateTime.systemTimestamp(timestamp);
            messageInProgress = true;
            currentChunk = null;
        }
        else {
            byte firstChar = getFirstNonWhiteChar(content);
            // We are receiving changes in chunks
            if (firstChar == LEFT_BRACE) {
                // First change; this is valid JSON
                currentChunk = content;
            }
            else if (firstChar == COMMA) {
                // Following changes carry an extra comma at the start of the message
                doProcessMessage(processor, typeRegistry, currentChunk, false);
                replaceFirstNonWhiteChar(content, SPACE);
                currentChunk = content;
            }
            else if (firstChar == RIGHT_BRACKET) {
                // No more changes
                if (currentChunk != null) {
                    doProcessMessage(processor, typeRegistry, currentChunk, true);
                }
                messageInProgress = false;
            }
            else {
                throw new ConnectException("Chunk arrived in unexpected state");
            }
        }
    }
    catch (final IOException e) {
        throw new ConnectException(e);
    }
}
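
The decoder's chunk handling depends on a few byte-scanning helpers (getLastNonWhiteChar, getFirstNonWhiteChar, replaceFirstNonWhiteChar) that are not shown in this excerpt. A minimal sketch of how such helpers might look, assuming only ASCII whitespace needs to be skipped; this is illustrative, not Debezium's actual implementation:

// Hypothetical sketch of the helpers used above; the real methods may differ.
private static boolean isWhitespace(byte b) {
    return b == ' ' || b == '\t' || b == '\n' || b == '\r';
}

private static byte getLastNonWhiteChar(byte[] content) {
    for (int i = content.length - 1; i >= 0; i--) {
        if (!isWhitespace(content[i])) {
            return content[i];
        }
    }
    throw new IllegalArgumentException("No non-whitespace byte in message");
}

private static byte getFirstNonWhiteChar(byte[] content) {
    for (int i = 0; i < content.length; i++) {
        if (!isWhitespace(content[i])) {
            return content[i];
        }
    }
    throw new IllegalArgumentException("No non-whitespace byte in message");
}

private static void replaceFirstNonWhiteChar(byte[] content, byte replacement) {
    for (int i = 0; i < content.length; i++) {
        if (!isWhitespace(content[i])) {
            content[i] = replacement;
            return;
        }
    }
}

The scanning approach lets the decoder classify each chunk ('{' starts a change, ',' continues the list, ']' ends it) without parsing incomplete JSON.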
Use of io.debezium.document.Document in project debezium by debezium.
The class SourceInfoTest, method shouldComparePositionsWithDifferentFields:
@Test
public void shouldComparePositionsWithDifferentFields() {
    Document history = positionWith("mysql-bin.000008", 380941551,
            "01261278-6ade-11e6-b36a-42010af00790:1-378422946,"
                    + "4d1a4918-44ba-11e6-bf12-42010af0040b:1-11002284,"
                    + "716ec46f-d522-11e5-bb56-0242ac110004:1-34673215,"
                    + "96c2072e-e428-11e6-9590-42010a28002d:1-3,"
                    + "c627b2bc-9647-11e6-a886-42010af0044a:1-9541144",
            0, 0, true);
    Document current = positionWith("mysql-bin.000016", 645115324,
            "01261278-6ade-11e6-b36a-42010af00790:1-400944168,"
                    + "30efb117-e42a-11e6-ba9e-42010a28002e:1-9,"
                    + "4d1a4918-44ba-11e6-bf12-42010af0040b:1-11604379,"
                    + "621dc2f6-803b-11e6-acc1-42010af000a4:1-7963838,"
                    + "716ec46f-d522-11e5-bb56-0242ac110004:1-35850702,"
                    + "c627b2bc-9647-11e6-a886-42010af0044a:1-10426868,"
                    + "d079cbb3-750f-11e6-954e-42010af00c28:1-11544291:11544293-11885648",
            2, 1, false);
    assertThatDocument(current).isAfter(history);
    Set<String> excludes = Collections.singleton("96c2072e-e428-11e6-9590-42010a28002d");
    assertThatDocument(history).isAtOrBefore(current, (uuid) -> !excludes.contains(uuid));
}
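
The second assertion passes a predicate that excludes one server UUID, so transactions recorded only under 96c2072e-e428-11e6-9590-42010a28002d do not affect the position comparison. To illustrate the same filtering idea on the raw GTID set string, here is a hypothetical helper (not Debezium API) that drops excluded sources from a "uuid:ranges,uuid:ranges,..." string:

// Hypothetical helper, not part of Debezium: keep only GTID entries
// whose source UUID passes the predicate.
private static String filterGtidSet(String gtidSet, Predicate<String> includeSource) {
    return Arrays.stream(gtidSet.split(","))
            .filter(entry -> includeSource.test(entry.substring(0, entry.indexOf(':'))))
            .collect(Collectors.joining(","));
}

Applied to the history GTID set above with uuid -> !excludes.contains(uuid), this would remove the 96c2072e-... entry before the two positions are compared.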
Use of io.debezium.document.Document in project debezium by debezium.
The class SimpleSourceConnectorOutputTest, method assertExpectedRecords:
protected void assertExpectedRecords(Path path, int batchCount, int recordsPerBatch) throws IOException {
    assertThat(Files.exists(path)).isTrue();
    if (Testing.Debug.isEnabled()) {
        String content = IoUtil.read(path.toFile());
        Testing.debug("expected results file '" + path + "':");
        Testing.debug(content);
    }
    Array expected = readResults(path.toFile());
    int expectedId = 0;
    int expectedBatch = 1;
    int expectedRecord = 0;
    Iterator<Array.Entry> docs = expected.iterator();
    while (docs.hasNext()) {
        Document doc = docs.next().getValue().asDocument();
        if (doc.has(CONTROL_KEY)) {
            // This is a command, so skip it ...
            continue;
        }
        ++expectedId;
        ++expectedRecord;
        if (expectedRecord > recordsPerBatch) {
            ++expectedBatch;
            expectedRecord = 1;
        }
        Document sourcePartition = doc.getDocument("sourcePartition");
        assertThat(sourcePartition.getString("source")).isEqualTo("simple");
        Document offset = doc.getDocument("sourceOffset");
        assertThat(offset.getInteger("id")).isEqualTo(expectedId);
        assertThat(doc.getString("topic")).isEqualTo(TOPIC_NAME);
        assertThat(doc.getInteger("kafkaPartition")).isEqualTo(1);
        Document key = doc.getDocument("key");
        assertThat(key.getInteger("id")).isEqualTo(expectedId);
        Document value = doc.getDocument("value");
        assertThat(value.getInteger("batch")).isEqualTo(expectedBatch);
        assertThat(value.getInteger("record")).isEqualTo(expectedRecord);
        Document keySchema = doc.getDocument("keySchema");
        assertThat(keySchema.getString("name")).isEqualTo("simple.key");
        assertThat(keySchema.getString("type")).isEqualToIgnoringCase(Schema.Type.STRUCT.name());
        assertThat(keySchema.getBoolean("optional")).isEqualTo(false);
        Array keySchemaFields = keySchema.getArray("fields");
        Document keyIdField = keySchemaFields.get(0).asDocument();
        assertRequiredFieldSchema(keyIdField, "id", Schema.Type.INT32);
        Document valueSchema = doc.getDocument("valueSchema");
        assertThat(valueSchema.getString("name")).isEqualTo("simple.value");
        assertThat(valueSchema.getString("type")).isEqualToIgnoringCase(Schema.Type.STRUCT.name());
        assertThat(valueSchema.getBoolean("optional")).isEqualTo(false);
        Array valueSchemaFields = valueSchema.getArray("fields");
        Document batchField = valueSchemaFields.get(0).asDocument();
        assertRequiredFieldSchema(batchField, "batch", Schema.Type.INT32);
        Document recordField = valueSchemaFields.get(1).asDocument();
        assertRequiredFieldSchema(recordField, "record", Schema.Type.INT32);
    }
    assertThat(expectedBatch).isEqualTo(batchCount);
    assertThat(expectedId).isEqualTo(batchCount * recordsPerBatch);
}
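
The readResults helper is not shown in this excerpt. Assuming the expected-results file contains a single JSON array of record documents, it could plausibly be implemented with Debezium's own document API; a sketch, not the test's actual code:

// Sketch only: parse the expected-results file into a Debezium Array,
// assuming the file holds one JSON array of record documents.
protected Array readResults(File file) throws IOException {
    return ArrayReader.defaultReader().readArray(file);
}

Each entry in the resulting Array is then inspected as a Document, as the loop above shows.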
Use of io.debezium.document.Document in project debezium by debezium.
The class ConnectorOutputTest, method rehydrateSourceRecord:
private SourceRecord rehydrateSourceRecord(Document record, SchemaAndValueConverter keyConverter, SchemaAndValueConverter valueConverter) throws IOException {
    Document sourcePartitionDoc = record.getDocument("sourcePartition");
    Document sourceOffsetDoc = record.getDocument("sourceOffset");
    String topic = record.getString("topic");
    Integer kafkaPartition = record.getInteger("kafkaPartition");
    Document keySchema = record.getDocument("keySchema");
    Document valueSchema = record.getDocument("valueSchema");
    Document key = record.getDocument("key");
    Document value = record.getDocument("value");
    Document keyAndSchemaDoc = Document.create("schema", keySchema, "payload", key);
    Document valueAndSchemaDoc = Document.create("schema", valueSchema, "payload", value);
    SchemaAndValue keyWithSchema = keyConverter.deserialize(topic, keyAndSchemaDoc);
    SchemaAndValue valueWithSchema = valueConverter.deserialize(topic, valueAndSchemaDoc);
    Map<String, ?> sourcePartition = toMap(sourcePartitionDoc);
    Map<String, ?> sourceOffset = toMap(sourceOffsetDoc);
    return new SourceRecord(sourcePartition, sourceOffset, topic, kafkaPartition,
            keyWithSchema.schema(), keyWithSchema.value(),
            valueWithSchema.schema(), valueWithSchema.value());
}
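
The toMap helper converts the flat partition and offset Documents into the Map form that Kafka Connect's SourceRecord expects. A minimal sketch, assuming Document iterates its fields and all field values are simple scalars; nested documents and arrays are not handled here:

// Sketch only: flatten a Document into a Map for SourceRecord partitions/offsets.
// Assumes scalar field values; the actual helper may do more.
private Map<String, Object> toMap(Document doc) {
    Map<String, Object> map = new HashMap<>();
    doc.forEach(field -> map.put(field.getName().toString(), field.getValue().asObject()));
    return map;
}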