Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
The class UnwrapFromMongoDbEnvelopeTest, method shouldGenerateRecordForDeleteEvent.
@Test
public void shouldGenerateRecordForDeleteEvent() throws InterruptedException {
    BsonTimestamp ts = new BsonTimestamp(1000, 1);
    CollectionId collectionId = new CollectionId("rs0", "dbA", "c1");
    ObjectId objId = new ObjectId();
    Document obj = new Document("_id", objId);
    // given
    Document event = new Document().append("o", obj)
                                   .append("ns", "dbA.c1")
                                   .append("ts", ts)
                                   .append("h", Long.valueOf(12345678))
                                   .append("op", "d");
    RecordsForCollection records = recordMakers.forCollection(collectionId);
    records.recordEvent(event, 1002);
    assertThat(produced.size()).isEqualTo(2);
    SourceRecord record = produced.get(0);
    // when
    SourceRecord transformed = transformation.apply(record);
    Struct key = (Struct) transformed.key();
    Struct value = (Struct) transformed.value();
    // then assert key and its schema
    assertThat(key.schema()).isSameAs(transformed.keySchema());
    assertThat(key.schema().field("id").schema()).isEqualTo(SchemaBuilder.OPTIONAL_STRING_SCHEMA);
    assertThat(key.get("id")).isEqualTo(objId.toString());
    assertThat(value).isNull();
}
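The delete case is the interesting one: after the transformation, the document id survives in the record key while the value becomes null. Below is a minimal sketch of how a downstream consumer might act on that shape; the UnwrappedDeleteHandler class and its deleteById/upsert methods are hypothetical and not part of Debezium.

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

// Sketch: treat an unwrapped record with a null value as a delete for the
// document whose id is carried in the key, as the assertions above imply.
public class UnwrappedDeleteHandler {

    public void handle(SourceRecord transformed) {
        Struct key = (Struct) transformed.key();
        String id = key.getString("id");
        if (transformed.value() == null) {
            deleteById(id);
        }
        else {
            upsert(id, (Struct) transformed.value());
        }
    }

    private void deleteById(String id) {
        // remove whatever was stored under this id (illustrative stub)
    }

    private void upsert(String id, Struct value) {
        // apply the unwrapped document (illustrative stub)
    }
}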
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
The class AbstractReader, method poll.
@Override
public List<SourceRecord> poll() throws InterruptedException {
    // Before we do anything else, determine if there was a failure and throw that exception ...
    failureException = this.failure.get();
    if (failureException != null) {
        // Regardless, there may be records on the queue that will never be consumed.
        throw failureException;
    }
    // this reader has been stopped before it reached the success or failed end state, so clean up and abort
    if (!running.get()) {
        cleanupResources();
        throw new InterruptedException("Reader was stopped while polling");
    }
    logger.trace("Polling for next batch of records");
    List<SourceRecord> batch = new ArrayList<>(maxBatchSize);
    final Timer timeout = Threads.timer(Clock.SYSTEM, Temporals.max(pollInterval, ConfigurationDefaults.RETURN_CONTROL_INTERVAL));
    while (running.get() && (records.drainTo(batch, maxBatchSize) == 0) && !success.get()) {
        // No records are available even though the snapshot has not yet completed, so sleep for a bit ...
        metronome.pause();
        // Check for failure after waking up ...
        failureException = this.failure.get();
        if (failureException != null) {
            throw failureException;
        }
        if (timeout.expired()) {
            break;
        }
    }
    if (batch.isEmpty() && success.get() && records.isEmpty()) {
        // We found no records but the operation completed successfully, so we're done
        this.running.set(false);
        cleanupResources();
        return null;
    }
    pollComplete(batch);
    logger.trace("Completed batch of {} records", batch.size());
    return batch;
}
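poll() combines three concerns: surfacing an asynchronous failure, draining a queue into a bounded batch, and handing control back before the framework's timeout. The drain-or-wait core can be sketched with plain JDK types; the QueuePoller class and its field names are illustrative and not Debezium's actual reader.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.source.SourceRecord;

// Sketch: wait briefly for the first record, then drain whatever else is queued
// without blocking, so the caller regains control quickly.
public class QueuePoller {

    private final BlockingQueue<SourceRecord> queue;
    private final int maxBatchSize;

    public QueuePoller(BlockingQueue<SourceRecord> queue, int maxBatchSize) {
        this.queue = queue;
        this.maxBatchSize = maxBatchSize;
    }

    public List<SourceRecord> poll() throws InterruptedException {
        List<SourceRecord> batch = new ArrayList<>(maxBatchSize);
        SourceRecord first = queue.poll(500, TimeUnit.MILLISECONDS); // bounded wait
        if (first == null) {
            return batch; // nothing yet; the framework will simply call poll() again
        }
        batch.add(first);
        queue.drainTo(batch, maxBatchSize - 1); // non-blocking drain of the remainder
        return batch;
    }
}

Debezium's version adds the pieces the sketch omits: propagating failureException, honoring the running flag, and returning null once the reader has completed and its queue is empty.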
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
The class BinlogReader, method pollComplete.
@Override
protected void pollComplete(List<SourceRecord> batch) {
    // Record a bit about this batch ...
    int batchSize = batch.size();
    recordCounter += batchSize;
    totalRecordCounter.addAndGet(batchSize);
    if (batchSize > 0) {
        SourceRecord lastRecord = batch.get(batchSize - 1);
        lastOffset = lastRecord.sourceOffset();
        if (pollOutputDelay.hasElapsed()) {
            // We want to record the status ...
            long millisSinceLastOutput = clock.currentTimeInMillis() - previousOutputMillis;
            try {
                context.temporaryLoggingContext("binlog", () -> {
                    logger.info("{} records sent during previous {}, last recorded offset: {}",
                            recordCounter, Strings.duration(millisSinceLastOutput), lastOffset);
                });
            } finally {
                recordCounter = 0;
                previousOutputMillis += millisSinceLastOutput;
            }
        }
    }
}
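Aside from remembering the last source offset, the method only does throttled progress logging: it reports how many records were sent since the previous report, and only once pollOutputDelay has elapsed. The throttling idea in isolation can be sketched with plain JDK time handling; ProgressReporter and its fields are made-up names rather than Debezium's Clock and Threads utilities.

import java.time.Duration;

// Sketch: emit a status line at most once per interval, resetting the counter
// each time a line is printed.
public class ProgressReporter {

    private final long intervalMillis;
    private long previousOutputMillis = System.currentTimeMillis();
    private long recordCounter;

    public ProgressReporter(Duration interval) {
        this.intervalMillis = interval.toMillis();
    }

    public void onBatch(int batchSize, Object lastOffset) {
        recordCounter += batchSize;
        if (batchSize == 0) {
            return;
        }
        long millisSinceLastOutput = System.currentTimeMillis() - previousOutputMillis;
        if (millisSinceLastOutput >= intervalMillis) {
            System.out.println(recordCounter + " records sent during previous "
                    + millisSinceLastOutput + " ms, last recorded offset: " + lastOffset);
            recordCounter = 0;
            previousOutputMillis += millisSinceLastOutput;
        }
    }
}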
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
The class MongoDbConnectorIT, method shouldConsumeAllEventsFromDatabase.
@Test
public void shouldConsumeAllEventsFromDatabase() throws InterruptedException, IOException {
    // Use the DB configuration to define the connector's configuration ...
    config = TestHelper.getConfiguration().edit()
                       .with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
                       .with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
                       .with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
                       .build();
    // Set up the replication context for connections ...
    context = new MongoDbTaskContext(config);
    // Cleanup database
    TestHelper.cleanDatabase(primary(), "dbit");
    // Before starting the connector, add data to the databases ...
    storeDocuments("dbit", "simpletons", "simple_objects.json");
    storeDocuments("dbit", "restaurants", "restaurants1.json");
    // Start the connector ...
    start(MongoDbConnector.class, config);
    // ---------------------------------------------------------------------------------------------------------------
    // Consume all of the events due to startup and initialization of the database
    // ---------------------------------------------------------------------------------------------------------------
    SourceRecords records = consumeRecordsByTopic(12);
    records.topics().forEach(System.out::println);
    assertThat(records.recordsForTopic("mongo.dbit.simpletons").size()).isEqualTo(6);
    assertThat(records.recordsForTopic("mongo.dbit.restaurants").size()).isEqualTo(6);
    assertThat(records.topics().size()).isEqualTo(2);
    AtomicBoolean foundLast = new AtomicBoolean(false);
    records.forEach(record -> {
        // Check that all records are valid, and can be serialized and deserialized ...
        validate(record);
        verifyFromInitialSync(record, foundLast);
        verifyReadOperation(record);
    });
    assertThat(foundLast.get()).isTrue();
    // At this point, the connector has performed the initial sync and awaits changes ...
    // ---------------------------------------------------------------------------------------------------------------
    // Store more documents while the connector is still running
    // ---------------------------------------------------------------------------------------------------------------
    storeDocuments("dbit", "restaurants", "restaurants2.json");
    // Wait until we can consume the 4 documents we just added ...
    SourceRecords records2 = consumeRecordsByTopic(4);
    assertThat(records2.recordsForTopic("mongo.dbit.restaurants").size()).isEqualTo(4);
    assertThat(records2.topics().size()).isEqualTo(1);
    records2.forEach(record -> {
        // Check that all records are valid, and can be serialized and deserialized ...
        validate(record);
        verifyNotFromInitialSync(record);
        verifyCreateOperation(record);
    });
    // ---------------------------------------------------------------------------------------------------------------
    // Stop the connector
    // ---------------------------------------------------------------------------------------------------------------
    stopConnector();
    // ---------------------------------------------------------------------------------------------------------------
    // Store more documents while the connector is NOT running
    // ---------------------------------------------------------------------------------------------------------------
    storeDocuments("dbit", "restaurants", "restaurants3.json");
    // ---------------------------------------------------------------------------------------------------------------
    // Start the connector and we should only see the documents added since it was stopped
    // ---------------------------------------------------------------------------------------------------------------
    start(MongoDbConnector.class, config);
    // Wait until we can consume the 5 documents we just added ...
    SourceRecords records3 = consumeRecordsByTopic(5);
    assertThat(records3.recordsForTopic("mongo.dbit.restaurants").size()).isEqualTo(5);
    assertThat(records3.topics().size()).isEqualTo(1);
    records3.forEach(record -> {
        // Check that all records are valid, and can be serialized and deserialized ...
        validate(record);
        verifyNotFromInitialSync(record);
        verifyCreateOperation(record);
    });
    // ---------------------------------------------------------------------------------------------------------------
    // Store more documents while the connector is still running
    // ---------------------------------------------------------------------------------------------------------------
    storeDocuments("dbit", "restaurants", "restaurants4.json");
    // Wait until we can consume the 8 documents we just added ...
    SourceRecords records4 = consumeRecordsByTopic(8);
    assertThat(records4.recordsForTopic("mongo.dbit.restaurants").size()).isEqualTo(8);
    assertThat(records4.topics().size()).isEqualTo(1);
    records4.forEach(record -> {
        // Check that all records are valid, and can be serialized and deserialized ...
        validate(record);
        verifyNotFromInitialSync(record);
        verifyCreateOperation(record);
    });
    // ---------------------------------------------------------------------------------------------------------------
    // Create and then update a document
    // ---------------------------------------------------------------------------------------------------------------
    // Testing.Debug.enable();
    AtomicReference<String> id = new AtomicReference<>();
    primary().execute("create", mongo -> {
        MongoDatabase db1 = mongo.getDatabase("dbit");
        MongoCollection<Document> coll = db1.getCollection("arbitrary");
        coll.drop();
        // Insert the document with a generated ID ...
        Document doc = Document.parse("{\"a\": 1, \"b\": 2}");
        InsertOneOptions insertOptions = new InsertOneOptions().bypassDocumentValidation(true);
        coll.insertOne(doc, insertOptions);
        // Find the document to get the generated ID ...
        doc = coll.find().first();
        Testing.debug("Document: " + doc);
        id.set(doc.getObjectId("_id").toString());
        Testing.debug("Document ID: " + id.get());
    });
    primary().execute("update", mongo -> {
        MongoDatabase db1 = mongo.getDatabase("dbit");
        MongoCollection<Document> coll = db1.getCollection("arbitrary");
        // Find the document ...
        Document doc = coll.find().first();
        Testing.debug("Document: " + doc);
        Document filter = Document.parse("{\"a\": 1}");
        Document operation = Document.parse("{ \"$set\": { \"b\": 10 } }");
        coll.updateOne(filter, operation);
        doc = coll.find().first();
        Testing.debug("Document: " + doc);
    });
    // Wait until we can consume the 1 insert and 1 update ...
    SourceRecords insertAndUpdate = consumeRecordsByTopic(2);
    assertThat(insertAndUpdate.recordsForTopic("mongo.dbit.arbitrary").size()).isEqualTo(2);
    assertThat(insertAndUpdate.topics().size()).isEqualTo(1);
    records4.forEach(record -> {
        // Check that all records are valid, and can be serialized and deserialized ...
        validate(record);
        verifyNotFromInitialSync(record);
        verifyCreateOperation(record);
    });
    SourceRecord insertRecord = insertAndUpdate.allRecordsInOrder().get(0);
    SourceRecord updateRecord = insertAndUpdate.allRecordsInOrder().get(1);
    Testing.debug("Insert event: " + insertRecord);
    Testing.debug("Update event: " + updateRecord);
    Struct insertKey = (Struct) insertRecord.key();
    Struct updateKey = (Struct) updateRecord.key();
    String insertId = JSON.parse(insertKey.getString("id")).toString();
    String updateId = JSON.parse(updateKey.getString("id")).toString();
    assertThat(insertId).isEqualTo(id.get());
    assertThat(updateId).isEqualTo(id.get());
}
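The closing assertions recover the inserted document's id from each record key: the key's id field holds the id as a JSON string, which is why the test routes it through JSON.parse before comparing it with the ObjectId's string form. Below is a small sketch of the same extraction using the org.bson Document parser instead of the legacy com.mongodb.util.JSON helper; the KeyIds class is hypothetical and assumes the key id is MongoDB extended JSON such as {"$oid": "..."}.

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.bson.Document;

public final class KeyIds {

    // Returns the hex form of an ObjectId key taken from a MongoDB change record.
    public static String objectIdOf(SourceRecord record) {
        Struct key = (Struct) record.key();
        String extendedJson = key.getString("id"); // e.g. { "$oid" : "..." }
        // Wrap the value so it parses as a document, then read the typed _id back out.
        Document wrapper = Document.parse("{\"_id\": " + extendedJson + "}");
        return wrapper.getObjectId("_id").toHexString();
    }
}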
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
The class RecordMakersTest, method shouldGenerateRecordForDeleteEventWithoutTombstone.
@Test
@FixFor("DBZ-582")
public void shouldGenerateRecordForDeleteEventWithoutTombstone() throws InterruptedException {
    RecordMakers recordMakers = new RecordMakers(source, topicSelector, produced::add, false);
    BsonTimestamp ts = new BsonTimestamp(1000, 1);
    CollectionId collectionId = new CollectionId("rs0", "dbA", "c1");
    ObjectId objId = new ObjectId();
    Document obj = new Document("_id", objId);
    Document event = new Document().append("o", obj)
                                   .append("ns", "dbA.c1")
                                   .append("ts", ts)
                                   .append("h", Long.valueOf(12345678))
                                   .append("op", "d");
    RecordsForCollection records = recordMakers.forCollection(collectionId);
    records.recordEvent(event, 1002);
    assertThat(produced.size()).isEqualTo(1);
    SourceRecord record = produced.get(0);
    Struct key = (Struct) record.key();
    Struct value = (Struct) record.value();
    assertThat(key.schema()).isSameAs(record.keySchema());
    assertThat(key.get("id")).isEqualTo(JSONSerializers.getStrict().serialize(objId));
    assertThat(value.schema()).isSameAs(record.valueSchema());
    assertThat(value.getString(FieldName.AFTER)).isNull();
    assertThat(value.getString("patch")).isNull();
    assertThat(value.getString(FieldName.OPERATION)).isEqualTo(Operation.DELETE.code());
    assertThat(value.getInt64(FieldName.TIMESTAMP)).isEqualTo(1002L);
    Struct actualSource = value.getStruct(FieldName.SOURCE);
    Struct expectedSource = source.lastOffsetStruct("rs0", collectionId);
    assertThat(actualSource).isEqualTo(expectedSource);
}
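The assertions walk Debezium's change-event envelope for MongoDB: after and patch as JSON strings, the op code, the ts_ms timestamp, and the source struct. A compact sketch of reading those fields back from a consumed record follows; the EnvelopeReader class is illustrative, and accessing fields by string name assumes the record uses this envelope schema.

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

public final class EnvelopeReader {

    // Prints the envelope fields checked in the test above.
    public static void describe(SourceRecord record) {
        Struct value = (Struct) record.value();
        if (value == null) {
            System.out.println("tombstone"); // no envelope to read
            return;
        }
        String op = value.getString("op");         // c, u, d, or r
        Long tsMs = value.getInt64("ts_ms");       // event timestamp in milliseconds
        String after = value.getString("after");   // full document as JSON; null for deletes
        String patch = value.getString("patch");   // update description; null otherwise
        Struct source = value.getStruct("source"); // replica set, namespace, position, ...
        System.out.println("op=" + op + " ts_ms=" + tsMs + " after=" + after
                + " patch=" + patch + " source=" + source);
    }
}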