Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
Class SnapshotReaderIT, method shouldCreateSnapshotOfSingleDatabaseWithSchemaChanges:
@Test
public void shouldCreateSnapshotOfSingleDatabaseWithSchemaChanges() throws Exception {
    config = simpleConfig().with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true).build();
    context = new MySqlTaskContext(config);
    context.start();
    reader = new SnapshotReader("snapshot", context);
    reader.uponCompletion(completed::countDown);
    reader.generateInsertEvents();
    // Start the snapshot ...
    reader.start();
    // Poll for records ...
    // Testing.Print.enable();
    List<SourceRecord> records = null;
    KeyValueStore store = KeyValueStore.createForTopicsBeginningWith(DATABASE.getServerName() + ".");
    SchemaChangeHistory schemaChanges = new SchemaChangeHistory(DATABASE.getServerName());
    while ((records = reader.poll()) != null) {
        records.forEach(record -> {
            VerifyRecord.isValid(record);
            store.add(record);
            schemaChanges.add(record);
        });
    }
    // The last poll should always return null ...
    assertThat(records).isNull();
    // There should be 14 schema change records (the DDL statements plus the initial SET statement) ...
    assertThat(schemaChanges.recordCount()).isEqualTo(14);
    assertThat(schemaChanges.databaseCount()).isEqualTo(2);
    assertThat(schemaChanges.databases()).containsOnly(DATABASE.getDatabaseName(), "");
    // Check the records via the store ...
    assertThat(store.collectionCount()).isEqualTo(5);
    Collection products = store.collection(DATABASE.getDatabaseName(), productsTableName());
    assertThat(products.numberOfCreates()).isEqualTo(9);
    assertThat(products.numberOfUpdates()).isEqualTo(0);
    assertThat(products.numberOfDeletes()).isEqualTo(0);
    assertThat(products.numberOfReads()).isEqualTo(0);
    assertThat(products.numberOfTombstones()).isEqualTo(0);
    assertThat(products.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(products.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection products_on_hand = store.collection(DATABASE.getDatabaseName(), "products_on_hand");
    assertThat(products_on_hand.numberOfCreates()).isEqualTo(9);
    assertThat(products_on_hand.numberOfUpdates()).isEqualTo(0);
    assertThat(products_on_hand.numberOfDeletes()).isEqualTo(0);
    assertThat(products_on_hand.numberOfReads()).isEqualTo(0);
    assertThat(products_on_hand.numberOfTombstones()).isEqualTo(0);
    assertThat(products_on_hand.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(products_on_hand.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection customers = store.collection(DATABASE.getDatabaseName(), "customers");
    assertThat(customers.numberOfCreates()).isEqualTo(4);
    assertThat(customers.numberOfUpdates()).isEqualTo(0);
    assertThat(customers.numberOfDeletes()).isEqualTo(0);
    assertThat(customers.numberOfReads()).isEqualTo(0);
    assertThat(customers.numberOfTombstones()).isEqualTo(0);
    assertThat(customers.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(customers.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection orders = store.collection(DATABASE.getDatabaseName(), "orders");
    assertThat(orders.numberOfCreates()).isEqualTo(5);
    assertThat(orders.numberOfUpdates()).isEqualTo(0);
    assertThat(orders.numberOfDeletes()).isEqualTo(0);
    assertThat(orders.numberOfReads()).isEqualTo(0);
    assertThat(orders.numberOfTombstones()).isEqualTo(0);
    assertThat(orders.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(orders.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection timetest = store.collection(DATABASE.getDatabaseName(), "dbz_342_timetest");
    assertThat(timetest.numberOfCreates()).isEqualTo(1);
    assertThat(timetest.numberOfUpdates()).isEqualTo(0);
    assertThat(timetest.numberOfDeletes()).isEqualTo(0);
    assertThat(timetest.numberOfReads()).isEqualTo(0);
    assertThat(timetest.numberOfTombstones()).isEqualTo(0);
    assertThat(timetest.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(timetest.numberOfValueSchemaChanges()).isEqualTo(1);
    final List<Struct> timerecords = new ArrayList<>();
    timetest.forEach(val -> {
        timerecords.add(((Struct) val.value()).getStruct("after"));
    });
    Struct after = timerecords.get(0);
    assertThat(after.get("c1")).isEqualTo(toMicroSeconds("PT517H51M04.78S"));
    assertThat(after.get("c2")).isEqualTo(toMicroSeconds("-PT13H14M50S"));
    assertThat(after.get("c3")).isEqualTo(toMicroSeconds("-PT733H0M0.001S"));
    assertThat(after.get("c4")).isEqualTo(toMicroSeconds("-PT1H59M59.001S"));
    assertThat(after.get("c5")).isEqualTo(toMicroSeconds("-PT838H59M58.999999S"));
    // Make sure the snapshot completed ...
    if (completed.await(10, TimeUnit.SECONDS)) {
        // completed the snapshot ...
        Testing.print("completed the snapshot");
    } else {
        fail("failed to complete the snapshot within 10 seconds");
    }
}
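The time assertions above rely on a toMicroSeconds helper that is not shown in this excerpt. A minimal sketch, assuming it simply parses an ISO-8601 duration string (the format used in the assertions) and converts it to microseconds; the actual implementation may differ:

    private long toMicroSeconds(String duration) {
        // Parse e.g. "PT517H51M04.78S" or "-PT13H14M50S" and express it as microseconds
        return java.time.Duration.parse(duration).toNanos() / 1_000;
    }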
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
Class SnapshotReaderIT, method shouldCreateSnapshotOfSingleDatabase:
@Test
public void shouldCreateSnapshotOfSingleDatabase() throws Exception {
    config = simpleConfig().build();
    context = new MySqlTaskContext(config);
    context.start();
    reader = new SnapshotReader("snapshot", context);
    reader.uponCompletion(completed::countDown);
    reader.generateInsertEvents();
    // Start the snapshot ...
    reader.start();
    // Poll for records ...
    // Testing.Print.enable();
    List<SourceRecord> records = null;
    KeyValueStore store = KeyValueStore.createForTopicsBeginningWith(DATABASE.getServerName() + ".");
    SchemaChangeHistory schemaChanges = new SchemaChangeHistory(DATABASE.getServerName());
    while ((records = reader.poll()) != null) {
        records.forEach(record -> {
            VerifyRecord.isValid(record);
            store.add(record);
            schemaChanges.add(record);
        });
    }
    // The last poll should always return null ...
    assertThat(records).isNull();
    // There should be no schema changes ...
    assertThat(schemaChanges.recordCount()).isEqualTo(0);
    // Check the records via the store ...
    assertThat(store.collectionCount()).isEqualTo(5);
    Collection products = store.collection(DATABASE.getDatabaseName(), productsTableName());
    assertThat(products.numberOfCreates()).isEqualTo(9);
    assertThat(products.numberOfUpdates()).isEqualTo(0);
    assertThat(products.numberOfDeletes()).isEqualTo(0);
    assertThat(products.numberOfReads()).isEqualTo(0);
    assertThat(products.numberOfTombstones()).isEqualTo(0);
    assertThat(products.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(products.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection products_on_hand = store.collection(DATABASE.getDatabaseName(), "products_on_hand");
    assertThat(products_on_hand.numberOfCreates()).isEqualTo(9);
    assertThat(products_on_hand.numberOfUpdates()).isEqualTo(0);
    assertThat(products_on_hand.numberOfDeletes()).isEqualTo(0);
    assertThat(products_on_hand.numberOfReads()).isEqualTo(0);
    assertThat(products_on_hand.numberOfTombstones()).isEqualTo(0);
    assertThat(products_on_hand.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(products_on_hand.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection customers = store.collection(DATABASE.getDatabaseName(), "customers");
    assertThat(customers.numberOfCreates()).isEqualTo(4);
    assertThat(customers.numberOfUpdates()).isEqualTo(0);
    assertThat(customers.numberOfDeletes()).isEqualTo(0);
    assertThat(customers.numberOfReads()).isEqualTo(0);
    assertThat(customers.numberOfTombstones()).isEqualTo(0);
    assertThat(customers.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(customers.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection orders = store.collection(DATABASE.getDatabaseName(), "orders");
    assertThat(orders.numberOfCreates()).isEqualTo(5);
    assertThat(orders.numberOfUpdates()).isEqualTo(0);
    assertThat(orders.numberOfDeletes()).isEqualTo(0);
    assertThat(orders.numberOfReads()).isEqualTo(0);
    assertThat(orders.numberOfTombstones()).isEqualTo(0);
    assertThat(orders.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(orders.numberOfValueSchemaChanges()).isEqualTo(1);
    Collection timetest = store.collection(DATABASE.getDatabaseName(), "dbz_342_timetest");
    assertThat(timetest.numberOfCreates()).isEqualTo(1);
    assertThat(timetest.numberOfUpdates()).isEqualTo(0);
    assertThat(timetest.numberOfDeletes()).isEqualTo(0);
    assertThat(timetest.numberOfReads()).isEqualTo(0);
    assertThat(timetest.numberOfTombstones()).isEqualTo(0);
    assertThat(timetest.numberOfKeySchemaChanges()).isEqualTo(1);
    assertThat(timetest.numberOfValueSchemaChanges()).isEqualTo(1);
    final List<Struct> timerecords = new ArrayList<>();
    timetest.forEach(val -> {
        timerecords.add(((Struct) val.value()).getStruct("after"));
    });
    Struct after = timerecords.get(0);
    assertThat(after.get("c1")).isEqualTo(toMicroSeconds("PT517H51M04.78S"));
    assertThat(after.get("c2")).isEqualTo(toMicroSeconds("-PT13H14M50S"));
    assertThat(after.get("c3")).isEqualTo(toMicroSeconds("-PT733H0M0.001S"));
    assertThat(after.get("c4")).isEqualTo(toMicroSeconds("-PT1H59M59.001S"));
    assertThat(after.get("c5")).isEqualTo(toMicroSeconds("-PT838H59M58.999999S"));
    // Make sure the snapshot completed ...
    if (completed.await(10, TimeUnit.SECONDS)) {
        // completed the snapshot ...
        Testing.print("completed the snapshot");
    } else {
        fail("failed to complete the snapshot within 10 seconds");
    }
}
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
Class SnapshotReaderIT, method shouldCreateSnapshotSchemaOnlyRecovery_exception:
@Test(expected = ConnectException.class)
public void shouldCreateSnapshotSchemaOnlyRecovery_exception() throws Exception {
    config = simpleConfig().with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.SCHEMA_ONLY_RECOVERY).build();
    context = new MySqlTaskContext(config);
    context.start();
    reader = new SnapshotReader("snapshot", context);
    reader.uponCompletion(completed::countDown);
    reader.generateInsertEvents();
    // Start the snapshot ...
    reader.start();
    // Poll for records ...
    // Testing.Print.enable();
    List<SourceRecord> records = null;
    KeyValueStore store = KeyValueStore.createForTopicsBeginningWith(DATABASE.getServerName() + ".");
    SchemaChangeHistory schemaChanges = new SchemaChangeHistory(DATABASE.getServerName());
    while ((records = reader.poll()) != null) {
        records.forEach(record -> {
            VerifyRecord.isValid(record);
            store.add(record);
            schemaChanges.add(record);
        });
    }
    // Expected to fail with a ConnectException: SCHEMA_ONLY_RECOVERY requires existing binlog information to recover from, and none is available here
}
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
Class RecordMakers, method schemaChanges:
/**
 * Produce a schema change record for the given DDL statements.
 *
 * @param databaseName the name of the database that is affected by the DDL statements; may not be null
 * @param ddlStatements the DDL statements; may not be null
 * @param consumer the consumer for all produced records; may not be null
 * @return the number of records produced; will be 0 or more
 */
public int schemaChanges(String databaseName, String ddlStatements, BlockingConsumer<SourceRecord> consumer) {
    String topicName = topicSelector.getPrimaryTopic();
    Integer partition = 0;
    Struct key = schemaChangeRecordKey(databaseName);
    Struct value = schemaChangeRecordValue(databaseName, ddlStatements);
    SourceRecord record = new SourceRecord(source.partition(), source.offset(), topicName, partition,
            schemaChangeKeySchema, key, schemaChangeValueSchema, value);
    try {
        consumer.accept(record);
        return 1;
    } catch (InterruptedException e) {
        return 0;
    }
}
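A minimal usage sketch for this method; the recordMakers instance and the DDL string below are illustrative, not taken from the source:

    // Collect the produced schema change records into a list. List::add is
    // compatible with BlockingConsumer<SourceRecord> because it throws no
    // checked exception, so a method reference suffices.
    List<SourceRecord> produced = new ArrayList<>();
    int count = recordMakers.schemaChanges("inventory",
            "CREATE TABLE widgets (id INTEGER PRIMARY KEY)",
            produced::add);
    // count is 1 if the consumer accepted the record, or 0 if it was interrupted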
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
Class RecordsSnapshotProducer, method sendCurrentRecord:
private void sendCurrentRecord(BlockingConsumer<ChangeEvent> consumer) throws InterruptedException {
    SourceRecord record = currentRecord.get();
    if (record == null) {
        return;
    }
    if (logger.isDebugEnabled()) {
        logger.debug("sending read event '{}'", record);
    }
    // send the last generated record
    consumer.accept(new ChangeEvent(record));
}
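sendCurrentRecord is private, so callers interact with it only by supplying the BlockingConsumer<ChangeEvent> to the producer. A minimal consumer sketch, assuming a queue-backed hand-off (the queue itself is illustrative):

    // BlockingQueue.put(T) throws InterruptedException, matching the
    // BlockingConsumer.accept(T) contract, so a method reference suffices.
    BlockingQueue<ChangeEvent> queue = new LinkedBlockingQueue<>();
    BlockingConsumer<ChangeEvent> consumer = queue::put;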