Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
In the class RecordsStreamProducerIT, the method shouldReceiveChangesForUpdatesWithColumnChanges:
@Test
public void shouldReceiveChangesForUpdatesWithColumnChanges() throws Exception {
    // add a new column
    String statements = "ALTER TABLE test_table ADD COLUMN uvc VARCHAR(2);" +
                        "ALTER TABLE test_table REPLICA IDENTITY FULL;" +
                        "UPDATE test_table SET uvc ='aa' WHERE pk = 1;";
    consumer = testConsumer(1);
    recordsProducer.start(consumer, blackHole);
    executeAndWait(statements);

    // the update should be the last record
    SourceRecord updatedRecord = consumer.remove();
    String topicName = topicName("public.test_table");
    assertEquals(topicName, updatedRecord.topic());
    VerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);

    // now check we got the updated value (the old value should be null, the new one whatever we set)
    List<SchemaAndValueField> expectedBefore = Collections.singletonList(new SchemaAndValueField("uvc", null, null));
    assertRecordSchemaAndValues(expectedBefore, updatedRecord, Envelope.FieldName.BEFORE);
    List<SchemaAndValueField> expectedAfter = Collections.singletonList(new SchemaAndValueField("uvc", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "aa"));
    assertRecordSchemaAndValues(expectedAfter, updatedRecord, Envelope.FieldName.AFTER);

    // rename a column
    statements = "ALTER TABLE test_table RENAME COLUMN uvc to xvc;" +
                 "UPDATE test_table SET xvc ='bb' WHERE pk = 1;";
    consumer.expects(1);
    executeAndWait(statements);
    updatedRecord = consumer.remove();
    VerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);

    // now check we got the updated value (the old value should be the previous one, the new one whatever we set)
    expectedBefore = Collections.singletonList(new SchemaAndValueField("xvc", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "aa"));
    assertRecordSchemaAndValues(expectedBefore, updatedRecord, Envelope.FieldName.BEFORE);
    expectedAfter = Collections.singletonList(new SchemaAndValueField("xvc", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "bb"));
    assertRecordSchemaAndValues(expectedAfter, updatedRecord, Envelope.FieldName.AFTER);

    // drop a column
    statements = "ALTER TABLE test_table DROP COLUMN xvc;" +
                 "UPDATE test_table SET text ='update' WHERE pk = 1;";
    consumer.expects(1);
    executeAndWait(statements);
    updatedRecord = consumer.remove();
    VerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);

    // change a column type
    statements = "ALTER TABLE test_table ADD COLUMN modtype INTEGER;" +
                 "INSERT INTO test_table (pk,modtype) VALUES (2,1);";
    consumer.expects(1);
    executeAndWait(statements);
    updatedRecord = consumer.remove();
    VerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 2);
    assertRecordSchemaAndValues(Collections.singletonList(new SchemaAndValueField("modtype", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 1)), updatedRecord, Envelope.FieldName.AFTER);

    statements = "ALTER TABLE test_table ALTER COLUMN modtype TYPE SMALLINT;" +
                 "UPDATE test_table SET modtype = 2 WHERE pk = 2;";
    consumer.expects(1);
    executeAndWait(statements);
    updatedRecord = consumer.remove();
    VerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 2);
    assertRecordSchemaAndValues(Collections.singletonList(new SchemaAndValueField("modtype", SchemaBuilder.OPTIONAL_INT16_SCHEMA, (short) 1)), updatedRecord, Envelope.FieldName.BEFORE);
    assertRecordSchemaAndValues(Collections.singletonList(new SchemaAndValueField("modtype", SchemaBuilder.OPTIONAL_INT16_SCHEMA, (short) 2)), updatedRecord, Envelope.FieldName.AFTER);
}
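The ALTER TABLE test_table REPLICA IDENTITY FULL statement at the top is what makes the old column values available in the before image; without it, Postgres logical decoding only ships the key columns of the old row. As a rough illustration of what assertRecordSchemaAndValues is inspecting after the rename step, the before/after images can be read straight off the Kafka Connect envelope. This is a minimal sketch using the plain Connect API (Struct is org.apache.kafka.connect.data.Struct, Envelope is io.debezium.data.Envelope), not the helper's actual implementation:

    // Sketch only: reading the Debezium change envelope directly.
    Struct envelope = (Struct) updatedRecord.value();
    Struct before = envelope.getStruct(Envelope.FieldName.BEFORE); // old row image, requires REPLICA IDENTITY FULL
    Struct after = envelope.getStruct(Envelope.FieldName.AFTER);   // new row image
    assertEquals("aa", before.getString("xvc"));
    assertEquals("bb", after.getString("xvc"));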
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
In the class RecordsStreamProducerIT, the method shouldProcessIntervalDelete:
@Test
@FixFor("DBZ-259")
public void shouldProcessIntervalDelete() throws Exception {
    final String statements = "INSERT INTO table_with_interval VALUES (default, 'Foo', default);" +
                              "INSERT INTO table_with_interval VALUES (default, 'Bar', default);" +
                              "DELETE FROM table_with_interval WHERE id = 1;";
    consumer = testConsumer(4);
    recordsProducer.start(consumer, blackHole);
    executeAndWait(statements);

    final String topicPrefix = "public.table_with_interval";
    final String topicName = topicName(topicPrefix);
    final String pk = "id";
    assertRecordInserted(topicPrefix, pk, 1);
    assertRecordInserted(topicPrefix, pk, 2);

    // first entry removed
    SourceRecord record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidDelete(record, pk, 1);

    // followed by a tombstone
    record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidTombstone(record, pk, 1);
}
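A Debezium delete arrives as a pair: a delete event whose value carries the before image, followed by a tombstone with the same key and a null value so Kafka log compaction can eventually drop all records for that key. A hedged sketch of the shape the last two assertions are checking (using hypothetical local names deleteRecord and tombstoneRecord for the two records removed above; this is not VerifyRecord's real code):

    // Sketch: what a delete + tombstone pair looks like at the Connect API level.
    Struct deleteValue = (Struct) deleteRecord.value();
    assertNotNull(deleteValue.getStruct(Envelope.FieldName.BEFORE)); // old row image
    assertNull(deleteValue.getStruct(Envelope.FieldName.AFTER));     // no new row for a delete
    assertNull(tombstoneRecord.value());                             // tombstone: same key, null value
    assertEquals(deleteRecord.key(), tombstoneRecord.key());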
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
In the class RecordsStreamProducerIT, the method shouldReceiveChangesForInsertsWithPostgisArrayTypes:
@Test(timeout = 30000)
public void shouldReceiveChangesForInsertsWithPostgisArrayTypes() throws Exception {
    TestHelper.executeDDL("postgis_create_tables.ddl");
    // spatial_ref_sys produces a ton of records in the postgis schema
    consumer = testConsumer(1, "public");
    consumer.setIgnoreExtraRecords(true);
    recordsProducer.start(consumer, blackHole);

    // need to wait for all the spatial_ref_sys records to flow through and be ignored.
    // this exceeds the normal 2s timeout.
    TestHelper.execute("INSERT INTO public.dummy_table DEFAULT VALUES;");
    consumer.await(TestHelper.waitTimeForRecords() * 10, TimeUnit.SECONDS);
    while (true) {
        if (!consumer.isEmpty()) {
            SourceRecord record = consumer.remove();
            if (record.topic().endsWith(".public.dummy_table")) {
                break;
            }
        }
    }

    // now do it for actual testing
    // postgis types
    consumer.expects(1);
    assertInsert(INSERT_POSTGIS_ARRAY_TYPES_STMT, schemaAndValuesForPostgisArrayTypes());
}
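The while (true) loop drains whatever the consumer has buffered until the marker insert into dummy_table shows up, which is how the test skips past the thousands of spatial_ref_sys rows created by the PostGIS DDL; only the @Test(timeout = 30000) annotation bounds that loop. A possible variant with an explicit deadline, sketched against the same test-consumer calls used above (not part of the original test), would fail faster with a clearer message:

    // Sketch: same drain loop, but with an in-loop deadline instead of relying on the JUnit timeout.
    long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(30);
    boolean sawMarker = false;
    while (!sawMarker && System.currentTimeMillis() < deadline) {
        if (!consumer.isEmpty()) {
            SourceRecord record = consumer.remove();
            sawMarker = record.topic().endsWith(".public.dummy_table");
        }
    }
    assertTrue("marker record for dummy_table never arrived", sawMarker);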
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
In the class RecordsStreamProducerIT, the helper method assertRecordInserted:
private SourceRecord assertRecordInserted(String expectedTopicName, String pkColumn, int pk) throws InterruptedException {
    assertFalse("records not generated", consumer.isEmpty());
    SourceRecord insertedRecord = consumer.remove();
    assertEquals(topicName(expectedTopicName), insertedRecord.topic());
    VerifyRecord.isValidInsert(insertedRecord, pkColumn, pk);
    return insertedRecord;
}
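The interval test above calls this helper for its two inserts. A short usage sketch, assuming (as in that test) that the key schema exposes the SERIAL primary key column id as an int32:

    // Usage sketch: assert the insert and then inspect the returned record further.
    SourceRecord first = assertRecordInserted("public.table_with_interval", "id", 1);
    Struct key = (Struct) first.key();
    assertEquals(1, key.getInt32("id").intValue()); // the record key carries the primary key value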
Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.
In the class PostgresConnectorIT, the method shouldTakeBlacklistFiltersIntoAccount:
@Test
public void shouldTakeBlacklistFiltersIntoAccount() throws Exception {
    String setupStmt = SETUP_TABLES_STMT +
                       "CREATE TABLE s1.b (pk SERIAL, aa integer, bb integer, PRIMARY KEY(pk));" +
                       "ALTER TABLE s1.a ADD COLUMN bb integer;" +
                       "INSERT INTO s1.a (aa, bb) VALUES (2, 2);" +
                       "INSERT INTO s1.a (aa, bb) VALUES (3, 3);" +
                       "INSERT INTO s1.b (aa, bb) VALUES (4, 4);" +
                       "INSERT INTO s2.a (aa) VALUES (5);";
    TestHelper.execute(setupStmt);
    Configuration.Builder configBuilder = TestHelper.defaultConfig()
            .with(PostgresConnectorConfig.SNAPSHOT_MODE, INITIAL.getValue())
            .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
            .with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "s2")
            .with(PostgresConnectorConfig.TABLE_BLACKLIST, ".+b")
            .with(PostgresConnectorConfig.COLUMN_BLACKLIST, ".+bb");
    start(PostgresConnector.class, configBuilder.build());
    assertConnectorIsRunning();

    // check the records from the snapshot take the filters into account
    // 3 records in s1.a and 1 in s1.b
    SourceRecords actualRecords = consumeRecordsByTopic(4);
    assertThat(actualRecords.recordsForTopic(topicName("s2.a"))).isNullOrEmpty();
    assertThat(actualRecords.recordsForTopic(topicName("s1.b"))).isNullOrEmpty();
    List<SourceRecord> recordsForS1a = actualRecords.recordsForTopic(topicName("s1.a"));
    assertThat(recordsForS1a.size()).isEqualTo(3);
    AtomicInteger pkValue = new AtomicInteger(1);
    recordsForS1a.forEach(record -> {
        VerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
        assertFieldAbsent(record, "bb");
    });

    // insert some more records and verify the filtering behavior
    String insertStmt = "INSERT INTO s1.b (aa, bb) VALUES (6, 6);" +
                        "INSERT INTO s2.a (aa) VALUES (7);";
    TestHelper.execute(insertStmt);
    assertNoRecordsToConsume();
}
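assertFieldAbsent is another local helper: a column removed by the column blacklist should not even appear in the value schema of the emitted records. A minimal sketch of such a check against the after image, not necessarily the helper's actual implementation in the test class:

    // Sketch: verify a blacklisted column was filtered out of the record's after image.
    private void assertFieldAbsent(SourceRecord record, String fieldName) {
        Struct after = ((Struct) record.value()).getStruct(Envelope.FieldName.AFTER);
        assertNull("field '" + fieldName + "' should have been filtered out",
                   after.schema().field(fieldName));
    }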