Example 46 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class RecordsSnapshotProducerIT, method shouldGenerateSnapshotsForDefaultDatatypesAdaptiveMicroseconds.

@Test
@FixFor("DBZ-342")
public void shouldGenerateSnapshotsForDefaultDatatypesAdaptiveMicroseconds() throws Exception {
    PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(PostgresConnectorConfig.TIME_PRECISION_MODE, TemporalPrecisionMode.ADAPTIVE_TIME_MICROSECONDS).build());
    TopicSelector selector = TopicSelector.create(config);
    context = new PostgresTaskContext(config, new PostgresSchema(config, TestHelper.getTypeRegistry(), selector), selector);
    snapshotProducer = new RecordsSnapshotProducer(context, new SourceInfo(TestHelper.TEST_SERVER), false);
    TestConsumer consumer = testConsumer(ALL_STMTS.size(), "public", "Quoted_\"");
    // insert data for each of the supported data types
    String statements = ALL_STMTS.stream().collect(Collectors.joining(";" + System.lineSeparator())) + ";";
    TestHelper.execute(statements);
    // then start the producer and validate all records are there
    snapshotProducer.start(consumer, e -> {
    });
    consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);
    Map<String, List<SchemaAndValueField>> expectedValuesByTableName = super.schemaAndValuesByTableNameAdaptiveTimeMicroseconds();
    consumer.process(record -> assertReadRecord(record, expectedValuesByTableName));
    // check the offset information for each record
    while (!consumer.isEmpty()) {
        SourceRecord record = consumer.remove();
        assertRecordOffset(record, true, consumer.isEmpty());
    }
}
Also used: List (java.util.List), SourceRecord (org.apache.kafka.connect.source.SourceRecord), Test (org.junit.Test), FixFor (io.debezium.doc.FixFor)
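For context, assertRecordOffset() works against the two maps every Kafka Connect SourceRecord carries: a source partition identifying where the data came from, and a source offset recording the position within it. A minimal sketch of that structure follows; the field names "server" and "position" are illustrative only, not Debezium's actual offset keys (its PostgreSQL connector stores the logical server name and WAL position under its own keys).

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class OffsetSketch {
    public static void main(String[] args) {
        // Illustrative partition/offset maps; real connectors define their own keys.
        Map<String, String> sourcePartition = Collections.singletonMap("server", "test_server");
        Map<String, Long> sourceOffset = Collections.singletonMap("position", 12345L);

        SourceRecord record = new SourceRecord(sourcePartition, sourceOffset,
                "test_server.public.test_table", Schema.STRING_SCHEMA, "payload");

        // These maps travel with every record and are what the snapshot test
        // above inspects via assertRecordOffset().
        System.out.println(record.sourcePartition());
        System.out.println(record.sourceOffset());
    }
}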

Example 47 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class RecordsStreamProducerIT, method shouldReceiveChangesForDefaultValues.

@Test
public void shouldReceiveChangesForDefaultValues() throws Exception {
    String statements = "ALTER TABLE test_table REPLICA IDENTITY FULL;" + "ALTER TABLE test_table ADD COLUMN default_column TEXT DEFAULT 'default';" + "INSERT INTO test_table (text) VALUES ('update');";
    consumer = testConsumer(1);
    recordsProducer.start(consumer, blackHole);
    executeAndWait(statements);
    SourceRecord insertRecord = consumer.remove();
    assertEquals(topicName("public.test_table"), insertRecord.topic());
    VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);
    List<SchemaAndValueField> expectedSchemaAndValues = Arrays.asList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "update"), new SchemaAndValueField("default_column", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "default"));
    assertRecordSchemaAndValues(expectedSchemaAndValues, insertRecord, Envelope.FieldName.AFTER);
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord), Test (org.junit.Test)
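assertRecordSchemaAndValues(..., Envelope.FieldName.AFTER) reads the after image out of Debezium's change envelope. A minimal sketch of doing the same by hand, assuming the standard envelope layout with "before", "after", and "op" fields:

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

static void printAfterImage(SourceRecord record) {
    Struct envelope = (Struct) record.value();
    // "after" holds the row state after the change; for this test it would
    // contain both the inserted text and the default_column value.
    Struct after = envelope.getStruct("after");
    String op = envelope.getString("op"); // "c" (create) for the insert above
    System.out.println(op + " -> " + after);
}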

Example 48 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class RecordsStreamProducerIT, method shouldReceiveChangesForUpdates.

@Test
public void shouldReceiveChangesForUpdates() throws Exception {
    consumer = testConsumer(1);
    recordsProducer.start(consumer, blackHole);
    executeAndWait("UPDATE test_table set text='update' WHERE pk=1");
    // the update record should be the last record
    SourceRecord updatedRecord = consumer.remove();
    String topicName = topicName("public.test_table");
    assertEquals(topicName, updatedRecord.topic());
    VerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);
    // the default replica identity emits old values only for PK changes
    List<SchemaAndValueField> expectedAfter = Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "update"));
    assertRecordSchemaAndValues(expectedAfter, updatedRecord, Envelope.FieldName.AFTER);
    // alter the table, set its replica identity to FULL, then issue another update
    consumer.expects(1);
    TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY FULL");
    executeAndWait("UPDATE test_table set text='update2' WHERE pk=1");
    updatedRecord = consumer.remove();
    assertEquals(topicName, updatedRecord.topic());
    VerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);
    // now we should get both old and new values
    List<SchemaAndValueField> expectedBefore = Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "update"));
    assertRecordSchemaAndValues(expectedBefore, updatedRecord, Envelope.FieldName.BEFORE);
    expectedAfter = Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "update2"));
    assertRecordSchemaAndValues(expectedAfter, updatedRecord, Envelope.FieldName.AFTER);
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord), Test (org.junit.Test)
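The replica identity switch is what makes the before image appear: with the default identity, PostgreSQL logs only the old row's primary key, so the envelope's "before" field stays null for this update, while REPLICA IDENTITY FULL logs the complete old row. A hedged sketch of checking for the before image, assuming the standard envelope:

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

// Sketch: returns true once the table runs with REPLICA IDENTITY FULL
// (or the envelope otherwise carries an old row image).
static boolean hasBeforeImage(SourceRecord update) {
    Struct envelope = (Struct) update.value();
    return envelope.getStruct("before") != null;
}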

Example 49 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class RecordsStreamProducerIT, method shouldReceiveChangesForUpdatesWithPKChanges.

@Test
public void shouldReceiveChangesForUpdatesWithPKChanges() throws Exception {
    consumer = testConsumer(3);
    recordsProducer.start(consumer, blackHole);
    executeAndWait("UPDATE test_table SET text = 'update', pk = 2");
    String topicName = topicName("public.test_table");
    // first should be a delete of the old pk
    SourceRecord deleteRecord = consumer.remove();
    assertEquals(topicName, deleteRecord.topic());
    VerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
    // followed by a tombstone of the old pk
    SourceRecord tombstoneRecord = consumer.remove();
    assertEquals(topicName, tombstoneRecord.topic());
    VerifyRecord.isValidTombstone(tombstoneRecord, PK_FIELD, 1);
    // and finally an insert for the new pk
    SourceRecord insertRecord = consumer.remove();
    assertEquals(topicName, insertRecord.topic());
    VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord), Test (org.junit.Test)
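A primary-key update is unfolded into three records because the Kafka message key changed: a delete envelope on the old key, a tombstone (null value) on the old key so log compaction can purge it, and an insert envelope on the new key. A minimal sketch of telling the first two apart, assuming the standard envelope:

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

static boolean isTombstone(SourceRecord record) {
    // A tombstone has no value at all, only the key.
    return record.value() == null;
}

static boolean isDelete(SourceRecord record) {
    // A delete still carries an envelope, with op = "d" and a null after image.
    Struct envelope = (Struct) record.value();
    return envelope != null && "d".equals(envelope.getString("op"));
}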

Example 50 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class RecordsStreamProducerIT, method shouldReceiveChangesForDeletes.

@Test
public void shouldReceiveChangesForDeletes() throws Exception {
    // insert a second row, then delete both rows
    String statements = "INSERT INTO test_table (text) VALUES ('insert2');" + "DELETE FROM test_table WHERE pk > 0;";
    consumer = testConsumer(5);
    recordsProducer.start(consumer, blackHole);
    executeAndWait(statements);
    String topicPrefix = "public.test_table";
    String topicName = topicName(topicPrefix);
    assertRecordInserted(topicPrefix, PK_FIELD, 2);
    // first entry removed
    SourceRecord record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidDelete(record, PK_FIELD, 1);
    // followed by a tombstone
    record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidTombstone(record, PK_FIELD, 1);
    // second entry removed
    record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidDelete(record, PK_FIELD, 2);
    // followed by a tombstone
    record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidTombstone(record, PK_FIELD, 2);
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord), Test (org.junit.Test)
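Since every deleted row produces a (delete, tombstone) pair on its key, the two rows here drain as four records after the initial insert, which is why the consumer expects five in total. A sketch of draining such pairs generically, reusing the test's own consumer and VerifyRecord helpers and reading the pk back from the record key:

// Sketch, within the test context above: drain the remaining records as
// (delete, tombstone) pairs that share the same primary key.
while (!consumer.isEmpty()) {
    SourceRecord delete = consumer.remove();
    int pk = ((Struct) delete.key()).getInt32(PK_FIELD);
    VerifyRecord.isValidDelete(delete, PK_FIELD, pk);
    SourceRecord tombstone = consumer.remove();
    VerifyRecord.isValidTombstone(tombstone, PK_FIELD, pk);
}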

Aggregations

SourceRecord (org.apache.kafka.connect.source.SourceRecord): 308
Test (org.junit.Test): 148
Test (org.junit.jupiter.api.Test): 98
Struct (org.apache.kafka.connect.data.Struct): 68
HashMap (java.util.HashMap): 60
Schema (org.apache.kafka.connect.data.Schema): 45
ThreadedTest (org.apache.kafka.connect.util.ThreadedTest): 27
ParameterizedTest (org.apache.kafka.connect.util.ParameterizedTest): 23
ArrayList (java.util.ArrayList): 22
RetryWithToleranceOperatorTest (org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest): 21
Map (java.util.Map): 15
SchemaBuilder (org.apache.kafka.connect.data.SchemaBuilder): 13
ConnectException (org.apache.kafka.connect.errors.ConnectException): 13
Document (org.bson.Document): 13
FixFor (io.debezium.doc.FixFor): 12
List (java.util.List): 12
RecordsForCollection (io.debezium.connector.mongodb.RecordMakers.RecordsForCollection): 11
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 11
ConnectHeaders (org.apache.kafka.connect.header.ConnectHeaders): 11
BsonTimestamp (org.bson.BsonTimestamp): 11