Example 61 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class RecordsStreamProducer, method generateCreateRecord.

protected void generateCreateRecord(TableId tableId, Object[] rowData, boolean isLastEventForLsn, BlockingConsumer<ChangeEvent> recordConsumer) throws InterruptedException {
    if (rowData == null || rowData.length == 0) {
        logger.warn("no new values found for table '{}' from update message at '{}';skipping record", tableId, sourceInfo);
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(rowData);
    Struct value = tableSchema.valueFromColumnData(rowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    SourceRecord record = new SourceRecord(partition, offset, topicName, null, keySchema, key, envelope.schema(), envelope.create(value, sourceInfo.source(), clock().currentTimeInMillis()));
    if (logger.isDebugEnabled()) {
        logger.debug("sending create event '{}' to topic '{}'", record, topicName);
    }
    recordConsumer.accept(new ChangeEvent(record, isLastEventForLsn));
}
Also used : TableSchema(io.debezium.relational.TableSchema) Schema(org.apache.kafka.connect.data.Schema) Envelope(io.debezium.data.Envelope) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Struct(org.apache.kafka.connect.data.Struct)
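
For reference, the eight-argument SourceRecord constructor used above takes the source partition and offset maps, the topic name, the Kafka partition (null lets Kafka choose), and the key and value with their schemas. A minimal sketch with hypothetical placeholder values, not Debezium's actual partition/offset layout:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class SourceRecordSketch {
    public static void main(String[] args) {
        // Hypothetical source partition and offset; a real connector encodes its
        // position in the source (e.g. a server name and an LSN) in these maps.
        Map<String, String> partition = Collections.singletonMap("server", "demo");
        Map<String, Long> offset = Collections.singletonMap("lsn", 42L);
        // Same constructor shape as in generateCreateRecord above:
        // (partition, offset, topic, kafkaPartition, keySchema, key, valueSchema, value)
        SourceRecord record = new SourceRecord(partition, offset, "demo.topic", null,
                Schema.STRING_SCHEMA, "key-1", Schema.STRING_SCHEMA, "value-1");
        System.out.println(record);
    }
}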

Example 62 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class BinlogReaderBufferIT, method shouldProcessLargeTransaction.

@Test
public void shouldProcessLargeTransaction() throws SQLException, InterruptedException {
    String masterPort = System.getProperty("database.port", "3306");
    String replicaPort = System.getProperty("database.replica.port", "3306");
    boolean replicaIsMaster = masterPort.equals(replicaPort);
    if (!replicaIsMaster) {
        // Give time for the replica to catch up to the master ...
        Thread.sleep(5000L);
    }
    // Use the DB configuration to define the connector's configuration to use the "replica"
    // which may be the same as the "master" ...
    config = Configuration.create()
            .with(MySqlConnectorConfig.HOSTNAME, System.getProperty("database.replica.hostname", "localhost"))
            .with(MySqlConnectorConfig.PORT, System.getProperty("database.replica.port", "3306"))
            .with(MySqlConnectorConfig.USER, "snapper")
            .with(MySqlConnectorConfig.PASSWORD, "snapperpass")
            .with(MySqlConnectorConfig.SERVER_ID, 18765)
            .with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
            .with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
            .with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
            .with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
            .with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
            .with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
            .with(MySqlConnectorConfig.BUFFER_SIZE_FOR_BINLOG_READER, 9)
            .with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
            .build();
    // Start the connector ...
    start(MySqlConnector.class, config);
    // 5 orders + 9 products + 9 products_on_hand + 4 customers rows,
    // plus 11 schema change records and 1 SET statement
    SourceRecords records = consumeRecordsByTopic(5 + 9 + 9 + 4 + 11 + 1);
    // ---------------------------------------------------------------------------------------------------------------
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        final int numRecords = 40;
        try (JdbcConnection connection = db.connect()) {
            final Connection jdbc = connection.connection();
            connection.setAutoCommit(false);
            final Statement statement = jdbc.createStatement();
            for (int i = 0; i < numRecords; i++) {
                statement.executeUpdate(String.format("INSERT INTO customers\n" + "VALUES (default,\"%s\",\"%s\",\"%s\")", i, i, i));
            }
            jdbc.commit();
            connection.query("SELECT * FROM customers", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
            connection.setAutoCommit(true);
        }
        // All records should be present only once
        records = consumeRecordsByTopic(numRecords);
        int recordIndex = 0;
        for (SourceRecord r : records.allRecordsInOrder()) {
            Struct envelope = (Struct) r.value();
            assertThat(envelope.getString("op")).isEqualTo("c");
            assertThat(envelope.getStruct("after").getString("email")).isEqualTo(Integer.toString(recordIndex++));
        }
        assertThat(records.topics().size()).isEqualTo(1);
        Testing.print("*** Done with large TX");
    }
}
Also used : Statement(java.sql.Statement) Connection(java.sql.Connection) JdbcConnection(io.debezium.jdbc.JdbcConnection) FileDatabaseHistory(io.debezium.relational.history.FileDatabaseHistory) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Savepoint(java.sql.Savepoint) Struct(org.apache.kafka.connect.data.Struct) Test(org.junit.Test) AbstractConnectorTest(io.debezium.embedded.AbstractConnectorTest)
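
The assertion loop above reads Debezium's change-event envelope: each record value is a Struct whose "op" field names the operation ("c" for create) and whose "after" field carries the new row state. A minimal sketch of that read pattern, assuming a record consumed from the connector:

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

public class EnvelopeReadSketch {
    // Returns the "after" image's email column for a create event, else null.
    static String emailOfInsert(SourceRecord record) {
        Struct envelope = (Struct) record.value();
        if (envelope == null || !"c".equals(envelope.getString("op"))) {
            return null;
        }
        // "after" holds the row state after the change; "before" would hold the prior state.
        return envelope.getStruct("after").getString("email");
    }
}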

Example 63 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class BinlogReaderIT, method shouldHandleMySQLTimeCorrectly.

@Test
@FixFor("DBZ-342")
public void shouldHandleMySQLTimeCorrectly() throws Exception {
    final UniqueDatabase REGRESSION_DATABASE = new UniqueDatabase("logical_server_name", "regression_test").withDbHistoryPath(DB_HISTORY_PATH);
    REGRESSION_DATABASE.createAndInitialize();
    String tableName = "dbz_342_timetest";
    config = simpleConfig()
            .with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
            .with(MySqlConnectorConfig.DATABASE_WHITELIST, REGRESSION_DATABASE.getDatabaseName())
            .with(MySqlConnectorConfig.TABLE_WHITELIST, REGRESSION_DATABASE.qualifiedTableName(tableName))
            .build();
    context = new MySqlTaskContext(config);
    context.start();
    // start from beginning
    context.source().setBinlogStartPoint("", 0L);
    context.initializeHistory();
    reader = new BinlogReader("binlog", context);
    // Start reading the binlog ...
    reader.start();
    // only 1 insert
    int expectedChanges = 1;
    consumeAtLeast(expectedChanges);
    // Check the records via the store ...
    List<SourceRecord> sourceRecords = store.sourceRecords();
    assertThat(sourceRecords.size()).isEqualTo(1);
    SourceRecord sourceRecord = sourceRecords.get(0);
    Struct value = (Struct) sourceRecord.value();
    Struct after = value.getStruct(Envelope.FieldName.AFTER);
    // '517:51:04.777' (stored as 517:51:04.78 by the column's fractional-second precision)
    long c1 = after.getInt64("c1");
    Duration c1Time = Duration.ofNanos(c1 * 1_000);
    Duration c1ExpectedTime = toDuration("PT517H51M4.78S");
    assertEquals(c1ExpectedTime, c1Time);
    assertEquals(c1ExpectedTime.toNanos(), c1Time.toNanos());
    assertThat(c1Time.toNanos()).isEqualTo(1864264780000000L);
    assertThat(c1Time).isEqualTo(Duration.ofHours(517).plusMinutes(51).plusSeconds(4).plusMillis(780));
    // '-13:14:50'
    long c2 = after.getInt64("c2");
    Duration c2Time = Duration.ofNanos(c2 * 1_000);
    Duration c2ExpectedTime = toDuration("-PT13H14M50S");
    assertEquals(c2ExpectedTime, c2Time);
    assertEquals(c2ExpectedTime.toNanos(), c2Time.toNanos());
    assertThat(c2Time.toNanos()).isEqualTo(-47690000000000L);
    assertTrue(c2Time.isNegative());
    assertThat(c2Time).isEqualTo(Duration.ofHours(-13).minusMinutes(14).minusSeconds(50));
    // '-733:00:00.0011' (stored as -733:00:00.001)
    long c3 = after.getInt64("c3");
    Duration c3Time = Duration.ofNanos(c3 * 1_000);
    Duration c3ExpectedTime = toDuration("-PT733H0M0.001S");
    assertEquals(c3ExpectedTime, c3Time);
    assertEquals(c3ExpectedTime.toNanos(), c3Time.toNanos());
    assertThat(c3Time.toNanos()).isEqualTo(-2638800001000000L);
    assertTrue(c3Time.isNegative());
    assertThat(c3Time).isEqualTo(Duration.ofHours(-733).minusMillis(1));
    // '-1:59:59.0011' (stored as -1:59:59.001)
    long c4 = after.getInt64("c4");
    Duration c4Time = Duration.ofNanos(c4 * 1_000);
    Duration c4ExpectedTime = toDuration("-PT1H59M59.001S");
    assertEquals(c4ExpectedTime, c4Time);
    assertEquals(c4ExpectedTime.toNanos(), c4Time.toNanos());
    assertThat(c4Time.toNanos()).isEqualTo(-7199001000000L);
    assertTrue(c4Time.isNegative());
    assertThat(c4Time).isEqualTo(Duration.ofHours(-1).minusMinutes(59).minusSeconds(59).minusMillis(1));
    // '-838:59:58.999999'
    long c5 = after.getInt64("c5");
    Duration c5Time = Duration.ofNanos(c5 * 1_000);
    Duration c5ExpectedTime = toDuration("-PT838H59M58.999999S");
    assertEquals(c5ExpectedTime, c5Time);
    assertEquals(c5ExpectedTime.toNanos(), c5Time.toNanos());
    assertThat(c5Time.toNanos()).isEqualTo(-3020398999999000L);
    assertTrue(c5Time.isNegative());
    assertThat(c5Time).isEqualTo(Duration.ofHours(-838).minusMinutes(59).minusSeconds(58).minusNanos(999999000));
}
Also used : Duration(java.time.Duration) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Struct(org.apache.kafka.connect.data.Struct) Test(org.junit.Test) FixFor(io.debezium.doc.FixFor)
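
The TIME columns above arrive as int64 microsecond counts, which the test converts to java.time.Duration via Duration.ofNanos(micros * 1_000); toDuration presumably wraps Duration.parse of the ISO-8601 strings shown (an assumption, since its body is not part of this excerpt). A self-contained sketch of the conversion:

import java.time.Duration;

public class TimeConversionSketch {
    // Converts a microsecond count (as emitted in the envelope's int64 fields) to a Duration.
    static Duration fromMicros(long micros) {
        return Duration.ofNanos(micros * 1_000);
    }

    public static void main(String[] args) {
        // Negative values are legal: MySQL TIME ranges from -838:59:59 to 838:59:59.
        Duration d = fromMicros(-47_690_000_000L);
        System.out.println(d); // PT-13H-14M-50S
    }
}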

Example 64 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class MySqlConnectorIT, method shouldEmitTombstoneOnDeleteByDefault.

@Test
@FixFor("DBZ-582")
public void shouldEmitTombstoneOnDeleteByDefault() throws Exception {
    config = DATABASE.defaultConfig().with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.NEVER).build();
    // Start the connector ...
    start(MySqlConnector.class, config);
    // ---------------------------------------------------------------------------------------------------------------
    // Consume all of the events due to startup and initialization of the database
    // ---------------------------------------------------------------------------------------------------------------
    // 9 products + 9 products_on_hand + 4 customers + 5 orders rows, plus 6 DDL change records
    SourceRecords records = consumeRecordsByTopic(9 + 9 + 4 + 5 + 6);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("orders")).size()).isEqualTo(5);
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute("UPDATE orders SET order_number=10101 WHERE order_number=10001");
        }
    }
    // Consume the update of the PK, which is one insert followed by a delete followed by a tombstone ...
    records = consumeRecordsByTopic(3);
    List<SourceRecord> updates = records.recordsForTopic(DATABASE.topicForTable("orders"));
    assertThat(updates.size()).isEqualTo(3);
    assertDelete(updates.get(0), "order_number", 10001);
    assertTombstone(updates.get(1), "order_number", 10001);
    assertInsert(updates.get(2), "order_number", 10101);
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute("DELETE FROM orders WHERE order_number=10101");
        }
    }
    records = consumeRecordsByTopic(2);
    updates = records.recordsForTopic(DATABASE.topicForTable("orders"));
    assertThat(updates.size()).isEqualTo(2);
    assertDelete(updates.get(0), "order_number", 10101);
    assertTombstone(updates.get(1), "order_number", 10101);
    stopConnector();
}
Also used : JdbcConnection(io.debezium.jdbc.JdbcConnection) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Test(org.junit.Test) AbstractConnectorTest(io.debezium.embedded.AbstractConnectorTest) FixFor(io.debezium.doc.FixFor)
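
assertDelete and assertTombstone come from Debezium's AbstractConnectorTest; the convention they verify is standard Kafka Connect practice: a delete emits an envelope with op = "d", followed by a tombstone, i.e. a record with the same key and a null value so that log compaction can remove the row. A hedged sketch of the underlying checks:

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

public class TombstoneSketch {
    // A tombstone is a keyed record whose value is null.
    static boolean isTombstone(SourceRecord record) {
        return record.value() == null;
    }

    // A delete event carries op = "d" in its Debezium envelope.
    static boolean isDelete(SourceRecord record) {
        Struct envelope = (Struct) record.value();
        return envelope != null && "d".equals(envelope.getString("op"));
    }
}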

Example 65 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

From the class UnwrapFromMongoDbEnvelopeTest, method setup.

@Before
public void setup() {
    source = new SourceInfo(SERVER_NAME);
    topicSelector = TopicSelector.defaultSelector(PREFIX);
    produced = new ArrayList<>();
    recordMakers = new RecordMakers(source, topicSelector, produced::add, true);
    transformation = new UnwrapFromMongoDbEnvelope<SourceRecord>();
    transformation.configure(Collections.emptyMap());
}
Also used : SourceInfo(io.debezium.connector.mongodb.SourceInfo) RecordMakers(io.debezium.connector.mongodb.RecordMakers) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Before(org.junit.Before)
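
UnwrapFromMongoDbEnvelope is a Kafka Connect single message transform (a Transformation<SourceRecord>), so after configure() it is applied record by record. A hypothetical test method, not part of the original class, showing how the fixture above would typically be exercised:

@Test
public void shouldUnwrapProducedRecords() {
    // "produced" is populated by the RecordMakers wired up in setup() above.
    for (SourceRecord record : produced) {
        SourceRecord unwrapped = transformation.apply(record);
        // apply() may return null when the transform drops a record (e.g. a tombstone).
        if (unwrapped != null) {
            assertThat(unwrapped.value()).isNotNull();
        }
    }
}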

Aggregations

SourceRecord (org.apache.kafka.connect.source.SourceRecord): 308 usages
Test (org.junit.Test): 148 usages
Test (org.junit.jupiter.api.Test): 98 usages
Struct (org.apache.kafka.connect.data.Struct): 68 usages
HashMap (java.util.HashMap): 60 usages
Schema (org.apache.kafka.connect.data.Schema): 45 usages
ThreadedTest (org.apache.kafka.connect.util.ThreadedTest): 27 usages
ParameterizedTest (org.apache.kafka.connect.util.ParameterizedTest): 23 usages
ArrayList (java.util.ArrayList): 22 usages
RetryWithToleranceOperatorTest (org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest): 21 usages
Map (java.util.Map): 15 usages
SchemaBuilder (org.apache.kafka.connect.data.SchemaBuilder): 13 usages
ConnectException (org.apache.kafka.connect.errors.ConnectException): 13 usages
Document (org.bson.Document): 13 usages
FixFor (io.debezium.doc.FixFor): 12 usages
List (java.util.List): 12 usages
RecordsForCollection (io.debezium.connector.mongodb.RecordMakers.RecordsForCollection): 11 usages
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 11 usages
ConnectHeaders (org.apache.kafka.connect.header.ConnectHeaders): 11 usages
BsonTimestamp (org.bson.BsonTimestamp): 11 usages