Search in sources:

Example 1 with CompletionResult

Use of io.debezium.embedded.EmbeddedEngine.CompletionResult in the debezium/debezium project.

From class ConnectorOutputTest, method runConnector.

/**
 * Run the connector described by the supplied test specification.
 *
 * @param spec the test specification
 * @param callback the function that should be called when the connector is stopped
 */
protected void runConnector(TestSpecification spec, CompletionCallback callback) {
    PreviousContext preRunContext = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
    final Configuration environmentConfig = Configuration.copy(spec.environment()).build();
    final Configuration connectorConfig = spec.config();
    String[] ignorableFieldNames = environmentConfig.getString(ENV_IGNORE_FIELDS, "").split(",");
    final Set<String> ignorableFields = Arrays.stream(ignorableFieldNames).map(String::trim).collect(Collectors.toCollection(HashSet::new));
    String[] globallyIgnorableFieldNames = globallyIgnorableFieldNames();
    if (globallyIgnorableFieldNames != null && globallyIgnorableFieldNames.length != 0) {
        ignorableFields.addAll(Arrays.stream(globallyIgnorableFieldNames).map(String::trim).collect(Collectors.toSet()));
    }
    final SchemaAndValueConverter keyConverter = new SchemaAndValueConverter(environmentConfig, true);
    final SchemaAndValueConverter valueConverter = new SchemaAndValueConverter(environmentConfig, false);
    final TestData testData = spec.testData();
    // Get any special comparators ...
    final Map<String, RecordValueComparator> comparatorsByFieldName = new HashMap<>();
    addValueComparatorsByFieldPath(comparatorsByFieldName::put);
    final Map<String, RecordValueComparator> comparatorsBySchemaName = new HashMap<>();
    addValueComparatorsBySchemaName(comparatorsBySchemaName::put);
    RuntimeException runError = null;
    CompletionResult problem = new CompletionResult(callback);
    try {
        // Set up the test data ...
        final PreviewIterator<Document> expectedRecords = Iterators.preview(testData.read());
        final Consumer<Document> recorder = testData::write;
        // We need something that will measure the amount of time since our consumer has seen a record ...
        TimeSince timeSinceLastRecord = Threads.timeSince(Clock.SYSTEM);
        // We'll keep the last 10 expected and actual records so that there is some context if they don't match ...
        Queue<SourceRecord> actualRecordHistory = fixedSizeQueue(10);
        Queue<SourceRecord> expectedRecordHistory = fixedSizeQueue(10);
        // Define what happens for each record ...
        ConsumerCompletion result = new ConsumerCompletion();
        Consumer<SourceRecord> consumer = (actualRecord) -> {
            PreviousContext prev = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
            try {
                Testing.debug("actual record:    " + SchemaUtil.asString(actualRecord));
                timeSinceLastRecord.reset();
                // Record the actual in the history ...
                actualRecordHistory.add(actualRecord);
                // And possibly hand it to the test's recorder ...
                try {
                    Document jsonRecord = serializeSourceRecord(actualRecord, keyConverter, valueConverter);
                    if (jsonRecord != null)
                        recorder.accept(jsonRecord);
                } catch (IOException e) {
                    String msg = "Error converting JSON to SourceRecord";
                    Testing.debug(msg);
                    throw new ConnectException(msg, e);
                }
                if (expectedRecords != null) {
                    // Get the test's next expected record ...
                    if (!expectedRecords.hasNext()) {
                        // We received an actual record but don't have or expect one ...
                        String msg = "Source record found but nothing expected";
                        result.error();
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    }
                    Document expected = expectedRecords.next();
                    if (isEndCommand(expected)) {
                        result.error();
                        String msg = "Source record was found but not expected: " + SchemaUtil.asString(actualRecord);
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    } else if (isCommand(expected)) {
                        Testing.debug("applying command: " + SchemaUtil.asString(expected));
                        applyCommand(expected, result);
                    } else {
                        try {
                            // Otherwise, build a record from the expected and add it to the history ...
                            SourceRecord expectedRecord = rehydrateSourceRecord(expected, keyConverter, valueConverter);
                            expectedRecordHistory.add(expectedRecord);
                            Testing.debug("expected record:  " + SchemaUtil.asString(expectedRecord));
                            // And compare the records ...
                            try {
                                assertSourceRecordMatch(actualRecord, expectedRecord, ignorableFields::contains, comparatorsByFieldName, comparatorsBySchemaName);
                            } catch (AssertionError e) {
                                result.error();
                                String msg = "Source record with key " + SchemaUtil.asString(actualRecord.key()) + " did not match expected record: " + e.getMessage();
                                Testing.debug(msg);
                                throw new MismatchRecordException(e, msg, actualRecordHistory, expectedRecordHistory);
                            }
                        } catch (IOException e) {
                            result.exception();
                            String msg = "Error converting JSON to SourceRecord";
                            Testing.debug(msg);
                            throw new ConnectException(msg, e);
                        }
                    }
                    if (!expectedRecords.hasNext()) {
                        // We expect no more records, so stop the connector ...
                        result.stop();
                        String msg = "Stopping connector after no more expected records found";
                        Testing.debug(msg);
                        throw new StopConnectorException(msg);
                    }
                    // Peek at the next record to see if it is a command ...
                    Document nextExpectedRecord = expectedRecords.peek();
                    if (isCommand(nextExpectedRecord)) {
                        // consume it and apply it ...
                        applyCommand(expectedRecords.next(), result);
                    }
                }
            } finally {
                prev.restore();
            }
        };
        // Set up the configuration for the engine to include the connector configuration and apply as defaults
        // the environment and engine parameters ...
        Configuration engineConfig = Configuration.copy(connectorConfig)
                .withDefault(environmentConfig)
                .withDefault(EmbeddedEngine.ENGINE_NAME, spec.name())
                .withDefault(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH)
                .withDefault(EmbeddedEngine.OFFSET_FLUSH_INTERVAL_MS, 0)
                .build();
        // Create the engine ...
        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(engineConfig)
                .notifying(consumer)
                .using(this.getClass().getClassLoader())
                .using(problem)
                .build();
        long connectorTimeoutInSeconds = environmentConfig.getLong(ENV_CONNECTOR_TIMEOUT_IN_SECONDS, 10);
        // Get ready to run the connector one or more times ...
        do {
            // Each time create a thread that will stop our connector if we don't get enough results
            Thread timeoutThread = Threads.timeout(spec.name() + "-connector-output", connectorTimeoutInSeconds, TimeUnit.SECONDS, timeSinceLastRecord, engine::stop);
            // But plan to stop our timeout thread as soon as the connector completes ...
            result.uponCompletion(timeoutThread::interrupt);
            timeoutThread.start();
            // Run the connector and block until the connector is stopped by the timeout thread or until
            // an exception is thrown within the connector (perhaps by the consumer) ...
            Testing.debug("Starting connector");
            result.reset();
            engine.run();
        } while (result.get() == ExecutionResult.RESTART_REQUESTED);
    } catch (IOException e) {
        runError = new RuntimeException("Error reading test data: " + e.getMessage(), e);
    } catch (RuntimeException t) {
        runError = t;
    } finally {
        // And clean up everything ...
        try {
            testData.close();
        } catch (IOException e) {
            if (runError == null) {
                // Don't mask an earlier failure with the close error ...
                runError = new RuntimeException("Error closing test data: " + e.getMessage(), e);
            }
        } finally {
            try {
                keyConverter.close();
            } finally {
                try {
                    valueConverter.close();
                } finally {
                    preRunContext.restore();
                }
            }
        }
    }
    if (runError != null) {
        throw runError;
    }
    if (problem.hasError()) {
        Throwable error = problem.error();
        if (error instanceof AssertionError) {
            fail(problem.message());
        } else if (error instanceof MismatchRecordException) {
            MismatchRecordException mismatch = (MismatchRecordException) error;
            LinkedList<SourceRecord> actualHistory = mismatch.getActualRecords();
            LinkedList<SourceRecord> expectedHistory = mismatch.getExpectedRecords();
            Testing.print("");
            Testing.print("FAILURE in connector integration test '" + spec.name() + "' in class " + getClass());
            Testing.print(" actual record:   " + SchemaUtil.asString(actualHistory.getLast()));
            Testing.print(" expected record: " + SchemaUtil.asString(expectedHistory.getLast()));
            Testing.print(mismatch.getMessage());
            Testing.print("");
            AssertionError cause = mismatch.getError();
            if (cause != null) {
                throw cause;
            }
            fail(problem.message());
        } else if (error instanceof RuntimeException) {
            throw (RuntimeException) error;
        } else {
            throw new RuntimeException(error);
        }
    }
}
Also used: Arrays(java.util.Arrays) PreviewIterator(io.debezium.util.Iterators.PreviewIterator) Threads(io.debezium.util.Threads) Schema(org.apache.kafka.connect.data.Schema) LoggingContext(io.debezium.util.LoggingContext) JsonDeserializer(org.apache.kafka.connect.json.JsonDeserializer) Map(java.util.Map) After(org.junit.After) Assert.fail(org.junit.Assert.fail) JsonNode(com.fasterxml.jackson.databind.JsonNode) Path(java.nio.file.Path) DocumentReader(io.debezium.document.DocumentReader) Predicate(java.util.function.Predicate) Set(java.util.Set) Collectors(java.util.stream.Collectors) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ArrayReader(io.debezium.document.ArrayReader) StandardCharsets(java.nio.charset.StandardCharsets) List(java.util.List) Queue(java.util.Queue) JsonConverter(org.apache.kafka.connect.json.JsonConverter) SchemaUtil(io.debezium.data.SchemaUtil) Value(io.debezium.document.Value) CompletionResult(io.debezium.embedded.EmbeddedEngine.CompletionResult) Array(io.debezium.document.Array) RecordValueComparator(io.debezium.data.VerifyRecord.RecordValueComparator) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) PreviousContext(io.debezium.util.LoggingContext.PreviousContext) ArrayList(java.util.ArrayList) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Document(io.debezium.document.Document) BiConsumer(java.util.function.BiConsumer) ArrayWriter(io.debezium.document.ArrayWriter) LinkedList(java.util.LinkedList) Strings(io.debezium.util.Strings) Before(org.junit.Before) OutputStream(java.io.OutputStream) Properties(java.util.Properties) Iterators(io.debezium.util.Iterators) Iterator(java.util.Iterator) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) Configuration(io.debezium.config.Configuration) FileInputStream(java.io.FileInputStream) CompletionCallback(io.debezium.embedded.EmbeddedEngine.CompletionCallback) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) JsonSerializer(org.apache.kafka.connect.json.JsonSerializer) Testing(io.debezium.util.Testing) Paths(java.nio.file.Paths) Collect(io.debezium.util.Collect) ConnectException(org.apache.kafka.connect.errors.ConnectException) VerifyRecord(io.debezium.data.VerifyRecord) Clock(io.debezium.util.Clock) TimeSince(io.debezium.util.Threads.TimeSince) IoUtil(io.debezium.util.IoUtil) InputStream(java.io.InputStream)
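
Distilled from Example 1, the following minimal sketch shows the CompletionResult wiring on its own: CompletionResult implements CompletionCallback, is registered on the engine builder via using(...), and is inspected after run() returns. This is a sketch under assumptions, not the original test: the configuration here is a placeholder, and a runnable engine also needs a connector class, offset storage, and connector-specific properties.

import io.debezium.config.Configuration;
import io.debezium.embedded.EmbeddedEngine;
import io.debezium.embedded.EmbeddedEngine.CompletionResult;

public class CompletionResultSketch {
    public static void main(String[] args) {
        // Placeholder configuration only; see Example 1 for the defaults a real engine needs ...
        Configuration config = Configuration.create()
                .with(EmbeddedEngine.ENGINE_NAME, "sketch")
                .build();
        // CompletionResult is a CompletionCallback that records how the engine finished ...
        CompletionResult completion = new CompletionResult();
        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(config)
                .notifying(record -> System.out.println(record))
                // Register the callback so success or failure is captured on shutdown ...
                .using(completion)
                .build();
        // Blocks until the engine is stopped or fails ...
        engine.run();
        if (completion.hasError()) {
            System.err.println("Engine failed: " + completion.message());
            completion.error().printStackTrace();
        }
    }
}

Note that Example 1 instead constructs the result as new CompletionResult(callback), which wraps a caller-supplied CompletionCallback so the delegate still fires while the outcome is recorded for later inspection.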

Example 2 with CompletionResult

Use of io.debezium.embedded.EmbeddedEngine.CompletionResult in the debezium/debezium project.

From class MySqlConnectorIT, method shouldConsumeAllEventsFromDatabaseUsingSnapshot.

@Test
public void shouldConsumeAllEventsFromDatabaseUsingSnapshot() throws SQLException, InterruptedException {
    String masterPort = System.getProperty("database.port", "3306");
    String replicaPort = System.getProperty("database.replica.port", "3306");
    boolean replicaIsMaster = masterPort.equals(replicaPort);
    if (!replicaIsMaster) {
        // Give time for the replica to catch up to the master ...
        Thread.sleep(5000L);
    }
    // Use the DB configuration to define the connector's configuration to use the "replica"
    // which may be the same as the "master" ...
    config = Configuration.create()
            .with(MySqlConnectorConfig.HOSTNAME, System.getProperty("database.replica.hostname", "localhost"))
            .with(MySqlConnectorConfig.PORT, System.getProperty("database.replica.port", "3306"))
            .with(MySqlConnectorConfig.USER, "snapper")
            .with(MySqlConnectorConfig.PASSWORD, "snapperpass")
            .with(MySqlConnectorConfig.SERVER_ID, 18765)
            .with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
            .with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
            .with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
            .with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
            .with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
            .with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
            .with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
            .build();
    // Start the connector ...
    start(MySqlConnector.class, config);
    // Testing.Print.enable();
    // ---------------------------------------------------------------------------------------------------------------
    // Consume all of the events due to startup and initialization of the database
    // ---------------------------------------------------------------------------------------------------------------
    // 11 schema change records + 1 SET statement
    SourceRecords records = consumeRecordsByTopic(5 + 9 + 9 + 4 + 11 + 1 + 2);
    assertThat(records.recordsForTopic(DATABASE.getServerName()).size()).isEqualTo(12);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("products")).size()).isEqualTo(9);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("products_on_hand")).size()).isEqualTo(9);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("customers")).size()).isEqualTo(4);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("orders")).size()).isEqualTo(5);
    assertThat(records.topics().size()).isEqualTo(5);
    assertThat(records.databaseNames().size()).isEqualTo(2);
    assertThat(records.ddlRecordsForDatabase(DATABASE.getDatabaseName()).size()).isEqualTo(11);
    assertThat(records.ddlRecordsForDatabase("readbinlog_test")).isNull();
    assertThat(records.ddlRecordsForDatabase("").size()).isEqualTo(1);
    records.ddlRecordsForDatabase(DATABASE.getDatabaseName()).forEach(this::print);
    // Check that all records are valid, can be serialized and deserialized ...
    records.forEach(this::validate);
    // Check that the last record has snapshots disabled in the offset, but not in the source
    List<SourceRecord> allRecords = records.allRecordsInOrder();
    SourceRecord last = allRecords.get(allRecords.size() - 1);
    SourceRecord secondToLast = allRecords.get(allRecords.size() - 2);
    assertThat(secondToLast.sourceOffset().containsKey(SourceInfo.SNAPSHOT_KEY)).isTrue();
    // not snapshot
    assertThat(last.sourceOffset().containsKey(SourceInfo.SNAPSHOT_KEY)).isFalse();
    assertThat(((Struct) secondToLast.value()).getStruct(Envelope.FieldName.SOURCE).getBoolean(SourceInfo.SNAPSHOT_KEY)).isTrue();
    assertThat(((Struct) last.value()).getStruct(Envelope.FieldName.SOURCE).getBoolean(SourceInfo.SNAPSHOT_KEY)).isTrue();
    // ---------------------------------------------------------------------------------------------------------------
    // Stopping the connector does not lose events recorded when connector is not running
    // ---------------------------------------------------------------------------------------------------------------
    // Make sure there are no more events and then stop the connector ...
    waitForAvailableRecords(3, TimeUnit.SECONDS);
    int totalConsumed = consumeAvailableRecords(this::print);
    System.out.println("TOTAL CONSUMED = " + totalConsumed);
    // assertThat(totalConsumed).isEqualTo(0);
    stopConnector();
    // Make some changes to data only while the connector is stopped ...
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.query("SELECT * FROM products", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
            connection.execute("INSERT INTO products VALUES (default,'robot','Toy robot',1.304);");
            connection.query("SELECT * FROM products", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
        }
    }
    // Testing.Print.enable();
    // Restart the connector and read the insert record ...
    Testing.print("*** Restarting connector after inserts were made");
    start(MySqlConnector.class, config);
    records = consumeRecordsByTopic(1);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("products")).size()).isEqualTo(1);
    assertThat(records.topics().size()).isEqualTo(1);
    List<SourceRecord> inserts = records.recordsForTopic(DATABASE.topicForTable("products"));
    assertInsert(inserts.get(0), "id", 110);
    Testing.print("*** Done with inserts and restart");
    Testing.print("*** Stopping connector");
    stopConnector();
    Testing.print("*** Restarting connector");
    start(MySqlConnector.class, config);
    // ---------------------------------------------------------------------------------------------------------------
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute("INSERT INTO products VALUES (1001,'roy','old robot',1234.56);");
            connection.query("SELECT * FROM products", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
        }
    }
    // And consume the one insert ...
    records = consumeRecordsByTopic(1);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("products")).size()).isEqualTo(1);
    assertThat(records.topics().size()).isEqualTo(1);
    inserts = records.recordsForTopic(DATABASE.topicForTable("products"));
    assertInsert(inserts.get(0), "id", 1001);
    // ---------------------------------------------------------------------------------------------------------------
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute("UPDATE products SET id=2001, description='really old robot' WHERE id=1001");
            connection.query("SELECT * FROM products", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
        }
    }
    // And consume the update of the PK, which is one insert followed by a delete followed by a tombstone ...
    records = consumeRecordsByTopic(3);
    List<SourceRecord> updates = records.recordsForTopic(DATABASE.topicForTable("products"));
    assertThat(updates.size()).isEqualTo(3);
    assertDelete(updates.get(0), "id", 1001);
    assertTombstone(updates.get(1), "id", 1001);
    assertInsert(updates.get(2), "id", 2001);
    Testing.print("*** Done with PK change");
    // ---------------------------------------------------------------------------------------------------------------
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute("UPDATE products SET weight=1345.67 WHERE id=2001");
            connection.query("SELECT * FROM products", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
        }
    }
    // And consume the one update ...
    records = consumeRecordsByTopic(1);
    assertThat(records.topics().size()).isEqualTo(1);
    updates = records.recordsForTopic(DATABASE.topicForTable("products"));
    assertThat(updates.size()).isEqualTo(1);
    assertUpdate(updates.get(0), "id", 2001);
    updates.forEach(this::validate);
    Testing.print("*** Done with simple update");
    // Add a column with default to the 'products' table and explicitly update one record ...
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute(String.format("ALTER TABLE %s.products ADD COLUMN volume FLOAT, ADD COLUMN alias VARCHAR(30) NULL AFTER description", DATABASE.getDatabaseName()));
            connection.execute("UPDATE products SET volume=13.5 WHERE id=2001");
            connection.query("SELECT * FROM products", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
        }
    }
    // And consume the one schema change event and one update event ...
    records = consumeRecordsByTopic(2);
    assertThat(records.topics().size()).isEqualTo(2);
    assertThat(records.recordsForTopic(DATABASE.getServerName()).size()).isEqualTo(1);
    updates = records.recordsForTopic(DATABASE.topicForTable("products"));
    assertThat(updates.size()).isEqualTo(1);
    assertUpdate(updates.get(0), "id", 2001);
    updates.forEach(this::validate);
    Testing.print("*** Done with schema change (same db and fully-qualified name)");
    // Connect to a different database, but use the fully qualified name for a table in our database ...
    try (MySQLConnection db = MySQLConnection.forTestDatabase("emptydb")) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute(String.format("CREATE TABLE %s.stores (" + " id INT(11) PRIMARY KEY NOT NULL AUTO_INCREMENT," + " first_name VARCHAR(255) NOT NULL," + " last_name VARCHAR(255) NOT NULL," + " email VARCHAR(255) NOT NULL );", DATABASE.getDatabaseName()));
        }
    }
    // And consume the one schema change event only ...
    records = consumeRecordsByTopic(1);
    assertThat(records.topics().size()).isEqualTo(1);
    assertThat(records.recordsForTopic(DATABASE.getServerName()).size()).isEqualTo(1);
    records.recordsForTopic(DATABASE.getServerName()).forEach(this::validate);
    Testing.print("*** Done with PK change (different db and fully-qualified name)");
    // Do something completely different with a table we've not modified yet and then read that event.
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.execute("UPDATE products_on_hand SET quantity=20 WHERE product_id=109");
            connection.query("SELECT * FROM products_on_hand", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
        }
    }
    // And make sure we consume that one update ...
    records = consumeRecordsByTopic(1);
    assertThat(records.topics().size()).isEqualTo(1);
    updates = records.recordsForTopic(DATABASE.topicForTable("products_on_hand"));
    assertThat(updates.size()).isEqualTo(1);
    assertUpdate(updates.get(0), "product_id", 109);
    updates.forEach(this::validate);
    Testing.print("*** Done with verifying no additional events");
    // ---------------------------------------------------------------------------------------------------------------
    // Stop the connector ...
    // ---------------------------------------------------------------------------------------------------------------
    stopConnector();
    // ---------------------------------------------------------------------------------------------------------------
    // Restart the connector to read only part of a transaction ...
    // ---------------------------------------------------------------------------------------------------------------
    Testing.print("*** Restarting connector");
    CompletionResult completion = new CompletionResult();
    start(MySqlConnector.class, config, completion, (record) -> {
        // We want to stop before processing record 3003 ...
        Struct key = (Struct) record.key();
        Number id = (Number) key.get("id");
        return id.intValue() == 3003;
    });
    BinlogPosition positionBeforeInserts = new BinlogPosition();
    BinlogPosition positionAfterInserts = new BinlogPosition();
    BinlogPosition positionAfterUpdate = new BinlogPosition();
    try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
        try (JdbcConnection connection = db.connect()) {
            connection.query("SHOW MASTER STATUS", positionBeforeInserts::readFromDatabase);
            connection.execute("INSERT INTO products(id,name,description,weight,volume,alias) VALUES " + "(3001,'ashley','super robot',34.56,0.00,'ashbot'), " + "(3002,'arthur','motorcycle',87.65,0.00,'arcycle'), " + "(3003,'oak','tree',987.65,0.00,'oak');");
            connection.query("SELECT * FROM products", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
            connection.query("SHOW MASTER STATUS", positionAfterInserts::readFromDatabase);
            // Change something else that is unrelated ...
            connection.execute("UPDATE products_on_hand SET quantity=40 WHERE product_id=109");
            connection.query("SELECT * FROM products_on_hand", rs -> {
                if (Testing.Print.isEnabled())
                    connection.print(rs);
            });
            connection.query("SHOW MASTER STATUS", positionAfterUpdate::readFromDatabase);
        }
    }
    // Testing.Print.enable();
    // And consume the one insert ...
    records = consumeRecordsByTopic(2);
    assertThat(records.recordsForTopic(DATABASE.topicForTable("products")).size()).isEqualTo(2);
    assertThat(records.topics().size()).isEqualTo(1);
    inserts = records.recordsForTopic(DATABASE.topicForTable("products"));
    assertInsert(inserts.get(0), "id", 3001);
    assertInsert(inserts.get(1), "id", 3002);
    // Verify that the connector has stopped ...
    completion.await(10, TimeUnit.SECONDS);
    assertThat(completion.hasCompleted()).isTrue();
    assertThat(completion.hasError()).isTrue();
    assertThat(completion.success()).isFalse();
    assertNoRecordsToConsume();
    assertConnectorNotRunning();
    // ---------------------------------------------------------------------------------------------------------------
    // Stop the connector ...
    // ---------------------------------------------------------------------------------------------------------------
    stopConnector();
    // Read the last committed offsets, and verify the binlog coordinates ...
    SourceInfo persistedOffsetSource = new SourceInfo();
    persistedOffsetSource.setServerName(config.getString(MySqlConnectorConfig.SERVER_NAME));
    Map<String, ?> lastCommittedOffset = readLastCommittedOffset(config, persistedOffsetSource.partition());
    persistedOffsetSource.setOffset(lastCommittedOffset);
    Testing.print("Position before inserts: " + positionBeforeInserts);
    Testing.print("Position after inserts:  " + positionAfterInserts);
    Testing.print("Offset: " + lastCommittedOffset);
    Testing.print("Position after update:  " + positionAfterUpdate);
    if (replicaIsMaster) {
        // Same binlog filename ...
        assertThat(persistedOffsetSource.binlogFilename()).isEqualTo(positionBeforeInserts.binlogFilename());
        assertThat(persistedOffsetSource.binlogFilename()).isEqualTo(positionAfterInserts.binlogFilename());
        // Binlog position in offset should be more than before the inserts, but less than the position after the inserts ...
        assertThat(persistedOffsetSource.binlogPosition()).isGreaterThan(positionBeforeInserts.binlogPosition());
        assertThat(persistedOffsetSource.binlogPosition()).isLessThan(positionAfterInserts.binlogPosition());
    } else {
    // the replica is not the same server as the master, so it will have a different binlog filename and position ...
    }
    // Event number is 2 ...
    assertThat(persistedOffsetSource.eventsToSkipUponRestart()).isEqualTo(2);
    // GTID set should match the before-inserts GTID set ...
    // assertThat(persistedOffsetSource.gtidSet()).isEqualTo(positionBeforeInserts.gtidSet());
    Testing.print("*** Restarting connector, and should begin with inserting 3003 (not 109!)");
    start(MySqlConnector.class, config);
    // And consume the insert for 3003 ...
    records = consumeRecordsByTopic(1);
    assertThat(records.topics().size()).isEqualTo(1);
    inserts = records.recordsForTopic(DATABASE.topicForTable("products"));
    if (inserts == null) {
        // Whether the products_on_hand update arrived instead or nothing arrived at all,
        // the restart skipped the insert ...
        fail("Restarted connector and missed the insert of product id=3003!");
    }
    // Read the first record produced since we've restarted
    SourceRecord prod3003 = inserts.get(0);
    assertInsert(prod3003, "id", 3003);
    // Check that the offset has the correct/expected values ...
    assertOffset(prod3003, "file", lastCommittedOffset.get("file"));
    assertOffset(prod3003, "pos", lastCommittedOffset.get("pos"));
    assertOffset(prod3003, "row", 3);
    assertOffset(prod3003, "event", lastCommittedOffset.get("event"));
    // Check that the record has all of the column values ...
    assertValueField(prod3003, "after/id", 3003);
    assertValueField(prod3003, "after/name", "oak");
    assertValueField(prod3003, "after/description", "tree");
    assertValueField(prod3003, "after/weight", 987.65d);
    assertValueField(prod3003, "after/volume", 0.0d);
    assertValueField(prod3003, "after/alias", "oak");
    // And make sure we consume that one extra update ...
    records = consumeRecordsByTopic(1);
    assertThat(records.topics().size()).isEqualTo(1);
    updates = records.recordsForTopic(DATABASE.topicForTable("products_on_hand"));
    assertThat(updates.size()).isEqualTo(1);
    assertUpdate(updates.get(0), "product_id", 109);
    updates.forEach(this::validate);
    Testing.print("*** Done with simple insert");
}
Also used: CompletionResult(io.debezium.embedded.EmbeddedEngine.CompletionResult) JdbcConnection(io.debezium.jdbc.JdbcConnection) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Struct(org.apache.kafka.connect.data.Struct) Test(org.junit.Test) AbstractConnectorTest(io.debezium.embedded.AbstractConnectorTest)
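
The shutdown verification at the end of Example 2 reduces to an await-then-inspect idiom on the CompletionResult, sketched below. The verifyStopped method name and the IllegalStateException are illustrative additions; the await, hasCompleted, hasError, success, message, and error calls are the ones the test itself uses.

import java.util.concurrent.TimeUnit;

import io.debezium.embedded.EmbeddedEngine.CompletionResult;

public class AwaitCompletionSketch {
    // Assumes the engine was started elsewhere with this CompletionResult
    // registered as its completion callback.
    static void verifyStopped(CompletionResult completion) throws InterruptedException {
        // Block until the engine reports completion, or give up after 10 seconds ...
        completion.await(10, TimeUnit.SECONDS);
        if (!completion.hasCompleted()) {
            throw new IllegalStateException("Engine did not stop within the timeout");
        }
        if (completion.hasError()) {
            // The engine stopped because of a failure; the callback captured the cause ...
            System.err.println(completion.message());
            completion.error().printStackTrace();
        } else if (completion.success()) {
            System.out.println("Engine stopped cleanly");
        }
    }
}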

Aggregations

CompletionResult (io.debezium.embedded.EmbeddedEngine.CompletionResult): 2 usages
JsonNode (com.fasterxml.jackson.databind.JsonNode): 1 usage
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 1 usage
Configuration (io.debezium.config.Configuration): 1 usage
SchemaUtil (io.debezium.data.SchemaUtil): 1 usage
VerifyRecord (io.debezium.data.VerifyRecord): 1 usage
RecordValueComparator (io.debezium.data.VerifyRecord.RecordValueComparator): 1 usage
Array (io.debezium.document.Array): 1 usage
ArrayReader (io.debezium.document.ArrayReader): 1 usage
ArrayWriter (io.debezium.document.ArrayWriter): 1 usage
Document (io.debezium.document.Document): 1 usage
DocumentReader (io.debezium.document.DocumentReader): 1 usage
Value (io.debezium.document.Value): 1 usage
AbstractConnectorTest (io.debezium.embedded.AbstractConnectorTest): 1 usage
CompletionCallback (io.debezium.embedded.EmbeddedEngine.CompletionCallback): 1 usage
JdbcConnection (io.debezium.jdbc.JdbcConnection): 1 usage
Clock (io.debezium.util.Clock): 1 usage
Collect (io.debezium.util.Collect): 1 usage
IoUtil (io.debezium.util.IoUtil): 1 usage
Iterators (io.debezium.util.Iterators): 1 usage