
Example 11 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From the class KafkaDatabaseHistoryTest, method shouldStopOnUnparseableSQL:

@Test(expected = ParsingException.class)
public void shouldStopOnUnparseableSQL() throws Exception {
    // Create the empty topic ...
    kafka.createTopic(topicName, 1, 1);
    // Create an invalid record whose DDL statement cannot be parsed
    final ProducerRecord<String, String> invalidSQL = new ProducerRecord<>(topicName, PARTITION_NO, null,
            "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"xxxDROP TABLE foo;\"}");
    final Configuration intruderConfig = Configuration.create()
            .withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList())
            .withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder")
            .withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
            .withDefault(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
            .build();
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(intruderConfig.asProperties())) {
        producer.send(invalidSQL).get();
    }
    testHistoryTopicContent(false);
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), Configuration (io.debezium.config.Configuration), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), Test (org.junit.Test)
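
The intruder producer above uses withDefault rather than with. In Debezium's Configuration builder, with always sets the key, while withDefault is only expected to supply a value when the key has not been set yet (an assumption based on the method name and how the tests use it). A minimal sketch of that difference:

import io.debezium.config.Configuration;

public class ConfigBuilderSketch {
    public static void main(String[] args) {
        Configuration config = Configuration.create()
                // Explicitly set the client id ...
                .with("client.id", "primary")
                // ... so this default should be ignored (key already present),
                .withDefault("client.id", "intruder")
                // while this one should apply (key still unset).
                .withDefault("acks", "all")
                .build();
        System.out.println(config.getString("client.id")); // expected: primary
        System.out.println(config.getString("acks"));      // expected: all
    }
}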

Example 12 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From the class KafkaDatabaseHistoryTest, method testHistoryTopicContent:

private void testHistoryTopicContent(boolean skipUnparseableDDL) {
    // Start up the history ...
    Configuration config = Configuration.create()
            .with(KafkaDatabaseHistory.BOOTSTRAP_SERVERS, kafka.brokerList())
            .with(KafkaDatabaseHistory.TOPIC, topicName)
            .with(DatabaseHistory.NAME, "my-db-history")
            .with(KafkaDatabaseHistory.RECOVERY_POLL_INTERVAL_MS, 500)
            .with(KafkaDatabaseHistory.consumerConfigPropertyName(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), 100)
            .with(KafkaDatabaseHistory.consumerConfigPropertyName(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), 50000)
            .with(KafkaDatabaseHistory.SKIP_UNPARSEABLE_DDL_STATEMENTS, skipUnparseableDDL)
            .build();
    history.configure(config, null);
    history.start();
    // Should be able to call start more than once ...
    history.start();
    history.initializeStorage();
    // Calling it another time to ensure we can work with the DB history topic already existing
    history.initializeStorage();
    DdlParser recoveryParser = new DdlParserSql2003();
    DdlParser ddlParser = new DdlParserSql2003();
    // recover does this, so we need to as well
    ddlParser.setCurrentSchema("db1");
    Tables tables1 = new Tables();
    Tables tables2 = new Tables();
    Tables tables3 = new Tables();
    // Recover from the very beginning ...
    setLogPosition(0);
    history.recover(source, position, tables1, recoveryParser);
    // There should have been nothing to recover ...
    assertThat(tables1.size()).isEqualTo(0);
    // Now record schema changes, which writes out to Kafka but doesn't actually change the Tables ...
    setLogPosition(10);
    ddl = "CREATE TABLE foo ( name VARCHAR(255) NOT NULL PRIMARY KEY); \n"
            + "CREATE TABLE customers ( id INTEGER NOT NULL PRIMARY KEY, name VARCHAR(100) NOT NULL ); \n"
            + "CREATE TABLE products ( productId INTEGER NOT NULL PRIMARY KEY, desc VARCHAR(255) NOT NULL); \n";
    history.record(source, position, "db1", ddl);
    // Parse the DDL statement 3x and each time update a different Tables object ...
    ddlParser.parse(ddl, tables1);
    assertThat(tables1.size()).isEqualTo(3);
    ddlParser.parse(ddl, tables2);
    assertThat(tables2.size()).isEqualTo(3);
    ddlParser.parse(ddl, tables3);
    assertThat(tables3.size()).isEqualTo(3);
    // Record a drop statement and parse it for 2 of our 3 Tables...
    setLogPosition(39);
    ddl = "DROP TABLE foo;";
    history.record(source, position, "db1", ddl);
    ddlParser.parse(ddl, tables2);
    assertThat(tables2.size()).isEqualTo(2);
    ddlParser.parse(ddl, tables3);
    assertThat(tables3.size()).isEqualTo(2);
    // Record another DDL statement and parse it for 1 of our 3 Tables...
    setLogPosition(10003);
    ddl = "CREATE TABLE suppliers ( supplierId INTEGER NOT NULL PRIMARY KEY, name VARCHAR(255) NOT NULL);";
    history.record(source, position, "db1", ddl);
    ddlParser.parse(ddl, tables3);
    assertThat(tables3.size()).isEqualTo(3);
    // Stop the history (which should stop the producer) ...
    history.stop();
    history = new KafkaDatabaseHistory();
    history.configure(config, null);
    // no need to start
    // Recover from the very beginning to just past the first change ...
    Tables recoveredTables = new Tables();
    setLogPosition(15);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables1);
    // Recover from the very beginning to just past the second change ...
    recoveredTables = new Tables();
    setLogPosition(50);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables2);
    // Recover from the very beginning to just past the third change ...
    recoveredTables = new Tables();
    setLogPosition(10010);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables3);
    // Recover from the very beginning to way past the third change ...
    recoveredTables = new Tables();
    setLogPosition(100000010);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables3);
}
Also used: DdlParserSql2003 (io.debezium.relational.ddl.DdlParserSql2003), Configuration (io.debezium.config.Configuration), Tables (io.debezium.relational.Tables), DdlParser (io.debezium.relational.ddl.DdlParser)
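
Both history tests route raw Kafka consumer settings through KafkaDatabaseHistory.consumerConfigPropertyName(...). Debezium documents a database.history.consumer.* namespace for such pass-through properties, so the method presumably just prefixes the consumer key. The sketch below illustrates that idea; the exact prefix string is an assumption, not a guaranteed implementation detail:

public class HistoryConsumerConfigSketch {

    // Assumed prefix, following Debezium's documented
    // database.history.consumer.* pass-through convention.
    private static final String CONSUMER_PREFIX = "database.history.consumer.";

    // Maps a raw consumer key such as "session.timeout.ms" into the
    // history configuration namespace.
    static String consumerConfigPropertyName(String consumerPropertyName) {
        return CONSUMER_PREFIX + consumerPropertyName;
    }

    public static void main(String[] args) {
        // Prints: database.history.consumer.session.timeout.ms
        System.out.println(consumerConfigPropertyName("session.timeout.ms"));
    }
}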

Example 13 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From the class KafkaDatabaseHistoryTest, method testExists:

@Test
public void testExists() {
    // happy path
    testHistoryTopicContent(true);
    assertTrue(history.exists());
    // Set history to use dummy topic
    Configuration config = Configuration.create()
            .with(KafkaDatabaseHistory.BOOTSTRAP_SERVERS, kafka.brokerList())
            .with(KafkaDatabaseHistory.TOPIC, "dummytopic")
            .with(DatabaseHistory.NAME, "my-db-history")
            .with(KafkaDatabaseHistory.RECOVERY_POLL_INTERVAL_MS, 500)
            .with(KafkaDatabaseHistory.consumerConfigPropertyName(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), 100)
            .with(KafkaDatabaseHistory.consumerConfigPropertyName(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), 50000)
            .with(KafkaDatabaseHistory.SKIP_UNPARSEABLE_DDL_STATEMENTS, true)
            .build();
    history.configure(config, null);
    history.start();
    // dummytopic should not exist yet
    assertFalse(history.exists());
}
Also used: Configuration (io.debezium.config.Configuration), Test (org.junit.Test)
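
Outside of tests, exists() is useful as a guard around storage creation: configure and start the history, then create the backing topic only when it is missing. A hedged sketch of that pattern, reusing only the KafkaDatabaseHistory calls already shown in these tests (Example 12 shows that initializeStorage() also tolerates an existing topic, so the guard is defensive rather than required):

import io.debezium.config.Configuration;
import io.debezium.relational.history.KafkaDatabaseHistory;

public class HistoryBootstrapSketch {

    // Configures and starts a Kafka-backed history, creating its
    // topic only if it does not exist yet.
    static KafkaDatabaseHistory startHistory(Configuration config) {
        KafkaDatabaseHistory history = new KafkaDatabaseHistory();
        history.configure(config, null);
        history.start();
        if (!history.exists()) {
            history.initializeStorage();
        }
        return history;
    }
}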

Example 14 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From the class PostgresConnectorIT, method shouldProduceEventsWhenSnapshotsAreNeverAllowed:

@Test
public void shouldProduceEventsWhenSnapshotsAreNeverAllowed() throws InterruptedException {
    TestHelper.execute(SETUP_TABLES_STMT);
    Configuration config = TestHelper.defaultConfig()
            .with(PostgresConnectorConfig.SNAPSHOT_MODE, NEVER.getValue())
            .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
            .build();
    start(PostgresConnector.class, config);
    assertConnectorIsRunning();
    waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
    // there shouldn't be any snapshot records
    assertNoRecordsToConsume();
    // insert and verify 2 new records
    TestHelper.execute(INSERT_STMT);
    assertRecordsAfterInsert(2, 2, 2);
}
Also used: Configuration (io.debezium.config.Configuration), Test (org.junit.Test), AbstractConnectorTest (io.debezium.embedded.AbstractConnectorTest)
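
TestHelper.defaultConfig() is test scaffolding that pre-fills the connection details. Assembled by hand, an equivalent configuration might look like the sketch below; the connection values are placeholders, and the field constants are the same PostgresConnectorConfig fields the validation test (Example 15) exercises:

import io.debezium.config.Configuration;
import io.debezium.connector.postgresql.PostgresConnectorConfig;

public class NeverSnapshotConfigSketch {

    static Configuration neverSnapshotConfig() {
        return Configuration.create()
                // Placeholder connection details; replace with real values.
                .with(PostgresConnectorConfig.HOSTNAME, "localhost")
                .with(PostgresConnectorConfig.PORT, 5432)
                .with(PostgresConnectorConfig.USER, "postgres")
                .with(PostgresConnectorConfig.PASSWORD, "postgres")
                .with(PostgresConnectorConfig.DATABASE_NAME, "postgres")
                // Never snapshot; stream only changes made after startup
                // (the test passes the same value via NEVER.getValue()).
                .with(PostgresConnectorConfig.SNAPSHOT_MODE, "never")
                // Clean up the replication slot when the connector stops.
                .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
                .build();
    }
}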

Example 15 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From the class PostgresConnectorIT, method shouldValidateConfiguration:

@Test
public void shouldValidateConfiguration() throws Exception {
    // use an empty configuration which should be invalid because of the lack of DB connection details
    Configuration config = Configuration.create().build();
    PostgresConnector connector = new PostgresConnector();
    Config validatedConfig = connector.validate(config.asMap());
    // validate that the required fields have errors
    assertConfigurationErrors(validatedConfig, PostgresConnectorConfig.HOSTNAME, 1);
    assertConfigurationErrors(validatedConfig, PostgresConnectorConfig.USER, 1);
    assertConfigurationErrors(validatedConfig, PostgresConnectorConfig.PASSWORD, 1);
    assertConfigurationErrors(validatedConfig, PostgresConnectorConfig.DATABASE_NAME, 1);
    // validate the non required fields
    validateField(validatedConfig, PostgresConnectorConfig.PLUGIN_NAME, LogicalDecoder.DECODERBUFS.getValue());
    validateField(validatedConfig, PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME);
    validateField(validatedConfig, PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
    validateField(validatedConfig, PostgresConnectorConfig.PORT, PostgresConnectorConfig.DEFAULT_PORT);
    validateField(validatedConfig, PostgresConnectorConfig.SERVER_NAME, null);
    validateField(validatedConfig, PostgresConnectorConfig.TOPIC_SELECTION_STRATEGY, PostgresConnectorConfig.TopicSelectionStrategy.TOPIC_PER_TABLE);
    validateField(validatedConfig, PostgresConnectorConfig.MAX_QUEUE_SIZE, PostgresConnectorConfig.DEFAULT_MAX_QUEUE_SIZE);
    validateField(validatedConfig, PostgresConnectorConfig.MAX_BATCH_SIZE, PostgresConnectorConfig.DEFAULT_MAX_BATCH_SIZE);
    validateField(validatedConfig, PostgresConnectorConfig.ROWS_FETCH_SIZE, PostgresConnectorConfig.DEFAULT_ROWS_FETCH_SIZE);
    validateField(validatedConfig, PostgresConnectorConfig.POLL_INTERVAL_MS, PostgresConnectorConfig.DEFAULT_POLL_INTERVAL_MILLIS);
    validateField(validatedConfig, PostgresConnectorConfig.SSL_MODE, PostgresConnectorConfig.SecureConnectionMode.DISABLED);
    validateField(validatedConfig, PostgresConnectorConfig.SSL_CLIENT_CERT, null);
    validateField(validatedConfig, PostgresConnectorConfig.SSL_CLIENT_KEY, null);
    validateField(validatedConfig, PostgresConnectorConfig.SSL_CLIENT_KEY_PASSWORD, null);
    validateField(validatedConfig, PostgresConnectorConfig.SSL_ROOT_CERT, null);
    validateField(validatedConfig, PostgresConnectorConfig.SCHEMA_WHITELIST, null);
    validateField(validatedConfig, PostgresConnectorConfig.SCHEMA_BLACKLIST, null);
    validateField(validatedConfig, PostgresConnectorConfig.TABLE_WHITELIST, null);
    validateField(validatedConfig, PostgresConnectorConfig.TABLE_BLACKLIST, null);
    validateField(validatedConfig, PostgresConnectorConfig.COLUMN_BLACKLIST, null);
    validateField(validatedConfig, PostgresConnectorConfig.SNAPSHOT_MODE, INITIAL);
    validateField(validatedConfig, PostgresConnectorConfig.SNAPSHOT_LOCK_TIMEOUT_MS, PostgresConnectorConfig.DEFAULT_SNAPSHOT_LOCK_TIMEOUT_MILLIS);
    validateField(validatedConfig, PostgresConnectorConfig.TIME_PRECISION_MODE, TemporalPrecisionMode.ADAPTIVE);
    validateField(validatedConfig, PostgresConnectorConfig.DECIMAL_HANDLING_MODE, PostgresConnectorConfig.DecimalHandlingMode.PRECISE);
    validateField(validatedConfig, PostgresConnectorConfig.SSL_SOCKET_FACTORY, null);
    validateField(validatedConfig, PostgresConnectorConfig.TCP_KEEPALIVE, null);
}
Also used: Configuration (io.debezium.config.Configuration), Config (org.apache.kafka.common.config.Config), Test (org.junit.Test), AbstractConnectorTest (io.debezium.embedded.AbstractConnectorTest)
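
assertConfigurationErrors and validateField are Debezium test helpers, but the mechanism underneath is the standard Kafka Connect validation API: Connector.validate(Map) returns a Config whose ConfigValue entries carry the error messages for each field. A minimal sketch of inspecting that result directly:

import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigValue;

import io.debezium.config.Configuration;
import io.debezium.connector.postgresql.PostgresConnector;

public class ValidationSketch {
    public static void main(String[] args) {
        // Deliberately empty, so required connection fields fail validation.
        Configuration config = Configuration.create().build();
        Config validated = new PostgresConnector().validate(config.asMap());

        // Print every field that reported at least one error message;
        // with an empty config that should include the hostname, user,
        // password, and database-name fields asserted in the test above.
        for (ConfigValue value : validated.configValues()) {
            if (!value.errorMessages().isEmpty()) {
                System.out.println(value.name() + " -> " + value.errorMessages());
            }
        }
    }
}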

Aggregations

Configuration (io.debezium.config.Configuration): 38
Test (org.junit.Test): 21
AbstractConnectorTest (io.debezium.embedded.AbstractConnectorTest): 16
Config (org.apache.kafka.common.config.Config): 15
CommonConnectorConfig (io.debezium.config.CommonConnectorConfig): 10
FixFor (io.debezium.doc.FixFor): 6
ConnectException (org.apache.kafka.connect.errors.ConnectException): 6
ConfigValue (org.apache.kafka.common.config.ConfigValue): 5
HashMap (java.util.HashMap): 4
JsonConverter (org.apache.kafka.connect.json.JsonConverter): 4
SQLException (java.sql.SQLException): 3
Map (java.util.Map): 3
Field (io.debezium.config.Field): 2
SchemaUtil (io.debezium.data.SchemaUtil): 2
VerifyRecord (io.debezium.data.VerifyRecord): 2
CompletionCallback (io.debezium.embedded.EmbeddedEngine.CompletionCallback): 2
EmbeddedConfig (io.debezium.embedded.EmbeddedEngine.EmbeddedConfig): 2
JdbcConnection (io.debezium.jdbc.JdbcConnection): 2
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 2
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 2