
Example 1 with DdlParserSql2003

Use of io.debezium.relational.ddl.DdlParserSql2003 in the debezium project.

From the class AbstractDatabaseHistoryTest, method beforeEach:

@Before
public void beforeEach() {
    parser = new DdlParserSql2003();
    tables = new Tables();
    // Separate Tables instances used by the individual test cases ...
    t0 = new Tables();
    t1 = new Tables();
    t2 = new Tables();
    t3 = new Tables();
    t4 = new Tables();
    all = new Tables();
    // Two distinct source partitions ...
    source1 = server("abc");
    source2 = server("xyz");
    // createHistory() supplies the DatabaseHistory implementation under test ...
    history = createHistory();
}
Also used: DdlParserSql2003 (io.debezium.relational.ddl.DdlParserSql2003), Tables (io.debezium.relational.Tables), Before (org.junit.Before)
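
The setup above only wires the parser, the Tables instances, and the history implementation together. For orientation, here is a minimal sketch (not taken from the repository) of how that parser/Tables pair is then exercised in a test; the DDL string and the test method name are illustrative, while setCurrentSchema(), parse() and Tables.size() are the same calls the recovery test below relies on.

@Test
public void shouldParseCreateTableIntoTables() {
    // parser and tables come from beforeEach(); the DDL below is a made-up example.
    parser.setCurrentSchema("db1");
    String ddl = "CREATE TABLE foo ( id INTEGER NOT NULL PRIMARY KEY, name VARCHAR(255) NOT NULL );";
    // parse(..) applies the statements to the supplied Tables object ...
    parser.parse(ddl, tables);
    assertThat(tables.size()).isEqualTo(1);
}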

Example 2 with DdlParserSql2003

Use of io.debezium.relational.ddl.DdlParserSql2003 in the debezium project.

From the class KafkaDatabaseHistoryTest, method testHistoryTopicContent:

private void testHistoryTopicContent(boolean skipUnparseableDDL) {
    // Start up the history ...
    Configuration config = Configuration.create()
            .with(KafkaDatabaseHistory.BOOTSTRAP_SERVERS, kafka.brokerList())
            .with(KafkaDatabaseHistory.TOPIC, topicName)
            .with(DatabaseHistory.NAME, "my-db-history")
            .with(KafkaDatabaseHistory.RECOVERY_POLL_INTERVAL_MS, 500)
            .with(KafkaDatabaseHistory.consumerConfigPropertyName(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), 100)
            .with(KafkaDatabaseHistory.consumerConfigPropertyName(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), 50000)
            .with(KafkaDatabaseHistory.SKIP_UNPARSEABLE_DDL_STATEMENTS, skipUnparseableDDL)
            .build();
    history.configure(config, null);
    history.start();
    // Should be able to call start more than once ...
    history.start();
    history.initializeStorage();
    // Calling it another time to ensure we can work with the DB history topic already existing
    history.initializeStorage();
    DdlParser recoveryParser = new DdlParserSql2003();
    DdlParser ddlParser = new DdlParserSql2003();
    // recover does this, so we need to as well
    ddlParser.setCurrentSchema("db1");
    Tables tables1 = new Tables();
    Tables tables2 = new Tables();
    Tables tables3 = new Tables();
    // Recover from the very beginning ...
    setLogPosition(0);
    history.recover(source, position, tables1, recoveryParser);
    // There should have been nothing to recover ...
    assertThat(tables1.size()).isEqualTo(0);
    // Now record schema changes, which writes out to kafka but doesn't actually change the Tables ...
    setLogPosition(10);
    ddl = "CREATE TABLE foo ( name VARCHAR(255) NOT NULL PRIMARY KEY); \n" + "CREATE TABLE customers ( id INTEGER NOT NULL PRIMARY KEY, name VARCHAR(100) NOT NULL ); \n" + "CREATE TABLE products ( productId INTEGER NOT NULL PRIMARY KEY, desc VARCHAR(255) NOT NULL); \n";
    history.record(source, position, "db1", ddl);
    // Parse the DDL statement 3x and each time update a different Tables object ...
    ddlParser.parse(ddl, tables1);
    assertThat(tables1.size()).isEqualTo(3);
    ddlParser.parse(ddl, tables2);
    assertThat(tables2.size()).isEqualTo(3);
    ddlParser.parse(ddl, tables3);
    assertThat(tables3.size()).isEqualTo(3);
    // Record a drop statement and parse it for 2 of our 3 Tables...
    setLogPosition(39);
    ddl = "DROP TABLE foo;";
    history.record(source, position, "db1", ddl);
    ddlParser.parse(ddl, tables2);
    assertThat(tables2.size()).isEqualTo(2);
    ddlParser.parse(ddl, tables3);
    assertThat(tables3.size()).isEqualTo(2);
    // Record another DDL statement and parse it for 1 of our 3 Tables...
    setLogPosition(10003);
    ddl = "CREATE TABLE suppliers ( supplierId INTEGER NOT NULL PRIMARY KEY, name VARCHAR(255) NOT NULL);";
    history.record(source, position, "db1", ddl);
    ddlParser.parse(ddl, tables3);
    assertThat(tables3.size()).isEqualTo(3);
    // Stop the history (which should stop the producer) ...
    history.stop();
    history = new KafkaDatabaseHistory();
    history.configure(config, null);
    // no need to start
    // Recover from the very beginning to just past the first change ...
    Tables recoveredTables = new Tables();
    setLogPosition(15);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables1);
    // Recover from the very beginning to just past the second change ...
    recoveredTables = new Tables();
    setLogPosition(50);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables2);
    // Recover from the very beginning to just past the third change ...
    recoveredTables = new Tables();
    setLogPosition(10010);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables3);
    // Recover from the very beginning to way past the third change ...
    recoveredTables = new Tables();
    setLogPosition(100000010);
    history.recover(source, position, recoveredTables, recoveryParser);
    assertThat(recoveredTables).isEqualTo(tables3);
}
Also used: DdlParserSql2003 (io.debezium.relational.ddl.DdlParserSql2003), Configuration (io.debezium.config.Configuration), Tables (io.debezium.relational.Tables), DdlParser (io.debezium.relational.ddl.DdlParser)
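
The test above interleaves recording and recovery at several log positions. Stripped to its core, the round trip looks like the hedged sketch below, which reuses the config, source, position and setLogPosition helper from the test and is therefore an illustration rather than standalone code: record a schema change through one history instance, then let a fresh instance rebuild the schema from the Kafka topic.

    history.configure(config, null);
    history.start();
    history.initializeStorage();
    setLogPosition(10);
    history.record(source, position, "db1", "CREATE TABLE foo ( id INTEGER NOT NULL PRIMARY KEY );");
    history.stop();
    // A brand-new history pointed at the same topic can reconstruct the schema ...
    history = new KafkaDatabaseHistory();
    history.configure(config, null);
    Tables rebuilt = new Tables();
    setLogPosition(20);
    history.recover(source, position, rebuilt, new DdlParserSql2003());
    assertThat(rebuilt.size()).isEqualTo(1);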

Aggregations

Tables (io.debezium.relational.Tables): 2 usages
DdlParserSql2003 (io.debezium.relational.ddl.DdlParserSql2003): 2 usages
Configuration (io.debezium.config.Configuration): 1 usage
DdlParser (io.debezium.relational.ddl.DdlParser): 1 usage
Before (org.junit.Before): 1 usage