
Example 46 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

Class MySqlJdbcContext, method readMySqlCharsetSystemVariables.

/**
 * Read the MySQL charset-related system variables.
 *
 * @param sql the reference that should be set to the SQL statement; may be null if not needed
 * @return the system variables that are related to server character sets; never null
 */
protected Map<String, String> readMySqlCharsetSystemVariables(AtomicReference<String> sql) {
    // Read the system variables from the MySQL instance and get the current database name ...
    Map<String, String> variables = new HashMap<>();
    try (JdbcConnection mysql = jdbc.connect()) {
        logger.debug("Reading MySQL charset-related system variables before parsing DDL history.");
        String statement = "SHOW VARIABLES WHERE Variable_name IN ('character_set_server','collation_server')";
        if (sql != null)
            sql.set(statement);
        mysql.query(statement, rs -> {
            while (rs.next()) {
                String varName = rs.getString(1);
                String value = rs.getString(2);
                if (varName != null && value != null) {
                    variables.put(varName, value);
                    logger.debug("\t{} = {}", Strings.pad(varName, 45, ' '), Strings.pad(value, 45, ' '));
                }
            }
        });
    } catch (SQLException e) {
        throw new ConnectException("Error reading MySQL variables: " + e.getMessage(), e);
    }
    return variables;
}
Also used : HashMap(java.util.HashMap) SQLException(java.sql.SQLException) JdbcConnection(io.debezium.jdbc.JdbcConnection) ConnectException(org.apache.kafka.connect.errors.ConnectException)
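
For context, a minimal sketch of how a caller might consume the returned map, assuming only the method above plus a logger and the JDK types already used in the snippet:

// Hypothetical caller: capture the SQL that was issued and look up the server charset.
AtomicReference<String> sqlRef = new AtomicReference<>();
Map<String, String> charsetVars = readMySqlCharsetSystemVariables(sqlRef);
// Either variable may be absent on unusual server configurations.
String serverCharset = charsetVars.get("character_set_server");
String serverCollation = charsetVars.get("collation_server");
logger.info("Server charset '{}', collation '{}' (read via: {})", serverCharset, serverCollation, sqlRef.get());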

Example 47 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

Class MySqlSchema, method applyDdl.

/**
 * Apply the supplied DDL statements to this database schema and record the history. If a {@code statementConsumer} is
 * supplied, then call it for each sub-sequence of the DDL statements that all apply to the same database.
 * <p>
 * Typically DDL statements are applied using a connection to a single database, and unless the statements use fully-qualified
 * names, the DDL statements apply to this database.
 *
 * @param source the current {@link SourceInfo#partition() partition} and {@link SourceInfo#offset() offset} at which these changes are
 *            found; may not be null
 * @param databaseName the name of the default database under which these statements are applied; may not be null
 * @param ddlStatements the {@code ;}-separated DDL statements; may be null or empty
 * @param statementConsumer the consumer that should be called with each sub-sequence of DDL statements that apply to
 *            a single database; may be null if no action is to be performed with the changes
 * @return {@code true} if changes were made to the database schema, or {@code false} if the DDL statements had no
 *         effect on the database schema
 */
public boolean applyDdl(SourceInfo source, String databaseName, String ddlStatements, DatabaseStatementStringConsumer statementConsumer) {
    Set<TableId> changes;
    if (ignoredQueryStatements.contains(ddlStatements))
        return false;
    try {
        this.ddlChanges.reset();
        this.ddlParser.setCurrentSchema(databaseName);
        this.ddlParser.parse(ddlStatements, tables);
    } catch (ParsingException e) {
        if (skipUnparseableDDL) {
            logger.warn("Ignoring unparseable DDL statement '{}': {}", ddlStatements);
        } else {
            throw e;
        }
    } finally {
        changes = tables.drainChanges();
        // Forward and record the DDL unless we store only monitored tables' DDL and no tables changed
        if (!storeOnlyMonitoredTablesDdl || !changes.isEmpty()) {
            if (statementConsumer != null) {
                if (!ddlChanges.isEmpty() && ddlChanges.applyToMoreDatabasesThan(databaseName)) {
                    // We understood at least some of the DDL statements and can figure out to which database they apply.
                    // They also apply to more databases than 'databaseName', so we need to apply the DDL statements in
                    // the same order they were read for each _affected_ database, grouped together if multiple apply
                    // to the same _affected_ database...
                    ddlChanges.groupStatementStringsByDatabase((dbName, ddl) -> {
                        if (filters.databaseFilter().test(dbName) || dbName == null || "".equals(dbName)) {
                            if (dbName == null)
                                dbName = "";
                            statementConsumer.consume(dbName, ddl);
                        }
                    });
                } else if (filters.databaseFilter().test(databaseName) || databaseName == null || "".equals(databaseName)) {
                    if (databaseName == null)
                        databaseName = "";
                    statementConsumer.consume(databaseName, ddlStatements);
                }
            }
            // Record the DDL statement(s) in the database history so the schema can be recovered later.
            try {
                if (!storeOnlyMonitoredTablesDdl || changes.stream().anyMatch(filters().tableFilter()::test)) {
                    dbHistory.record(source.partition(), source.offset(), databaseName, ddlStatements);
                } else {
                    logger.debug("Changes for DDL '{}' were filtered and not recorded in database history", ddlStatements);
                }
            } catch (Throwable e) {
                throw new ConnectException("Error recording the DDL statement(s) in the database history " + dbHistory + ": " + ddlStatements, e);
            }
        }
    }
    // Figure out what changed ...
    changes.forEach(tableId -> {
        Table table = tables.forTable(tableId);
        if (table == null) {
            // removed
            tableSchemaByTableId.remove(tableId);
        } else {
            TableSchema schema = schemaBuilder.create(schemaPrefix, getEnvelopeSchemaName(table), table, filters.columnFilter(), filters.columnMappers());
            tableSchemaByTableId.put(tableId, schema);
        }
    });
    return true;
}
Also used : TableId(io.debezium.relational.TableId) Table(io.debezium.relational.Table) TableSchema(io.debezium.relational.TableSchema) ParsingException(io.debezium.text.ParsingException) ConnectException(org.apache.kafka.connect.errors.ConnectException)
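
A hedged usage sketch of applyDdl: how a caller might apply one statement and forward the per-database sub-sequences. The names mySqlSchema, source, and emitSchemaChangeRecord are placeholders, not names taken from Debezium; the two-argument consumer matches the statementConsumer.consume(dbName, ddl) calls in the snippet.

// Hypothetical caller: parse one DDL statement and emit a schema-change record per affected database.
String ddl = "CREATE TABLE products (id INT PRIMARY KEY, name VARCHAR(255))";
boolean changed = mySqlSchema.applyDdl(source, "inventory", ddl,
        (dbName, statements) -> emitSchemaChangeRecord(dbName, statements)); // placeholder sink
if (changed) {
    logger.info("Table schemas were rebuilt after applying DDL to database '{}'", "inventory");
}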

Example 48 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

Class PostgresReplicationConnection, method createReplicationStream.

private ReplicationStream createReplicationStream(final LogSequenceNumber lsn) throws SQLException {
    PGReplicationStream s;
    try {
        s = startPgReplicationStream(lsn, plugin.forceRds() ? messageDecoder::optionsWithoutMetadata : messageDecoder::optionsWithMetadata);
        messageDecoder.setContainsMetadata(!plugin.forceRds());
    } catch (PSQLException e) {
        if (e.getMessage().matches("(?s)ERROR: option .* is unknown.*")) {
            // It is possible we are connecting to an old wal2json plug-in
            LOGGER.warn("Could not register for streaming with metadata in messages, falling back to messages without metadata");
            s = startPgReplicationStream(lsn, messageDecoder::optionsWithoutMetadata);
            messageDecoder.setContainsMetadata(false);
        } else if (e.getMessage().matches("(?s)ERROR: requested WAL segment .* has already been removed.*")) {
            LOGGER.error("Cannot rewind to last processed WAL position", e);
            throw new ConnectException("The offset to start reading from has been removed from the database write-ahead log. Create a new snapshot and consider setting of PostgreSQL parameter wal_keep_segments = 0.");
        } else {
            throw e;
        }
    }
    final PGReplicationStream stream = s;
    final long lsnLong = lsn.asLong();
    return new ReplicationStream() {

        private static final int CHECK_WARNINGS_AFTER_COUNT = 100;

        private int warningCheckCounter = CHECK_WARNINGS_AFTER_COUNT;

        // make sure this is volatile since multiple threads may be interested in this value
        private volatile LogSequenceNumber lastReceivedLSN;

        @Override
        public void read(ReplicationMessageProcessor processor) throws SQLException, InterruptedException {
            ByteBuffer read = stream.read();
            // the lsn we started from is inclusive, so we need to avoid sending back the same message twice
            if (lsnLong >= stream.getLastReceiveLSN().asLong()) {
                return;
            }
            deserializeMessages(read, processor);
        }

        @Override
        public void readPending(ReplicationMessageProcessor processor) throws SQLException, InterruptedException {
            ByteBuffer read = stream.readPending();
            // the lsn we started from is inclusive, so we need to avoid sending back the same message twice
            if (read == null || lsnLong >= stream.getLastReceiveLSN().asLong()) {
                return;
            }
            deserializeMessages(read, processor);
        }

        private void deserializeMessages(ByteBuffer buffer, ReplicationMessageProcessor processor) throws SQLException, InterruptedException {
            lastReceivedLSN = stream.getLastReceiveLSN();
            messageDecoder.processMessage(buffer, processor, typeRegistry);
        }

        @Override
        public void close() throws SQLException {
            processWarnings(true);
            stream.close();
        }

        @Override
        public void flushLastReceivedLsn() throws SQLException {
            if (lastReceivedLSN == null) {
                // nothing to flush yet, since we haven't read anything...
                return;
            }
            doFlushLsn(lastReceivedLSN);
        }

        @Override
        public void flushLsn(long lsn) throws SQLException {
            doFlushLsn(LogSequenceNumber.valueOf(lsn));
        }

        private void doFlushLsn(LogSequenceNumber lsn) throws SQLException {
            stream.setFlushedLSN(lsn);
            stream.setAppliedLSN(lsn);
            stream.forceUpdateStatus();
        }

        @Override
        public Long lastReceivedLsn() {
            return lastReceivedLSN != null ? lastReceivedLSN.asLong() : null;
        }

        private void processWarnings(final boolean forced) throws SQLException {
            if (--warningCheckCounter == 0 || forced) {
                warningCheckCounter = CHECK_WARNINGS_AFTER_COUNT;
                for (SQLWarning w = connection().getWarnings(); w != null; w = w.getNextWarning()) {
                    LOGGER.debug("Server-side message: '{}', state = {}, code = {}", w.getMessage(), w.getSQLState(), w.getErrorCode());
                }
            }
        }
    };
}
Also used : SQLWarning(java.sql.SQLWarning) PGReplicationStream(org.postgresql.replication.PGReplicationStream) PSQLException(org.postgresql.util.PSQLException) LogSequenceNumber(org.postgresql.replication.LogSequenceNumber) ByteBuffer(java.nio.ByteBuffer) ConnectException(org.apache.kafka.connect.errors.ConnectException)
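
A sketch of how the returned stream might be polled, assuming ReplicationMessageProcessor is the single-method callback that deserializeMessages() hands each decoded message to; the loop, the running flag, and process() are illustrative placeholders, and exception handling is omitted.

// Hypothetical polling loop around the stream created above.
ReplicationStream stream = createReplicationStream(startLsn); // startLsn: last committed position
while (running) {
    // readPending() delivers nothing when no new WAL data has arrived
    stream.readPending(message -> process(message)); // process() is a placeholder
    // acknowledge what has been handled so the server may recycle older WAL segments
    stream.flushLastReceivedLsn();
}
stream.close();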

Example 49 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

Class PostgresConnectorTask, method start.

@Override
public void start(Configuration config) {
    if (running.get()) {
        // already running
        return;
    }
    PostgresConnectorConfig connectorConfig = new PostgresConnectorConfig(config);
    // Create type registry
    TypeRegistry typeRegistry;
    try (final PostgresConnection connection = new PostgresConnection(connectorConfig.jdbcConfig())) {
        typeRegistry = connection.getTypeRegistry();
    }
    // create the task context and schema...
    TopicSelector topicSelector = TopicSelector.create(connectorConfig);
    PostgresSchema schema = new PostgresSchema(connectorConfig, typeRegistry, topicSelector);
    this.taskContext = new PostgresTaskContext(connectorConfig, schema, topicSelector);
    SourceInfo sourceInfo = new SourceInfo(connectorConfig.serverName());
    Map<String, Object> existingOffset = context.offsetStorageReader().offset(sourceInfo.partition());
    LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
    try {
        // Print out the server information
        try (PostgresConnection connection = taskContext.createConnection()) {
            logger.info(connection.serverInfo().toString());
        }
        if (existingOffset == null) {
            logger.info("No previous offset found");
            if (connectorConfig.snapshotNeverAllowed()) {
                logger.info("Snapshots are not allowed as per configuration, starting streaming logical changes only");
                producer = new RecordsStreamProducer(taskContext, sourceInfo);
            } else {
                // otherwise we always want to take a snapshot at startup
                createSnapshotProducer(taskContext, sourceInfo, connectorConfig.initialOnlySnapshot());
            }
        } else {
            sourceInfo.load(existingOffset);
            logger.info("Found previous offset {}", sourceInfo);
            if (sourceInfo.isSnapshotInEffect()) {
                if (connectorConfig.snapshotNeverAllowed()) {
                    // No snapshots are allowed
                    String msg = "The connector previously stopped while taking a snapshot, but now the connector is configured " + "to never allow snapshots. Reconfigure the connector to use snapshots initially or when needed.";
                    throw new ConnectException(msg);
                } else {
                    logger.info("Found previous incomplete snapshot");
                    createSnapshotProducer(taskContext, sourceInfo, connectorConfig.initialOnlySnapshot());
                }
            } else if (connectorConfig.alwaysTakeSnapshot()) {
                logger.info("Taking a new snapshot as per configuration");
                producer = new RecordsSnapshotProducer(taskContext, sourceInfo, true);
            } else {
                logger.info("Previous snapshot has completed successfully, streaming logical changes from last known position");
                producer = new RecordsStreamProducer(taskContext, sourceInfo);
            }
        }
        changeEventQueue = new ChangeEventQueue.Builder<ChangeEvent>()
                .pollInterval(connectorConfig.getPollInterval())
                .maxBatchSize(connectorConfig.getMaxBatchSize())
                .maxQueueSize(connectorConfig.getMaxQueueSize())
                .loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
                .build();
        producer.start(changeEventQueue::enqueue, changeEventQueue::producerFailure);
        running.compareAndSet(false, true);
    } catch (SQLException e) {
        throw new ConnectException(e);
    } finally {
        previousContext.restore();
    }
}
Also used : LoggingContext(io.debezium.util.LoggingContext) SQLException(java.sql.SQLException) PostgresConnection(io.debezium.connector.postgresql.connection.PostgresConnection) ConnectException(org.apache.kafka.connect.errors.ConnectException)
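
For illustration, a minimal configuration such a task might be started with. The property names are assumptions based on common Debezium PostgreSQL connector settings rather than values taken from this snippet, and in a real deployment Kafka Connect supplies the task context and invokes start itself.

// Hypothetical start-up with a hand-built configuration (illustrative only).
Configuration config = Configuration.create()
        .with("database.hostname", "localhost")
        .with("database.port", "5432")
        .with("database.user", "postgres")
        .with("database.password", "postgres")
        .with("database.dbname", "inventory")
        .with("database.server.name", "dbserver1")
        .build();
PostgresConnectorTask task = new PostgresConnectorTask();
// throws ConnectException if, for example, a required snapshot is disallowed or the JDBC connection fails
task.start(config);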

Example 50 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

Class AbstractReader, method wrap.

/**
 * Wraps the specified exception in a {@link ConnectException}, ensuring that all useful state is captured inside
 * the new exception's message.
 *
 * @param error the exception; may not be null
 * @return the wrapped Kafka Connect exception
 */
protected ConnectException wrap(Throwable error) {
    assert error != null;
    String msg = error.getMessage();
    if (error instanceof ServerException) {
        ServerException e = (ServerException) error;
        msg = msg + " Error code: " + e.getErrorCode() + "; SQLSTATE: " + e.getSqlState() + ".";
    } else if (error instanceof SQLException) {
        SQLException e = (SQLException) error;
        msg = e.getMessage() + " Error code: " + e.getErrorCode() + "; SQLSTATE: " + e.getSQLState() + ".";
    }
    return new ConnectException(msg, error);
}
Also used : ServerException(com.github.shyiko.mysql.binlog.network.ServerException) SQLException(java.sql.SQLException) ConnectException(org.apache.kafka.connect.errors.ConnectException)
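
A short sketch of how a concrete reader might rely on wrap() around its polling logic; pollBinlogEvents() is a placeholder for whatever work the reader actually performs.

try {
    pollBinlogEvents(); // placeholder for the reader's actual work
} catch (Throwable t) {
    // Re-throw as a ConnectException with the MySQL error code and SQLSTATE folded into the message when available.
    throw wrap(t);
}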

Aggregations

ConnectException (org.apache.kafka.connect.errors.ConnectException): 184
HashMap (java.util.HashMap): 38
IOException (java.io.IOException): 28
Map (java.util.Map): 28
ArrayList (java.util.ArrayList): 23
Test (org.junit.Test): 23
ExecutionException (java.util.concurrent.ExecutionException): 22
TimeoutException (java.util.concurrent.TimeoutException): 17
SQLException (java.sql.SQLException): 16
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 14
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 14
Connector (org.apache.kafka.connect.connector.Connector): 12
ConfigException (org.apache.kafka.common.config.ConfigException): 11
TopicPartition (org.apache.kafka.common.TopicPartition): 10
ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId): 10
Collection (java.util.Collection): 8
HashSet (java.util.HashSet): 8
Set (java.util.Set): 8
NotFoundException (org.apache.kafka.connect.errors.NotFoundException): 8
SinkRecord (org.apache.kafka.connect.sink.SinkRecord): 8