Example 51 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

From the class AbstractReader, the method failed:

/**
 * Call this method only when the reader has failed, so that a subsequent call to {@link #poll()} will throw
 * this error and {@link #doCleanup()} can be called at any time.
 *
 * @param error the error that resulted in the failure; may not be {@code null}
 * @param msg the error message; may not be {@code null}
 */
protected void failed(Throwable error, String msg) {
    ConnectException wrapped = wrap(error);
    this.logger.error("Failed due to error: {}", msg, wrapped);
    this.failure.set(wrapped);
}
Also used: ConnectException (org.apache.kafka.connect.errors.ConnectException)
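
A minimal, self-contained sketch of the store-and-rethrow pattern this method supports: a background thread records the wrapped failure, and the framework's polling thread throws it on the next poll. The reader class and the doPoll() hook below are illustrative assumptions, not Debezium's actual API.

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.source.SourceRecord;

abstract class SketchReader {
    // Holds the wrapped failure until the polling thread can observe it ...
    private final AtomicReference<ConnectException> failure = new AtomicReference<>();

    protected void failed(Throwable error, String msg) {
        // Wrap anything that is not already a ConnectException ...
        ConnectException wrapped = (error instanceof ConnectException)
                ? (ConnectException) error
                : new ConnectException(msg, error);
        this.failure.set(wrapped);
    }

    public List<SourceRecord> poll() throws InterruptedException {
        // Surface a failure recorded by a background thread on the caller's thread ...
        ConnectException error = failure.get();
        if (error != null) {
            throw error;
        }
        return doPoll();
    }

    protected abstract List<SourceRecord> doPoll() throws InterruptedException;
}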

Example 52 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

From the class BinlogReader, the method doStart:

@Override
protected void doStart() {
    // Register our event handlers ...
    eventHandlers.put(EventType.STOP, this::handleServerStop);
    eventHandlers.put(EventType.HEARTBEAT, this::handleServerHeartbeat);
    eventHandlers.put(EventType.INCIDENT, this::handleServerIncident);
    eventHandlers.put(EventType.ROTATE, this::handleRotateLogsEvent);
    eventHandlers.put(EventType.TABLE_MAP, this::handleUpdateTableMetadata);
    eventHandlers.put(EventType.QUERY, this::handleQueryEvent);
    eventHandlers.put(EventType.WRITE_ROWS, this::handleInsert);
    eventHandlers.put(EventType.UPDATE_ROWS, this::handleUpdate);
    eventHandlers.put(EventType.DELETE_ROWS, this::handleDelete);
    eventHandlers.put(EventType.EXT_WRITE_ROWS, this::handleInsert);
    eventHandlers.put(EventType.EXT_UPDATE_ROWS, this::handleUpdate);
    eventHandlers.put(EventType.EXT_DELETE_ROWS, this::handleDelete);
    eventHandlers.put(EventType.VIEW_CHANGE, this::viewChange);
    eventHandlers.put(EventType.XA_PREPARE, this::prepareTransaction);
    eventHandlers.put(EventType.XID, this::handleTransactionCompletion);
    // Get the current GtidSet from MySQL so we can get a filtered/merged GtidSet based off of the last Debezium checkpoint.
    String availableServerGtidStr = connectionContext.knownGtidSet();
    if (availableServerGtidStr != null && !availableServerGtidStr.trim().isEmpty()) {
        // The server is using GTIDs, so enable the handler ...
        eventHandlers.put(EventType.GTID, this::handleGtidEvent);
        // Now look at the GTID set from the server and what we've previously seen ...
        GtidSet availableServerGtidSet = new GtidSet(availableServerGtidStr);
        GtidSet filteredGtidSet = context.filterGtidSet(availableServerGtidSet);
        if (filteredGtidSet != null) {
            // We've seen at least some GTIDs, so start reading from the filtered GTID set ...
            logger.info("Registering binlog reader with GTID set: {}", filteredGtidSet);
            String filteredGtidSetStr = filteredGtidSet.toString();
            client.setGtidSet(filteredGtidSetStr);
            source.setCompletedGtidSet(filteredGtidSetStr);
            gtidSet = new com.github.shyiko.mysql.binlog.GtidSet(filteredGtidSetStr);
        } else {
            // We've not yet seen any GTIDs, so that means we have to start reading the binlog from the beginning ...
            client.setBinlogFilename(source.binlogFilename());
            client.setBinlogPosition(source.binlogPosition());
            gtidSet = new com.github.shyiko.mysql.binlog.GtidSet("");
        }
    } else {
        // The server is not using GTIDs, so start reading the binlog based upon where we last left off ...
        client.setBinlogFilename(source.binlogFilename());
        client.setBinlogPosition(source.binlogPosition());
    }
    // We may be restarting in the middle of a transaction, so see how far into the transaction we have already processed...
    initialEventsToSkip = source.eventsToSkipUponRestart();
    // Set the starting row number, which is the next row number to be read ...
    startingRowNumber = source.rowsToSkipUponRestart();
    // Only when we reach the first BEGIN event will we start to skip events ...
    skipEvent = false;
    // Initialize our poll output delay logic ...
    pollOutputDelay.hasElapsed();
    previousOutputMillis = clock.currentTimeInMillis();
    // Start the log reader, which starts background threads ...
    if (isRunning()) {
        long timeoutInMilliseconds = context.timeoutInMilliseconds();
        long started = context.getClock().currentTimeInMillis();
        try {
            logger.debug("Attempting to establish binlog reader connection with timeout of {} ms", timeoutInMilliseconds);
            client.connect(context.timeoutInMilliseconds());
        } catch (TimeoutException e) {
            // If the client thread is interrupted *before* the client could connect, the client throws a timeout exception
            // The only way we can distinguish this is if we get the timeout exception before the specified timeout has
            // elapsed, so we simply check this (within 10%) ...
            long duration = context.getClock().currentTimeInMillis() - started;
            if (duration > (0.9 * context.timeoutInMilliseconds())) {
                double actualSeconds = TimeUnit.MILLISECONDS.toSeconds(duration);
                throw new ConnectException("Timed out after " + actualSeconds + " seconds while waiting to connect to MySQL at " + connectionContext.hostname() + ":" + connectionContext.port() + " with user '" + connectionContext.username() + "'", e);
            }
        // Otherwise, we were told to shut down, so we don't care about the timeout exception
        } catch (AuthenticationException e) {
            throw new ConnectException("Failed to authenticate to the MySQL database at " + connectionContext.hostname() + ":" + connectionContext.port() + " with user '" + connectionContext.username() + "'", e);
        } catch (Throwable e) {
            throw new ConnectException("Unable to connect to the MySQL database at " + connectionContext.hostname() + ":" + connectionContext.port() + " with user '" + connectionContext.username() + "': " + e.getMessage(), e);
        }
    }
}
Also used: AuthenticationException (com.github.shyiko.mysql.binlog.network.AuthenticationException), TimeoutException (java.util.concurrent.TimeoutException), ConnectException (org.apache.kafka.connect.errors.ConnectException)
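
The TimeoutException branch above hinges on a timing heuristic: the binlog client throws the same exception whether the timeout genuinely elapsed or the connecting thread was interrupted for shutdown, so the code compares the elapsed time against 90% of the configured timeout. A standalone sketch of that heuristic, with illustrative names:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.connect.errors.ConnectException;

final class ConnectTimeoutHeuristic {
    // Rethrow only if the timeout genuinely elapsed (within a 10% tolerance);
    // an earlier TimeoutException means the thread was interrupted for shutdown.
    static void handle(TimeoutException e, long startedMillis, long timeoutMillis, String server) {
        long duration = System.currentTimeMillis() - startedMillis;
        if (duration > 0.9 * timeoutMillis) {
            long actualSeconds = TimeUnit.MILLISECONDS.toSeconds(duration);
            throw new ConnectException("Timed out after " + actualSeconds
                    + " seconds while waiting to connect to MySQL at " + server, e);
        }
        // Otherwise we were told to shut down, so the exception is ignored.
    }
}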

Example 53 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

From the class MySqlConnectorTask, the method start:

@Override
public synchronized void start(Configuration config) {
    // Create and start the task context ...
    this.taskContext = new MySqlTaskContext(config);
    this.connectionContext = taskContext.getConnectionContext();
    PreviousContext prevLoggingContext = this.taskContext.configureLoggingContext("task");
    try {
        this.taskContext.start();
        // Get the offsets for our partition ...
        boolean startWithSnapshot = false;
        boolean snapshotEventsAreInserts = true;
        final SourceInfo source = taskContext.source();
        Map<String, ?> offsets = context.offsetStorageReader().offset(taskContext.source().partition());
        if (offsets != null) {
            // Set the position in our source info ...
            source.setOffset(offsets);
            logger.info("Found existing offset: {}", offsets);
            // First check if db history is available
            if (!taskContext.historyExists()) {
                if (taskContext.isSchemaOnlyRecoverySnapshot()) {
                    startWithSnapshot = true;
                    // But check to see if the server still has those binlog coordinates ...
                    if (!isBinlogAvailable()) {
                        String msg = "The connector is trying to read binlog starting at " + source + ", but this is no longer " + "available on the server. Reconfigure the connector to use a snapshot when needed.";
                        throw new ConnectException(msg);
                    }
                    logger.info("The db-history topic is missing but we are in {} snapshot mode. " + "Attempting to snapshot the current schema and then begin reading the binlog from the last recorded offset.", SnapshotMode.SCHEMA_ONLY_RECOVERY);
                } else {
                    String msg = "The db history topic is missing. You may attempt to recover it by reconfiguring the connector to " + SnapshotMode.SCHEMA_ONLY_RECOVERY;
                    throw new ConnectException(msg);
                }
                taskContext.initializeHistoryStorage();
            } else {
                // Before anything else, recover the database history to the specified binlog coordinates ...
                taskContext.loadHistory(source);
                if (source.isSnapshotInEffect()) {
                    // The last offset was an incomplete snapshot that we cannot recover from...
                    if (taskContext.isSnapshotNeverAllowed()) {
                        // No snapshots are allowed
                        String msg = "The connector previously stopped while taking a snapshot, but now the connector is configured " + "to never allow snapshots. Reconfigure the connector to use snapshots initially or when needed.";
                        throw new ConnectException(msg);
                    }
                    // Otherwise, restart a new snapshot ...
                    startWithSnapshot = true;
                    logger.info("Prior execution was an incomplete snapshot, so starting new snapshot");
                } else {
                    // No snapshot was in effect, so we should just start reading from the binlog ...
                    startWithSnapshot = false;
                    // But check to see if the server still has those binlog coordinates ...
                    if (!isBinlogAvailable()) {
                        if (!taskContext.isSnapshotAllowedWhenNeeded()) {
                            String msg = "The connector is trying to read binlog starting at " + source + ", but this is no longer " + "available on the server. Reconfigure the connector to use a snapshot when needed.";
                            throw new ConnectException(msg);
                        }
                        startWithSnapshot = true;
                    }
                }
            }
        } else {
            // We have no recorded offsets ...
            taskContext.initializeHistoryStorage();
            if (taskContext.isSnapshotNeverAllowed()) {
                // We're not allowed to take a snapshot, so instead we have to assume that the binlog contains the
                // full history of the database.
                logger.info("Found no existing offset and snapshots disallowed, so starting at beginning of binlog");
                // start from the beginning of the binlog
                source.setBinlogStartPoint("", 0L);
                taskContext.initializeHistory();
                // Look to see what the first available binlog file is called, and whether it looks like binlog files have
                // been purged. If so, then output a warning ...
                String earliestBinlogFilename = earliestBinlogFilename();
                if (earliestBinlogFilename == null) {
                    logger.warn("No binlog appears to be available. Ensure that the MySQL row-level binlog is enabled.");
                } else if (!earliestBinlogFilename.endsWith("00001")) {
                    logger.warn("It is possible the server has purged some binlogs. If this is the case, then using snapshot mode may be required.");
                }
            } else {
                // We are allowed to use snapshots, and that is the best way to start ...
                startWithSnapshot = true;
                // The snapshot will determine if GTIDs are set
                logger.info("Found no existing offset, so preparing to perform a snapshot");
            // The snapshot will also initialize history ...
            }
        }
        if (!startWithSnapshot && source.gtidSet() == null && isGtidModeEnabled()) {
            // The snapshot will properly determine the GTID set, but we're not starting with a snapshot and GTIDs were not
            // previously used but the MySQL server has them enabled ...
            source.setCompletedGtidSet("");
        }
        // Check whether the row-level binlog is enabled ...
        final boolean rowBinlogEnabled = isRowBinlogEnabled();
        ChainedReader.Builder chainedReaderBuilder = new ChainedReader.Builder();
        // Set up the readers, with a callback to `completeReaders` so that we know when it is finished ...
        BinlogReader binlogReader = new BinlogReader("binlog", taskContext);
        if (startWithSnapshot) {
            // We're supposed to start with a snapshot, so set that up ...
            SnapshotReader snapshotReader = new SnapshotReader("snapshot", taskContext);
            if (snapshotEventsAreInserts)
                snapshotReader.generateInsertEvents();
            chainedReaderBuilder.addReader(snapshotReader);
            if (taskContext.isInitialSnapshotOnly()) {
                logger.warn("This connector will only perform a snapshot, and will stop after that completes.");
                chainedReaderBuilder.addReader(new BlockingReader("blocker"));
                chainedReaderBuilder.completionMessage("Connector configured to only perform snapshot, and snapshot completed successfully. Connector will terminate.");
            } else {
                if (!rowBinlogEnabled) {
                    throw new ConnectException("The MySQL server is not configured to use a row-level binlog, which is " + "required for this connector to work properly. Change the MySQL configuration to use a " + "row-level binlog and restart the connector.");
                }
                chainedReaderBuilder.addReader(binlogReader);
            }
        } else {
            if (!rowBinlogEnabled) {
                throw new ConnectException("The MySQL server does not appear to be using a row-level binlog, which is required for this connector to work properly. Enable this mode and restart the connector.");
            }
            // We're going to start by reading the binlog ...
            chainedReaderBuilder.addReader(binlogReader);
        }
        readers = chainedReaderBuilder.build();
        readers.uponCompletion(this::completeReaders);
        // And finally initialize and start the chain of readers ...
        this.readers.initialize();
        this.readers.start();
    } catch (Throwable e) {
        // run into a problem, we have to stop ourselves ...
        try {
            stop();
        } catch (Throwable s) {
            // Log, but don't propagate ...
            logger.error("Failed to start the connector (see other exception), but got this error while cleaning up", s);
        }
        if (e instanceof InterruptedException) {
            Thread.interrupted();
            throw new ConnectException("Interrupted while starting the connector", e);
        }
        if (e instanceof ConnectException) {
            throw (ConnectException) e;
        }
        throw new ConnectException(e);
    } finally {
        prevLoggingContext.restore();
    }
}
Also used: PreviousContext (io.debezium.util.LoggingContext.PreviousContext), ConnectException (org.apache.kafka.connect.errors.ConnectException)
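
The final catch block shows the conventional failure path for a Connect task: stop the task, clear a pending interrupt, rethrow an existing ConnectException unchanged, and wrap anything else. A compact sketch of just that wrap-or-rethrow decision; the helper class is illustrative:

import org.apache.kafka.connect.errors.ConnectException;

final class StartupFailureHandling {
    // Convert any startup failure into the ConnectException the framework expects.
    static ConnectException asConnectException(Throwable e) {
        if (e instanceof InterruptedException) {
            // Clear the thread's interrupt flag before wrapping ...
            Thread.interrupted();
            return new ConnectException("Interrupted while starting the connector", e);
        }
        if (e instanceof ConnectException) {
            // Already the right type, so propagate it unchanged ...
            return (ConnectException) e;
        }
        // Wrap everything else so Kafka Connect can report it uniformly ...
        return new ConnectException(e);
    }
}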

Example 54 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

From the class MySqlConnectorTask, the method isRowBinlogEnabled:

/**
 * Determine whether the MySQL server has the row-level binlog enabled.
 *
 * @return {@code true} if the server's {@code binlog_format} is set to {@code ROW}, or {@code false} otherwise
 */
protected boolean isRowBinlogEnabled() {
    AtomicReference<String> mode = new AtomicReference<String>("");
    try {
        connectionContext.jdbc().query("SHOW GLOBAL VARIABLES LIKE 'binlog_format'", rs -> {
            if (rs.next()) {
                mode.set(rs.getString(2));
            }
        });
    } catch (SQLException e) {
        throw new ConnectException("Unexpected error while connecting to MySQL and looking at BINLOG mode: ", e);
    }
    logger.debug("binlog_format={}", mode.get());
    return "ROW".equalsIgnoreCase(mode.get());
}
Also used: SQLException (java.sql.SQLException), AtomicReference (java.util.concurrent.atomic.AtomicReference), ConnectException (org.apache.kafka.connect.errors.ConnectException)
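
The same check can be expressed with plain JDBC, without Debezium's connection wrapper. A minimal sketch, assuming a reachable MySQL server; the JDBC URL and credentials are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.kafka.connect.errors.ConnectException;

final class BinlogFormatCheck {
    static boolean isRowBinlogEnabled(String jdbcUrl, String user, String password) {
        String mode = "";
        try (Connection conn = DriverManager.getConnection(jdbcUrl, user, password);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SHOW GLOBAL VARIABLES LIKE 'binlog_format'")) {
            if (rs.next()) {
                // Column 1 is the variable name, column 2 is its value ...
                mode = rs.getString(2);
            }
        } catch (SQLException e) {
            throw new ConnectException("Unexpected error while checking binlog_format", e);
        }
        return "ROW".equalsIgnoreCase(mode);
    }
}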

Example 55 with ConnectException

Use of org.apache.kafka.connect.errors.ConnectException in project debezium by debezium.

From the class ConnectionContext, the method clientForPrimary:

/**
 * Obtain a client that talks only to the primary node of the replica set.
 *
 * @param replicaSet the replica set information; may not be null
 * @return the client, or {@code null} if no primary could be found for the replica set
 */
protected MongoClient clientForPrimary(ReplicaSet replicaSet) {
    MongoClient replicaSetClient = clientForReplicaSet(replicaSet);
    ReplicaSetStatus rsStatus = replicaSetClient.getReplicaSetStatus();
    if (rsStatus == null) {
        if (!this.useHostsAsSeeds) {
            // No replica set status is available, but it may still be a replica set ...
            return replicaSetClient;
        }
        // This is not a replica set, so there will be no oplog to read ...
        throw new ConnectException("The MongoDB server(s) at '" + replicaSet + "' is not a valid replica set and cannot be used");
    }
    // It is a replica set ...
    ServerAddress primaryAddress = rsStatus.getMaster();
    if (primaryAddress != null) {
        return pool.clientFor(primaryAddress);
    }
    return null;
}
Also used: MongoClient (com.mongodb.MongoClient), ServerAddress (com.mongodb.ServerAddress), ReplicaSetStatus (com.mongodb.ReplicaSetStatus), ConnectException (org.apache.kafka.connect.errors.ConnectException)
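
Because clientForPrimary(...) returns null when no primary is currently elected (for example, during a replica-set election), a caller may want to retry until one appears. A minimal generic sketch of such a wait loop; the helper and its retry budget are illustrative assumptions, not Debezium's actual handling:

import java.util.function.Supplier;
import org.apache.kafka.connect.errors.ConnectException;

final class PrimaryLookup {
    // Poll the supplied lookup until it yields a non-null client or the deadline passes.
    static <T> T awaitPrimary(Supplier<T> lookup, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            T client = lookup.get();
            if (client != null) {
                return client;
            }
            // No primary yet; back off briefly before trying again ...
            Thread.sleep(1000);
        }
        throw new ConnectException("No primary found within " + timeoutMillis + " ms");
    }
}

A caller would pass something like () -> clientForPrimary(replicaSet) as the lookup.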

Aggregations

ConnectException (org.apache.kafka.connect.errors.ConnectException): 184
HashMap (java.util.HashMap): 38
IOException (java.io.IOException): 28
Map (java.util.Map): 28
ArrayList (java.util.ArrayList): 23
Test (org.junit.Test): 23
ExecutionException (java.util.concurrent.ExecutionException): 22
TimeoutException (java.util.concurrent.TimeoutException): 17
SQLException (java.sql.SQLException): 16
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 14
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 14
Connector (org.apache.kafka.connect.connector.Connector): 12
ConfigException (org.apache.kafka.common.config.ConfigException): 11
TopicPartition (org.apache.kafka.common.TopicPartition): 10
ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId): 10
Collection (java.util.Collection): 8
HashSet (java.util.HashSet): 8
Set (java.util.Set): 8
NotFoundException (org.apache.kafka.connect.errors.NotFoundException): 8
SinkRecord (org.apache.kafka.connect.sink.SinkRecord): 8