Example 1 with PreviousContext

Use of io.debezium.util.LoggingContext.PreviousContext in project debezium by debezium.

In class ConnectorOutputTest, method runConnector:

/**
 * Run the connector described by the supplied test specification.
 *
 * @param spec the test specification
 * @param callback the function that should be called when the connector is stopped
 */
protected void runConnector(TestSpecification spec, CompletionCallback callback) {
    PreviousContext preRunContext = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
    final Configuration environmentConfig = Configuration.copy(spec.environment()).build();
    final Configuration connectorConfig = spec.config();
    String[] ignorableFieldNames = environmentConfig.getString(ENV_IGNORE_FIELDS, "").split(",");
    final Set<String> ignorableFields = Arrays.stream(ignorableFieldNames).map(String::trim).collect(Collectors.toSet());
    String[] globallyIgnorableFieldNames = globallyIgnorableFieldNames();
    if (globallyIgnorableFieldNames != null && globallyIgnorableFieldNames.length != 0) {
        ignorableFields.addAll(Arrays.stream(globallyIgnorableFieldNames).map(String::trim).collect(Collectors.toSet()));
    }
    final SchemaAndValueConverter keyConverter = new SchemaAndValueConverter(environmentConfig, true);
    final SchemaAndValueConverter valueConverter = new SchemaAndValueConverter(environmentConfig, false);
    final TestData testData = spec.testData();
    // Get any special comparators ...
    final Map<String, RecordValueComparator> comparatorsByFieldName = new HashMap<>();
    addValueComparatorsByFieldPath(comparatorsByFieldName::put);
    final Map<String, RecordValueComparator> comparatorsBySchemaName = new HashMap<>();
    addValueComparatorsBySchemaName(comparatorsBySchemaName::put);
    RuntimeException runError = null;
    CompletionResult problem = new CompletionResult(callback);
    try {
        // Set up the test data ...
        final PreviewIterator<Document> expectedRecords = Iterators.preview(testData.read());
        final Consumer<Document> recorder = testData::write;
        // We need something that will measure the amount of time since our consumer has seen a record ...
        TimeSince timeSinceLastRecord = Threads.timeSince(Clock.SYSTEM);
        // We'll keep the last 10 expected and actual records so that there is some context if they don't match ...
        Queue<SourceRecord> actualRecordHistory = fixedSizeQueue(10);
        Queue<SourceRecord> expectedRecordHistory = fixedSizeQueue(10);
        // Define what happens for each record ...
        ConsumerCompletion result = new ConsumerCompletion();
        Consumer<SourceRecord> consumer = (actualRecord) -> {
            PreviousContext prev = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
            try {
                Testing.debug("actual record:    " + SchemaUtil.asString(actualRecord));
                timeSinceLastRecord.reset();
                // Record the actual in the history ...
                actualRecordHistory.add(actualRecord);
                // And possibly hand it to the test's recorder ...
                try {
                    Document jsonRecord = serializeSourceRecord(actualRecord, keyConverter, valueConverter);
                    if (jsonRecord != null)
                        recorder.accept(jsonRecord);
                } catch (IOException e) {
                    String msg = "Error converting SourceRecord to JSON";
                    Testing.debug(msg);
                    throw new ConnectException(msg, e);
                }
                if (expectedRecords != null) {
                    // Get the test's next expected record ...
                    if (!expectedRecords.hasNext()) {
                        // We received an actual record but don't have or expect one ...
                        String msg = "Source record found but nothing expected";
                        result.error();
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    }
                    Document expected = expectedRecords.next();
                    if (isEndCommand(expected)) {
                        result.error();
                        String msg = "Source record was found but not expected: " + SchemaUtil.asString(actualRecord);
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    } else if (isCommand(expected)) {
                        Testing.debug("applying command: " + SchemaUtil.asString(expected));
                        applyCommand(expected, result);
                    } else {
                        try {
                            // Otherwise, build a record from the expected and add it to the history ...
                            SourceRecord expectedRecord = rehydrateSourceRecord(expected, keyConverter, valueConverter);
                            expectedRecordHistory.add(expectedRecord);
                            Testing.debug("expected record:  " + SchemaUtil.asString(expectedRecord));
                            // And compare the records ...
                            try {
                                assertSourceRecordMatch(actualRecord, expectedRecord, ignorableFields::contains, comparatorsByFieldName, comparatorsBySchemaName);
                            } catch (AssertionError e) {
                                result.error();
                                String msg = "Source record with key " + SchemaUtil.asString(actualRecord.key()) + " did not match expected record: " + e.getMessage();
                                Testing.debug(msg);
                                throw new MismatchRecordException(e, msg, actualRecordHistory, expectedRecordHistory);
                            }
                        } catch (IOException e) {
                            result.exception();
                            String msg = "Error converting JSON to SourceRecord";
                            Testing.debug(msg);
                            throw new ConnectException(msg, e);
                        }
                    }
                    if (!expectedRecords.hasNext()) {
                        // We expect no more records, so stop the connector ...
                        result.stop();
                        String msg = "Stopping connector after no more expected records found";
                        Testing.debug(msg);
                        throw new StopConnectorException(msg);
                    }
                    // Peek at the next record to see if it is a command ...
                    Document nextExpectedRecord = expectedRecords.peek();
                    if (isCommand(nextExpectedRecord)) {
                        // consume it and apply it ...
                        applyCommand(expectedRecords.next(), result);
                    }
                }
            } finally {
                prev.restore();
            }
        };
        // Set up the configuration for the engine to include the connector configuration and apply as defaults
        // the environment and engine parameters ...
        Configuration engineConfig = Configuration.copy(connectorConfig)
                .withDefault(environmentConfig)
                .withDefault(EmbeddedEngine.ENGINE_NAME, spec.name())
                .withDefault(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH)
                .withDefault(EmbeddedEngine.OFFSET_FLUSH_INTERVAL_MS, 0)
                .build();
        // Create the engine ...
        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(engineConfig)
                .notifying(consumer)
                .using(this.getClass().getClassLoader())
                .using(problem)
                .build();
        long connectorTimeoutInSeconds = environmentConfig.getLong(ENV_CONNECTOR_TIMEOUT_IN_SECONDS, 10);
        // Get ready to run the connector one or more times ...
        do {
            // Each time create a thread that will stop our connector if we don't get enough results
            Thread timeoutThread = Threads.timeout(spec.name() + "-connector-output", connectorTimeoutInSeconds, TimeUnit.SECONDS, timeSinceLastRecord, engine::stop);
            // But plan to stop our timeout thread as soon as the connector completes ...
            result.uponCompletion(timeoutThread::interrupt);
            timeoutThread.start();
            // Run the connector and block until the connector is stopped by the timeout thread or until
            // an exception is thrown within the connector (perhaps by the consumer) ...
            Testing.debug("Starting connector");
            result.reset();
            engine.run();
        } while (result.get() == ExecutionResult.RESTART_REQUESTED);
    } catch (IOException e) {
        runError = new RuntimeException("Error reading test data: " + e.getMessage(), e);
    } catch (RuntimeException t) {
        runError = t;
    } finally {
        // And clean up everything ...
        try {
            testData.close();
        } catch (IOException e) {
            if (runError == null) {
                runError = new RuntimeException("Error closing test data: " + e.getMessage(), e);
            }
        } finally {
            try {
                keyConverter.close();
            } finally {
                try {
                    valueConverter.close();
                } finally {
                    preRunContext.restore();
                }
            }
        }
    }
    if (runError != null) {
        throw runError;
    }
    if (problem.hasError()) {
        Throwable error = problem.error();
        if (error instanceof AssertionError) {
            fail(problem.message());
        } else if (error instanceof MismatchRecordException) {
            MismatchRecordException mismatch = (MismatchRecordException) error;
            LinkedList<SourceRecord> actualHistory = mismatch.getActualRecords();
            LinkedList<SourceRecord> expectedHistory = mismatch.getExpectedRecords();
            Testing.print("");
            Testing.print("FAILURE in connector integration test '" + spec.name() + "' in class " + getClass());
            Testing.print(" actual record:   " + SchemaUtil.asString(actualHistory.getLast()));
            Testing.print(" expected record: " + SchemaUtil.asString(expectedHistory.getLast()));
            Testing.print(mismatch.getMessage());
            Testing.print("");
            AssertionError cause = mismatch.getError();
            if (cause != null) {
                throw cause;
            }
            fail(problem.message());
        } else if (error instanceof RuntimeException) {
            throw (RuntimeException) error;
        } else {
            throw new RuntimeException(error);
        }
    }
}
Also used : Arrays(java.util.Arrays) PreviewIterator(io.debezium.util.Iterators.PreviewIterator) Threads(io.debezium.util.Threads) Schema(org.apache.kafka.connect.data.Schema) LoggingContext(io.debezium.util.LoggingContext) JsonDeserializer(org.apache.kafka.connect.json.JsonDeserializer) Map(java.util.Map) After(org.junit.After) Assert.fail(org.junit.Assert.fail) JsonNode(com.fasterxml.jackson.databind.JsonNode) Path(java.nio.file.Path) DocumentReader(io.debezium.document.DocumentReader) Predicate(java.util.function.Predicate) Set(java.util.Set) Collectors(java.util.stream.Collectors) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ArrayReader(io.debezium.document.ArrayReader) StandardCharsets(java.nio.charset.StandardCharsets) List(java.util.List) Queue(java.util.Queue) JsonConverter(org.apache.kafka.connect.json.JsonConverter) SchemaUtil(io.debezium.data.SchemaUtil) Value(io.debezium.document.Value) CompletionResult(io.debezium.embedded.EmbeddedEngine.CompletionResult) Array(io.debezium.document.Array) RecordValueComparator(io.debezium.data.VerifyRecord.RecordValueComparator) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) PreviousContext(io.debezium.util.LoggingContext.PreviousContext) ArrayList(java.util.ArrayList) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Document(io.debezium.document.Document) BiConsumer(java.util.function.BiConsumer) ArrayWriter(io.debezium.document.ArrayWriter) LinkedList(java.util.LinkedList) Strings(io.debezium.util.Strings) Before(org.junit.Before) OutputStream(java.io.OutputStream) Properties(java.util.Properties) Iterators(io.debezium.util.Iterators) Iterator(java.util.Iterator) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) Configuration(io.debezium.config.Configuration) FileInputStream(java.io.FileInputStream) CompletionCallback(io.debezium.embedded.EmbeddedEngine.CompletionCallback) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) JsonSerializer(org.apache.kafka.connect.json.JsonSerializer) Testing(io.debezium.util.Testing) Paths(java.nio.file.Paths) Collect(io.debezium.util.Collect) ConnectException(org.apache.kafka.connect.errors.ConnectException) VerifyRecord(io.debezium.data.VerifyRecord) Clock(io.debezium.util.Clock) TimeSince(io.debezium.util.Threads.TimeSince) IoUtil(io.debezium.util.IoUtil) InputStream(java.io.InputStream)
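
The recurring pattern in this example is to capture the current logging (MDC) context before switching to connector-scoped values, and to restore it in a finally block so the caller's context survives even when the wrapped work throws. A minimal sketch of that idiom follows; the class name, connector name, and task name are placeholders, not part of Debezium:

import io.debezium.util.LoggingContext;
import io.debezium.util.LoggingContext.PreviousContext;

public class LoggingContextSketch {

    public void runScopedWork() {
        // Switch the MDC to connector-scoped values; the returned PreviousContext remembers
        // whatever was there before ("sketch-connector" and "task-0" are placeholder names).
        PreviousContext previous = LoggingContext.forConnector("sketch-connector", "runner", "task-0");
        try {
            // ... work whose log output should carry the connector-scoped MDC values ...
        } finally {
            // Always restore the caller's MDC state, even if the work above throws.
            previous.restore();
        }
    }
}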

Example 2 with PreviousContext

Use of io.debezium.util.LoggingContext.PreviousContext in project debezium by debezium.

In class MySqlConnectorTask, method start:

@Override
public synchronized void start(Configuration config) {
    // Create and start the task context ...
    this.taskContext = new MySqlTaskContext(config);
    this.connectionContext = taskContext.getConnectionContext();
    PreviousContext prevLoggingContext = this.taskContext.configureLoggingContext("task");
    try {
        this.taskContext.start();
        // Get the offsets for our partition ...
        boolean startWithSnapshot = false;
        boolean snapshotEventsAreInserts = true;
        final SourceInfo source = taskContext.source();
        Map<String, ?> offsets = context.offsetStorageReader().offset(taskContext.source().partition());
        if (offsets != null) {
            // Set the position in our source info ...
            source.setOffset(offsets);
            logger.info("Found existing offset: {}", offsets);
            // First check if db history is available
            if (!taskContext.historyExists()) {
                if (taskContext.isSchemaOnlyRecoverySnapshot()) {
                    startWithSnapshot = true;
                    // But check to see if the server still has those binlog coordinates ...
                    if (!isBinlogAvailable()) {
                        String msg = "The connector is trying to read binlog starting at " + source + ", but this is no longer " + "available on the server. Reconfigure the connector to use a snapshot when needed.";
                        throw new ConnectException(msg);
                    }
                    logger.info("The db-history topic is missing but we are in {} snapshot mode. " + "Attempting to snapshot the current schema and then begin reading the binlog from the last recorded offset.", SnapshotMode.SCHEMA_ONLY_RECOVERY);
                } else {
                    String msg = "The db history topic is missing. You may attempt to recover it by reconfiguring the connector to " + SnapshotMode.SCHEMA_ONLY_RECOVERY;
                    throw new ConnectException(msg);
                }
                taskContext.initializeHistoryStorage();
            } else {
                // Before anything else, recover the database history to the specified binlog coordinates ...
                taskContext.loadHistory(source);
                if (source.isSnapshotInEffect()) {
                    // The last offset was an incomplete snapshot that we cannot recover from...
                    if (taskContext.isSnapshotNeverAllowed()) {
                        // No snapshots are allowed
                        String msg = "The connector previously stopped while taking a snapshot, but now the connector is configured " + "to never allow snapshots. Reconfigure the connector to use snapshots initially or when needed.";
                        throw new ConnectException(msg);
                    }
                    // Otherwise, restart a new snapshot ...
                    startWithSnapshot = true;
                    logger.info("Prior execution was an incomplete snapshot, so starting new snapshot");
                } else {
                    // No snapshot was in effect, so we should just start reading from the binlog ...
                    startWithSnapshot = false;
                    // But check to see if the server still has those binlog coordinates ...
                    if (!isBinlogAvailable()) {
                        if (!taskContext.isSnapshotAllowedWhenNeeded()) {
                            String msg = "The connector is trying to read binlog starting at " + source + ", but this is no longer " + "available on the server. Reconfigure the connector to use a snapshot when needed.";
                            throw new ConnectException(msg);
                        }
                        startWithSnapshot = true;
                    }
                }
            }
        } else {
            // We have no recorded offsets ...
            taskContext.initializeHistoryStorage();
            if (taskContext.isSnapshotNeverAllowed()) {
                // We're not allowed to take a snapshot, so instead we have to assume that the binlog contains the
                // full history of the database.
                logger.info("Found no existing offset and snapshots disallowed, so starting at beginning of binlog");
                // start from the beginning of the binlog
                source.setBinlogStartPoint("", 0L);
                taskContext.initializeHistory();
                // Look to see what the first available binlog file is called, and whether it looks like binlog files have
                // been purged. If so, then output a warning ...
                String earliestBinlogFilename = earliestBinlogFilename();
                if (earliestBinlogFilename == null) {
                    logger.warn("No binlog appears to be available. Ensure that the MySQL row-level binlog is enabled.");
                } else if (!earliestBinlogFilename.endsWith("00001")) {
                    logger.warn("It is possible the server has purged some binlogs. If this is the case, then using snapshot mode may be required.");
                }
            } else {
                // We are allowed to use snapshots, and that is the best way to start ...
                startWithSnapshot = true;
                // The snapshot will determine if GTIDs are set
                logger.info("Found no existing offset, so preparing to perform a snapshot");
            // The snapshot will also initialize history ...
            }
        }
        if (!startWithSnapshot && source.gtidSet() == null && isGtidModeEnabled()) {
            // The snapshot will properly determine the GTID set, but we're not starting with a snapshot and GTIDs were not
            // previously used but the MySQL server has them enabled ...
            source.setCompletedGtidSet("");
        }
        // Check whether the row-level binlog is enabled ...
        final boolean rowBinlogEnabled = isRowBinlogEnabled();
        ChainedReader.Builder chainedReaderBuilder = new ChainedReader.Builder();
        // Set up the readers, with a callback to `completeReaders` so that we know when it is finished ...
        BinlogReader binlogReader = new BinlogReader("binlog", taskContext);
        if (startWithSnapshot) {
            // We're supposed to start with a snapshot, so set that up ...
            SnapshotReader snapshotReader = new SnapshotReader("snapshot", taskContext);
            if (snapshotEventsAreInserts)
                snapshotReader.generateInsertEvents();
            chainedReaderBuilder.addReader(snapshotReader);
            if (taskContext.isInitialSnapshotOnly()) {
                logger.warn("This connector will only perform a snapshot, and will stop after that completes.");
                chainedReaderBuilder.addReader(new BlockingReader("blocker"));
                chainedReaderBuilder.completionMessage("Connector configured to only perform snapshot, and snapshot completed successfully. Connector will terminate.");
            } else {
                if (!rowBinlogEnabled) {
                    throw new ConnectException("The MySQL server is not configured to use a row-level binlog, which is " + "required for this connector to work properly. Change the MySQL configuration to use a " + "row-level binlog and restart the connector.");
                }
                chainedReaderBuilder.addReader(binlogReader);
            }
        } else {
            if (!rowBinlogEnabled) {
                throw new ConnectException("The MySQL server does not appear to be using a row-level binlog, which is required for this connector to work properly. Enable this mode and restart the connector.");
            }
            // We're going to start by reading the binlog ...
            chainedReaderBuilder.addReader(binlogReader);
        }
        readers = chainedReaderBuilder.build();
        readers.uponCompletion(this::completeReaders);
        // And finally initialize and start the chain of readers ...
        this.readers.initialize();
        this.readers.start();
    } catch (Throwable e) {
        // We ran into a problem, so we have to stop ourselves ...
        try {
            stop();
        } catch (Throwable s) {
            // Log, but don't propagate ...
            logger.error("Failed to start the connector (see other exception), but got this error while cleaning up", s);
        }
        if (e instanceof InterruptedException) {
            Thread.interrupted();
            throw new ConnectException("Interrupted while starting the connector", e);
        }
        if (e instanceof ConnectException) {
            throw (ConnectException) e;
        }
        throw new ConnectException(e);
    } finally {
        prevLoggingContext.restore();
    }
}
Also used : PreviousContext(io.debezium.util.LoggingContext.PreviousContext) ConnectException(org.apache.kafka.connect.errors.ConnectException)
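
Example 2 pairs the same restore-in-finally idiom with exception translation: any failure during start-up triggers a best-effort stop() and is rethrown as a ConnectException so Kafka Connect sees a single failure type. Below is a condensed, illustrative sketch of that shape; it sets up the logging context directly via LoggingContext.forConnector rather than the MySqlTaskContext helper, and the class name and the work inside the try block are assumptions:

import io.debezium.util.LoggingContext;
import io.debezium.util.LoggingContext.PreviousContext;
import org.apache.kafka.connect.errors.ConnectException;

public class StartSkeletonSketch {

    public synchronized void start() {
        // Placeholder connector and task names; real code derives them from its configuration.
        PreviousContext prev = LoggingContext.forConnector("sketch-connector", "runner", "task-0");
        try {
            // ... read offsets, decide whether to snapshot, build and start readers ...
        } catch (Throwable e) {
            try {
                // Best-effort cleanup; failures here are logged and swallowed so the
                // original failure is the one that gets reported.
                stop();
            } catch (Throwable s) {
                // log and ignore
            }
            if (e instanceof ConnectException) {
                throw (ConnectException) e;
            }
            throw new ConnectException(e);
        } finally {
            // Restore the caller's logging context no matter how start-up ended.
            prev.restore();
        }
    }

    public synchronized void stop() {
        // ... release readers and other resources ...
    }
}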

Example 3 with PreviousContext

Use of io.debezium.util.LoggingContext.PreviousContext in project debezium by debezium.

In class MySqlConnectorTask, method stop:

@Override
public synchronized void stop() {
    if (context != null) {
        PreviousContext prevLoggingContext = this.taskContext.configureLoggingContext("task");
        try {
            logger.info("Stopping MySQL connector task");
            if (readers != null) {
                readers.stop();
                readers.destroy();
            }
        } finally {
            prevLoggingContext.restore();
        }
    }
}
Also used : PreviousContext(io.debezium.util.LoggingContext.PreviousContext)

Example 4 with PreviousContext

Use of io.debezium.util.LoggingContext.PreviousContext in project debezium by debezium.

In class MongoDbConnector, method taskConfigs:

@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
    PreviousContext previousLogContext = taskContext.configureLoggingContext("conn");
    try {
        if (config == null) {
            logger.error("Configuring a maximum of {} tasks with no connector configuration available", maxTasks);
            return Collections.emptyList();
        }
        // Partitioning the replica sets amongst the number of tasks ...
        List<Map<String, String>> taskConfigs = new ArrayList<>(maxTasks);
        ReplicaSets replicaSets = monitorThread.getReplicaSets(10, TimeUnit.SECONDS);
        if (replicaSets != null) {
            logger.info("Subdividing {} MongoDB replica set(s) into at most {} task(s)", replicaSets.replicaSetCount(), maxTasks);
            replicaSets.subdivide(maxTasks, replicaSetsForTask -> {
                // Create the configuration for each task ...
                int taskId = taskConfigs.size();
                logger.info("Configuring MongoDB connector task {} to capture events for replica set(s) at {}", taskId, replicaSetsForTask.hosts());
                taskConfigs.add(config.edit()
                        .with(MongoDbConnectorConfig.HOSTS, replicaSetsForTask.hosts())
                        .with(MongoDbConnectorConfig.TASK_ID, taskId)
                        .build()
                        .asMap());
            });
        }
        logger.debug("Configuring {} MongoDB connector task(s)", taskConfigs.size());
        return taskConfigs;
    } finally {
        previousLogContext.restore();
    }
}
Also used : PreviousContext(io.debezium.util.LoggingContext.PreviousContext) ArrayList(java.util.ArrayList) Map(java.util.Map)

Example 5 with PreviousContext

Use of io.debezium.util.LoggingContext.PreviousContext in project debezium by debezium.

In class MongoDbConnector, method stop:

@Override
public void stop() {
    PreviousContext previousLogContext = taskContext != null ? taskContext.configureLoggingContext("conn") : null;
    try {
        logger.info("Stopping MongoDB connector");
        this.config = null;
        if (replicaSetMonitorExecutor != null)
            replicaSetMonitorExecutor.shutdownNow();
        try {
            if (this.connectionContext != null)
                this.connectionContext.shutdown();
        } finally {
            logger.info("Stopped MongoDB connector");
        }
    } finally {
        if (previousLogContext != null)
            previousLogContext.restore();
    }
}
Also used : PreviousContext(io.debezium.util.LoggingContext.PreviousContext)
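
Example 5 shows the null-safe variant of the idiom: stop() may be called before any task context exists, so the PreviousContext itself can be null and must only be restored if it was actually captured. A small sketch of that guard follows; the class name and the connectorName field are illustrative, not Debezium API:

import io.debezium.util.LoggingContext;
import io.debezium.util.LoggingContext.PreviousContext;

public class NullSafeStopSketch {

    // May still be null if start() has not run yet (illustrative field).
    private volatile String connectorName;

    public void stop() {
        // Only switch the MDC if we know which connector this is; otherwise leave it untouched.
        PreviousContext previous = connectorName != null
                ? LoggingContext.forConnector("sketch-connector", "conn", connectorName)
                : null;
        try {
            // ... shut down executors, connections, and other resources ...
        } finally {
            // Restore only what was captured; a null context means nothing was changed.
            if (previous != null) {
                previous.restore();
            }
        }
    }
}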

Aggregations

PreviousContext (io.debezium.util.LoggingContext.PreviousContext): 11
ArrayList (java.util.ArrayList): 4
ConnectException (org.apache.kafka.connect.errors.ConnectException): 4
Map (java.util.Map): 3
Configuration (io.debezium.config.Configuration): 2
LoggingContext (io.debezium.util.LoggingContext): 2
HashMap (java.util.HashMap): 2
JsonNode (com.fasterxml.jackson.databind.JsonNode): 1
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 1
SchemaUtil (io.debezium.data.SchemaUtil): 1
VerifyRecord (io.debezium.data.VerifyRecord): 1
RecordValueComparator (io.debezium.data.VerifyRecord.RecordValueComparator): 1
Array (io.debezium.document.Array): 1
ArrayReader (io.debezium.document.ArrayReader): 1
ArrayWriter (io.debezium.document.ArrayWriter): 1
Document (io.debezium.document.Document): 1
DocumentReader (io.debezium.document.DocumentReader): 1
Value (io.debezium.document.Value): 1
CompletionCallback (io.debezium.embedded.EmbeddedEngine.CompletionCallback): 1
CompletionResult (io.debezium.embedded.EmbeddedEngine.CompletionResult): 1