
Example 1 with Configuration

Use of io.debezium.config.Configuration in project eventuate-local by eventuate-local.

From class DebeziumCdcStartupValidator, method validateDatasourceConnection:

private void validateDatasourceConnection() {
    logger.info("About to validate DataSource connection");
    Map<String, String> connectorConfig = new HashMap<>();
    connectorConfig.put(MySqlConnectorConfig.HOSTNAME.name(), jdbcUrl.getHost());
    connectorConfig.put(MySqlConnectorConfig.PORT.name(), String.valueOf(jdbcUrl.getPort()));
    connectorConfig.put(MySqlConnectorConfig.USER.name(), dbUser);
    connectorConfig.put(MySqlConnectorConfig.PASSWORD.name(), dbPassword);
    Configuration config = Configuration.from(connectorConfig);
    try (MySqlJdbcContext jdbcContext = new MySqlJdbcContext(config)) {
        jdbcContext.start();
        JdbcConnection mysql = jdbcContext.jdbc();
        int i = mySqlValidationMaxAttempts;
        SQLException lastException = null;
        while (i > 0) {
            try {
                mysql.execute("SELECT version()");
                logger.info("Successfully tested connection for {}:{} with user '{}'", jdbcContext.hostname(), jdbcContext.port(), mysql.username());
                return;
            } catch (SQLException e) {
                lastException = e;
                logger.info("Failed testing connection for {}:{} with user '{}'", jdbcContext.hostname(), jdbcContext.port(), mysql.username());
                i--;
                try {
                    Thread.sleep(mySqlValidationTimeoutMillis);
                } catch (InterruptedException ie) {
                    throw new RuntimeException("MySql validation had been interrupted!", ie);
                }
            }
        }
        jdbcContext.shutdown();
        throw new RuntimeException(lastException);
    }
}
Also used : Configuration(io.debezium.config.Configuration) HashMap(java.util.HashMap) MySqlJdbcContext(io.debezium.connector.mysql.MySqlJdbcContext) SQLException(java.sql.SQLException) JdbcConnection(io.debezium.jdbc.JdbcConnection)
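
A minimal sketch (not taken from the project) of the Configuration.from(...) call used above: it simply wraps a plain Map of connector properties, and values can be read back by key. The host and port literals are placeholders.

Map<String, String> props = new HashMap<>();
props.put(MySqlConnectorConfig.HOSTNAME.name(), "localhost"); // placeholder host
props.put(MySqlConnectorConfig.PORT.name(), "3306");          // placeholder port
Configuration config = Configuration.from(props);
// Read a value back out by key
String host = config.getString(MySqlConnectorConfig.HOSTNAME.name());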

Example 2 with Configuration

Use of io.debezium.config.Configuration in project eventuate-local by eventuate-local.

From class MySqlBinLogBasedEventTableChangesToAggregateTopicRelay, method startCapturingChanges:

public CompletableFuture<Object> startCapturingChanges() throws InterruptedException {
    logger.debug("Starting to capture changes");
    cdcStartupValidator.validateEnvironment();
    producer = new EventuateKafkaProducer(kafkaBootstrapServers);
    String connectorName = "my-sql-connector";
    Configuration config = Configuration.create()
            .with("connector.class", "io.debezium.connector.mysql.MySqlConnector")
            .with("offset.storage", KafkaOffsetBackingStore.class.getName())
            .with("bootstrap.servers", kafkaBootstrapServers)
            .with("offset.storage.topic", "eventuate.local.cdc." + connectorName + ".offset.storage")
            .with("poll.interval.ms", 50)
            .with("offset.flush.interval.ms", 6000)
            .with("name", connectorName)
            .with("database.hostname", jdbcUrl.getHost())
            .with("database.port", jdbcUrl.getPort())
            .with("database.user", dbUser)
            .with("database.password", dbPassword)
            .with("database.server.id", 85744)
            .with("database.server.name", "my-app-connector")
            .with("table.whitelist", eventuateSchema.isEmpty() ? jdbcUrl.getDatabase() + ".events" : eventuateSchema.qualifyTable("events"))
            .with("database.history", io.debezium.relational.history.KafkaDatabaseHistory.class.getName())
            .with("database.history.kafka.topic", "eventuate.local.cdc." + connectorName + ".history.kafka.topic")
            .with("database.history.kafka.bootstrap.servers", kafkaBootstrapServers)
            .build();
    CompletableFuture<Object> completion = new CompletableFuture<>();
    engine = EmbeddedEngine.create().using((success, message, throwable) -> {
        if (success) {
            completion.complete(null);
        } else {
            completion.completeExceptionally(new RuntimeException("Engine threw exception: " + message, throwable));
        }
    }).using(config).notifying(this::receiveEvent).build();
    Executor executor = Executors.newCachedThreadPool();
    executor.execute(() -> {
        try {
            engine.run();
        } catch (Throwable t) {
            t.printStackTrace();
        }
    });
    logger.debug("Started engine");
    return completion;
}
Also used : EventuateKafkaProducer(io.eventuate.local.java.kafka.producer.EventuateKafkaProducer) CompletableFuture(java.util.concurrent.CompletableFuture) Executor(java.util.concurrent.Executor) Configuration(io.debezium.config.Configuration) KafkaOffsetBackingStore(org.apache.kafka.connect.storage.KafkaOffsetBackingStore)
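
startCapturingChanges() returns the CompletableFuture that the embedded engine completes (normally or exceptionally) when it stops. A hedged sketch of how a caller might observe it; the relay and logger variables are assumptions of this example, not part of the snippet above, and InterruptedException handling is omitted.

CompletableFuture<Object> completion = relay.startCapturingChanges();
completion.whenComplete((result, error) -> {
    if (error != null) {
        logger.error("CDC engine terminated with an error", error);
    } else {
        logger.info("CDC engine shut down normally");
    }
});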

Example 3 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From class AbstractConnectorTest, method readLastCommittedOffsets:

/**
 * Utility to read the last committed offsets for the specified partitions.
 *
 * @param config the configuration of the engine used to persist the offsets
 * @param partitions the partitions
 * @return the map of partitions to offsets; never null but possibly empty
 */
protected <T> Map<Map<String, T>, Map<String, Object>> readLastCommittedOffsets(Configuration config, Collection<Map<String, T>> partitions) {
    config = config.edit()
            .with(EmbeddedEngine.ENGINE_NAME, "testing-connector")
            .with(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH)
            .with(EmbeddedEngine.OFFSET_FLUSH_INTERVAL_MS, 0)
            .build();
    final String engineName = config.getString(EmbeddedEngine.ENGINE_NAME);
    Converter keyConverter = config.getInstance(EmbeddedEngine.INTERNAL_KEY_CONVERTER_CLASS, Converter.class);
    keyConverter.configure(config.subset(EmbeddedEngine.INTERNAL_KEY_CONVERTER_CLASS.name() + ".", true).asMap(), true);
    Converter valueConverter = config.getInstance(EmbeddedEngine.INTERNAL_VALUE_CONVERTER_CLASS, Converter.class);
    Configuration valueConverterConfig = config;
    if (valueConverter instanceof JsonConverter) {
        // Make sure that the JSON converter is configured to NOT enable schemas ...
        valueConverterConfig = config.edit().with(EmbeddedEngine.INTERNAL_VALUE_CONVERTER_CLASS + ".schemas.enable", false).build();
    }
    valueConverter.configure(valueConverterConfig.subset(EmbeddedEngine.INTERNAL_VALUE_CONVERTER_CLASS.name() + ".", true).asMap(), false);
    // Create the worker config, adding extra fields that are required for validation of a worker config
    // but that are not used within the embedded engine (since the source records are never serialized) ...
    Map<String, String> embeddedConfig = config.asMap(EmbeddedEngine.ALL_FIELDS);
    embeddedConfig.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, JsonConverter.class.getName());
    embeddedConfig.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, JsonConverter.class.getName());
    WorkerConfig workerConfig = new EmbeddedConfig(embeddedConfig);
    FileOffsetBackingStore offsetStore = new FileOffsetBackingStore();
    offsetStore.configure(workerConfig);
    offsetStore.start();
    try {
        OffsetStorageReaderImpl offsetReader = new OffsetStorageReaderImpl(offsetStore, engineName, keyConverter, valueConverter);
        return offsetReader.offsets(partitions);
    } finally {
        offsetStore.stop();
    }
}
Also used : Configuration(io.debezium.config.Configuration) JsonConverter(org.apache.kafka.connect.json.JsonConverter) Converter(org.apache.kafka.connect.storage.Converter) JsonConverter(org.apache.kafka.connect.json.JsonConverter) WorkerConfig(org.apache.kafka.connect.runtime.WorkerConfig) FileOffsetBackingStore(org.apache.kafka.connect.storage.FileOffsetBackingStore) EmbeddedConfig(io.debezium.embedded.EmbeddedEngine.EmbeddedConfig) OffsetStorageReaderImpl(org.apache.kafka.connect.storage.OffsetStorageReaderImpl)
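
A sketch of how a test might invoke readLastCommittedOffsets(...). The partition map here follows the MySQL connector's {"server": <logical server name>} layout, but the exact keys depend on the connector under test; the config variable and the server name are assumptions of this example.

Map<String, String> partition = new HashMap<>();
partition.put("server", "my-app-connector"); // assumed logical server name
Map<Map<String, String>, Map<String, Object>> committed =
        readLastCommittedOffsets(config, Collections.singleton(partition));
Map<String, Object> offset = committed.get(partition); // null if nothing was committed for this partition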

Example 4 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From class ConnectorOutputTest, method runConnector:

/**
 * Run the connector described by the supplied test specification.
 *
 * @param spec the test specification
 * @param callback the function that should be called when the connector is stopped
 */
protected void runConnector(TestSpecification spec, CompletionCallback callback) {
    PreviousContext preRunContext = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
    final Configuration environmentConfig = Configuration.copy(spec.environment()).build();
    final Configuration connectorConfig = spec.config();
    String[] ignorableFieldNames = environmentConfig.getString(ENV_IGNORE_FIELDS, "").split(",");
    final Set<String> ignorableFields = Arrays.stream(ignorableFieldNames).map(String::trim).collect(Collectors.toSet());
    String[] globallyIgnorableFieldNames = globallyIgnorableFieldNames();
    if (globallyIgnorableFieldNames != null && globallyIgnorableFieldNames.length != 0) {
        ignorableFields.addAll(Arrays.stream(globallyIgnorableFieldNames).map(String::trim).collect(Collectors.toSet()));
    }
    final SchemaAndValueConverter keyConverter = new SchemaAndValueConverter(environmentConfig, true);
    final SchemaAndValueConverter valueConverter = new SchemaAndValueConverter(environmentConfig, false);
    final TestData testData = spec.testData();
    // Get any special comparators ...
    final Map<String, RecordValueComparator> comparatorsByFieldName = new HashMap<>();
    addValueComparatorsByFieldPath(comparatorsByFieldName::put);
    final Map<String, RecordValueComparator> comparatorsBySchemaName = new HashMap<>();
    addValueComparatorsBySchemaName(comparatorsBySchemaName::put);
    RuntimeException runError = null;
    CompletionResult problem = new CompletionResult(callback);
    try {
        // Set up the test data ...
        final PreviewIterator<Document> expectedRecords = Iterators.preview(testData.read());
        final Consumer<Document> recorder = testData::write;
        // We need something that will measure the amount of time since our consumer has seen a record ...
        TimeSince timeSinceLastRecord = Threads.timeSince(Clock.SYSTEM);
        // We'll keep the last 10 expected and actual records so that there is some context if they don't match ...
        Queue<SourceRecord> actualRecordHistory = fixedSizeQueue(10);
        Queue<SourceRecord> expectedRecordHistory = fixedSizeQueue(10);
        // Define what happens for each record ...
        ConsumerCompletion result = new ConsumerCompletion();
        Consumer<SourceRecord> consumer = (actualRecord) -> {
            PreviousContext prev = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
            try {
                Testing.debug("actual record:    " + SchemaUtil.asString(actualRecord));
                timeSinceLastRecord.reset();
                // Record the actual in the history ...
                actualRecordHistory.add(actualRecord);
                // And possibly hand it to the test's recorder ...
                try {
                    Document jsonRecord = serializeSourceRecord(actualRecord, keyConverter, valueConverter);
                    if (jsonRecord != null)
                        recorder.accept(jsonRecord);
                } catch (IOException e) {
                    String msg = "Error converting JSON to SourceRecord";
                    Testing.debug(msg);
                    throw new ConnectException(msg, e);
                }
                if (expectedRecords != null) {
                    // Get the test's next expected record ...
                    if (!expectedRecords.hasNext()) {
                        // We received an actual record but don't have or expect one ...
                        String msg = "Source record found but nothing expected";
                        result.error();
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    }
                    Document expected = expectedRecords.next();
                    if (isEndCommand(expected)) {
                        result.error();
                        String msg = "Source record was found but not expected: " + SchemaUtil.asString(actualRecord);
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    } else if (isCommand(expected)) {
                        Testing.debug("applying command: " + SchemaUtil.asString(expected));
                        applyCommand(expected, result);
                    } else {
                        try {
                            // Otherwise, build a record from the expected and add it to the history ...
                            SourceRecord expectedRecord = rehydrateSourceRecord(expected, keyConverter, valueConverter);
                            expectedRecordHistory.add(expectedRecord);
                            Testing.debug("expected record:  " + SchemaUtil.asString(expectedRecord));
                            // And compare the records ...
                            try {
                                assertSourceRecordMatch(actualRecord, expectedRecord, ignorableFields::contains, comparatorsByFieldName, comparatorsBySchemaName);
                            } catch (AssertionError e) {
                                result.error();
                                String msg = "Source record with key " + SchemaUtil.asString(actualRecord.key()) + " did not match expected record: " + e.getMessage();
                                Testing.debug(msg);
                                throw new MismatchRecordException(e, msg, actualRecordHistory, expectedRecordHistory);
                            }
                        } catch (IOException e) {
                            result.exception();
                            String msg = "Error converting JSON to SourceRecord";
                            Testing.debug(msg);
                            throw new ConnectException(msg, e);
                        }
                    }
                    if (!expectedRecords.hasNext()) {
                        // We expect no more records, so stop the connector ...
                        result.stop();
                        String msg = "Stopping connector after no more expected records found";
                        Testing.debug(msg);
                        throw new StopConnectorException(msg);
                    }
                    // Peek at the next record to see if it is a command ...
                    Document nextExpectedRecord = expectedRecords.peek();
                    if (isCommand(nextExpectedRecord)) {
                        // consume it and apply it ...
                        applyCommand(expectedRecords.next(), result);
                    }
                }
            } finally {
                prev.restore();
            }
        };
        // Set up the configuration for the engine to include the connector configuration and apply as defaults
        // the environment and engine parameters ...
        Configuration engineConfig = Configuration.copy(connectorConfig)
                .withDefault(environmentConfig)
                .withDefault(EmbeddedEngine.ENGINE_NAME, spec.name())
                .withDefault(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH)
                .withDefault(EmbeddedEngine.OFFSET_FLUSH_INTERVAL_MS, 0)
                .build();
        // Create the engine ...
        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(engineConfig)
                .notifying(consumer)
                .using(this.getClass().getClassLoader())
                .using(problem)
                .build();
        long connectorTimeoutInSeconds = environmentConfig.getLong(ENV_CONNECTOR_TIMEOUT_IN_SECONDS, 10);
        // Get ready to run the connector one or more times ...
        do {
            // Each time create a thread that will stop our connector if we don't get enough results
            Thread timeoutThread = Threads.timeout(spec.name() + "-connector-output", connectorTimeoutInSeconds, TimeUnit.SECONDS, timeSinceLastRecord, engine::stop);
            // But plan to stop our timeout thread as soon as the connector completes ...
            result.uponCompletion(timeoutThread::interrupt);
            timeoutThread.start();
            // Run the connector and block until the connector is stopped by the timeout thread or until
            // an exception is thrown within the connector (perhaps by the consumer) ...
            Testing.debug("Starting connector");
            result.reset();
            engine.run();
        } while (result.get() == ExecutionResult.RESTART_REQUESTED);
    } catch (IOException e) {
        runError = new RuntimeException("Error reading test data: " + e.getMessage(), e);
    } catch (RuntimeException t) {
        runError = t;
    } finally {
        // And clean up everything ...
        try {
            testData.close();
        } catch (IOException e) {
            if (runError != null) {
                runError = new RuntimeException("Error closing test data: " + e.getMessage(), e);
            }
        } finally {
            try {
                keyConverter.close();
            } finally {
                try {
                    valueConverter.close();
                } finally {
                    preRunContext.restore();
                }
            }
        }
    }
    if (runError != null) {
        throw runError;
    }
    if (problem.hasError()) {
        Throwable error = problem.error();
        if (error instanceof AssertionError) {
            fail(problem.message());
        } else if (error instanceof MismatchRecordException) {
            MismatchRecordException mismatch = (MismatchRecordException) error;
            LinkedList<SourceRecord> actualHistory = mismatch.getActualRecords();
            LinkedList<SourceRecord> expectedHistory = mismatch.getExpectedRecords();
            Testing.print("");
            Testing.print("FAILURE in connector integration test '" + spec.name() + "' in class " + getClass());
            Testing.print(" actual record:   " + SchemaUtil.asString(actualHistory.getLast()));
            Testing.print(" expected record: " + SchemaUtil.asString(expectedHistory.getLast()));
            Testing.print(mismatch.getMessage());
            Testing.print("");
            AssertionError cause = ((MismatchRecordException) error).getError();
            if (cause != null) {
                throw cause;
            }
            fail(problem.message());
        } else if (error instanceof RuntimeException) {
            throw (RuntimeException) error;
        } else {
            throw new RuntimeException(error);
        }
    }
}
Also used : Arrays(java.util.Arrays) PreviewIterator(io.debezium.util.Iterators.PreviewIterator) Threads(io.debezium.util.Threads) Schema(org.apache.kafka.connect.data.Schema) LoggingContext(io.debezium.util.LoggingContext) JsonDeserializer(org.apache.kafka.connect.json.JsonDeserializer) Map(java.util.Map) After(org.junit.After) Assert.fail(org.junit.Assert.fail) JsonNode(com.fasterxml.jackson.databind.JsonNode) Path(java.nio.file.Path) DocumentReader(io.debezium.document.DocumentReader) Predicate(java.util.function.Predicate) Set(java.util.Set) Collectors(java.util.stream.Collectors) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ArrayReader(io.debezium.document.ArrayReader) StandardCharsets(java.nio.charset.StandardCharsets) List(java.util.List) Queue(java.util.Queue) JsonConverter(org.apache.kafka.connect.json.JsonConverter) SchemaUtil(io.debezium.data.SchemaUtil) Value(io.debezium.document.Value) CompletionResult(io.debezium.embedded.EmbeddedEngine.CompletionResult) Array(io.debezium.document.Array) RecordValueComparator(io.debezium.data.VerifyRecord.RecordValueComparator) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) PreviousContext(io.debezium.util.LoggingContext.PreviousContext) ArrayList(java.util.ArrayList) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Document(io.debezium.document.Document) BiConsumer(java.util.function.BiConsumer) ArrayWriter(io.debezium.document.ArrayWriter) LinkedList(java.util.LinkedList) Strings(io.debezium.util.Strings) Before(org.junit.Before) OutputStream(java.io.OutputStream) Properties(java.util.Properties) Iterators(io.debezium.util.Iterators) Iterator(java.util.Iterator) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) Configuration(io.debezium.config.Configuration) FileInputStream(java.io.FileInputStream) CompletionCallback(io.debezium.embedded.EmbeddedEngine.CompletionCallback) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) JsonSerializer(org.apache.kafka.connect.json.JsonSerializer) Testing(io.debezium.util.Testing) Paths(java.nio.file.Paths) Collect(io.debezium.util.Collect) ConnectException(org.apache.kafka.connect.errors.ConnectException) VerifyRecord(io.debezium.data.VerifyRecord) Clock(io.debezium.util.Clock) TimeSince(io.debezium.util.Threads.TimeSince) IoUtil(io.debezium.util.IoUtil) InputStream(java.io.InputStream)
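
The callback parameter is EmbeddedEngine.CompletionCallback, a functional interface invoked once with the final outcome when the engine stops (see the problem wrapper above). A minimal sketch of a callback and of passing it to runConnector(...); the spec variable stands in for however the concrete test builds its TestSpecification.

CompletionCallback callback = (success, message, error) -> {
    if (!success) {
        Testing.print("Connector run failed: " + message);
        if (error != null) {
            error.printStackTrace();
        }
    }
};
runConnector(spec, callback); // spec: a TestSpecification built elsewhere by the concrete test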

Example 5 with Configuration

Use of io.debezium.config.Configuration in project debezium by debezium.

From class EmbeddedEngine, method run:

/**
 * Run this embedded connector and deliver database changes to the registered {@link Consumer}. This method blocks until
 * the connector is stopped.
 * <p>
 * First, the method checks to see if this instance is currently {@link #run() running}, and if so immediately returns.
 * <p>
 * If the configuration is valid, this method starts the connector and starts polling the connector for change events.
 * All messages are delivered in batches to the {@link Consumer} registered with this embedded connector. The batch size,
 * polling
 * frequency, and other parameters are controlled via configuration settings. This continues until this connector is
 * {@link #stop() stopped}.
 * <p>
 * Note that there are two ways to stop a connector running on a thread: calling {@link #stop()} from another thread, or
 * interrupting the thread (e.g., via {@link ExecutorService#shutdownNow()}).
 * <p>
 * This method can be called repeatedly as needed.
 */
@Override
public void run() {
    if (runningThread.compareAndSet(null, Thread.currentThread())) {
        final String engineName = config.getString(ENGINE_NAME);
        final String connectorClassName = config.getString(CONNECTOR_CLASS);
        final Optional<ConnectorCallback> connectorCallback = Optional.ofNullable(this.connectorCallback);
        // Only one thread can be in this part of the method at a time ...
        latch.countUp();
        try {
            if (!config.validateAndRecord(CONNECTOR_FIELDS, logger::error)) {
                fail("Failed to start connector with invalid configuration (see logs for actual errors)");
                return;
            }
            // Instantiate the connector ...
            SourceConnector connector = null;
            try {
                @SuppressWarnings("unchecked") Class<? extends SourceConnector> connectorClass = (Class<SourceConnector>) classLoader.loadClass(connectorClassName);
                connector = connectorClass.newInstance();
            } catch (Throwable t) {
                fail("Unable to instantiate connector class '" + connectorClassName + "'", t);
                return;
            }
            // Instantiate the offset store ...
            final String offsetStoreClassName = config.getString(OFFSET_STORAGE);
            OffsetBackingStore offsetStore = null;
            try {
                @SuppressWarnings("unchecked") Class<? extends OffsetBackingStore> offsetStoreClass = (Class<OffsetBackingStore>) classLoader.loadClass(offsetStoreClassName);
                offsetStore = offsetStoreClass.newInstance();
            } catch (Throwable t) {
                fail("Unable to instantiate OffsetBackingStore class '" + offsetStoreClassName + "'", t);
                return;
            }
            // Initialize the offset store ...
            try {
                offsetStore.configure(workerConfig);
                offsetStore.start();
            } catch (Throwable t) {
                fail("Unable to configure and start the '" + offsetStoreClassName + "' offset backing store", t);
                return;
            }
            // Set up the offset commit policy ...
            if (offsetCommitPolicy == null) {
                offsetCommitPolicy = config.getInstance(EmbeddedEngine.OFFSET_COMMIT_POLICY, OffsetCommitPolicy.class, config);
            }
            // Initialize the connector using a context that does NOT respond to requests to reconfigure tasks ...
            ConnectorContext context = new ConnectorContext() {

                @Override
                public void requestTaskReconfiguration() {
                // Do nothing ...
                }

                @Override
                public void raiseError(Exception e) {
                    fail(e.getMessage(), e);
                }
            };
            connector.initialize(context);
            OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetStore, engineName, keyConverter, valueConverter);
            OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetStore, engineName, keyConverter, valueConverter);
            long commitTimeoutMs = config.getLong(OFFSET_COMMIT_TIMEOUT_MS);
            try {
                // Start the connector with the given properties and get the task configurations ...
                connector.start(config.asMap());
                connectorCallback.ifPresent(ConnectorCallback::connectorStarted);
                List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
                Class<? extends Task> taskClass = connector.taskClass();
                SourceTask task = null;
                try {
                    task = (SourceTask) taskClass.newInstance();
                } catch (IllegalAccessException | InstantiationException t) {
                    fail("Unable to instantiate connector's task class '" + taskClass.getName() + "'", t);
                    return;
                }
                try {
                    SourceTaskContext taskContext = () -> offsetReader;
                    task.initialize(taskContext);
                    task.start(taskConfigs.get(0));
                    connectorCallback.ifPresent(ConnectorCallback::taskStarted);
                } catch (Throwable t) {
                    // Mask the passwords ...
                    Configuration config = Configuration.from(taskConfigs.get(0)).withMaskedPasswords();
                    String msg = "Unable to initialize and start connector's task class '" + taskClass.getName() + "' with config: " + config;
                    fail(msg, t);
                    return;
                }
                recordsSinceLastCommit = 0;
                Throwable handlerError = null;
                try {
                    timeOfLastCommitMillis = clock.currentTimeInMillis();
                    boolean keepProcessing = true;
                    List<SourceRecord> changeRecords = null;
                    while (runningThread.get() != null && handlerError == null && keepProcessing) {
                        try {
                            try {
                                logger.debug("Embedded engine is polling task for records on thread " + runningThread.get());
                                // blocks until there are values ...
                                changeRecords = task.poll();
                                logger.debug("Embedded engine returned from polling task for records");
                            } catch (InterruptedException e) {
                                // Interrupted while polling ...
                                logger.debug("Embedded engine interrupted on thread " + runningThread.get() + " while polling the task for records");
                                Thread.interrupted();
                                break;
                            }
                            try {
                                if (changeRecords != null && !changeRecords.isEmpty()) {
                                    logger.debug("Received {} records from the task", changeRecords.size());
                                    // First forward the records to the connector's consumer ...
                                    for (SourceRecord record : changeRecords) {
                                        try {
                                            consumer.accept(record);
                                            task.commitRecord(record);
                                        } catch (StopConnectorException e) {
                                            keepProcessing = false;
                                            // Stop processing any more but first record the offset for this record's
                                            // partition
                                            offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
                                            recordsSinceLastCommit += 1;
                                            break;
                                        } catch (Throwable t) {
                                            handlerError = t;
                                            break;
                                        }
                                        // Record the offset for this record's partition
                                        offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
                                        recordsSinceLastCommit += 1;
                                    }
                                    // Flush the offsets to storage if necessary ...
                                    maybeFlush(offsetWriter, offsetCommitPolicy, commitTimeoutMs, task);
                                } else {
                                    logger.debug("Received no records from the task");
                                }
                            } catch (Throwable t) {
                                // There was some sort of unexpected exception, so we should stop work
                                if (handlerError == null) {
                                    // make sure we capture the error first so that we can report it later
                                    handlerError = t;
                                }
                                break;
                            }
                        } finally {
                            // then try to commit the offsets, since we record them only after the records were handled
                            // by the consumer ...
                            maybeFlush(offsetWriter, offsetCommitPolicy, commitTimeoutMs, task);
                        }
                    }
                } finally {
                    if (handlerError != null) {
                        // There was an error in the handler so make sure it's always captured...
                        fail("Stopping connector after error in the application's handler method: " + handlerError.getMessage(), handlerError);
                    }
                    try {
                        // First stop the task ...
                        logger.debug("Stopping the task and engine");
                        task.stop();
                        connectorCallback.ifPresent(ConnectorCallback::taskStopped);
                        // Always commit offsets that were captured from the source records we actually processed ...
                        commitOffsets(offsetWriter, commitTimeoutMs, task);
                        if (handlerError == null) {
                            // We stopped normally ...
                            succeed("Connector '" + connectorClassName + "' completed normally.");
                        }
                    } catch (Throwable t) {
                        fail("Error while trying to stop the task and commit the offsets", t);
                    }
                }
            } catch (Throwable t) {
                fail("Error while trying to run connector class '" + connectorClassName + "'", t);
            } finally {
                // Close the offset storage and finally the connector ...
                try {
                    offsetStore.stop();
                } catch (Throwable t) {
                    fail("Error while trying to stop the offset store", t);
                } finally {
                    try {
                        connector.stop();
                        connectorCallback.ifPresent(ConnectorCallback::connectorStopped);
                    } catch (Throwable t) {
                        fail("Error while trying to stop connector class '" + connectorClassName + "'", t);
                    }
                }
            }
        } finally {
            latch.countDown();
            runningThread.set(null);
            // after we've "shut down" the engine, fire the completion callback based on the results we collected
            completionCallback.handle(completionResult.success(), completionResult.message(), completionResult.error());
        }
    }
}
Also used : OffsetStorageWriter(org.apache.kafka.connect.storage.OffsetStorageWriter) Configuration(io.debezium.config.Configuration) SourceRecord(org.apache.kafka.connect.source.SourceRecord) OffsetCommitPolicy(io.debezium.embedded.spi.OffsetCommitPolicy) FileOffsetBackingStore(org.apache.kafka.connect.storage.FileOffsetBackingStore) OffsetBackingStore(org.apache.kafka.connect.storage.OffsetBackingStore) KafkaOffsetBackingStore(org.apache.kafka.connect.storage.KafkaOffsetBackingStore) ConnectorContext(org.apache.kafka.connect.connector.ConnectorContext) SourceTaskContext(org.apache.kafka.connect.source.SourceTaskContext) TimeoutException(java.util.concurrent.TimeoutException) ExecutionException(java.util.concurrent.ExecutionException) OffsetStorageReaderImpl(org.apache.kafka.connect.storage.OffsetStorageReaderImpl) SourceConnector(org.apache.kafka.connect.source.SourceConnector) SourceTask(org.apache.kafka.connect.source.SourceTask) OffsetStorageReader(org.apache.kafka.connect.storage.OffsetStorageReader) Map(java.util.Map)
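
Because run() blocks until the connector stops, a typical embedding submits it to an executor and later calls stop() (or interrupts the thread) to end change capture. A minimal sketch, assuming an engine built as in the earlier examples.

ExecutorService executor = Executors.newSingleThreadExecutor();
executor.execute(engine::run); // run() blocks on this worker thread until the engine is stopped
// ... later, when change capture should end ...
engine.stop(); // or executor.shutdownNow() to interrupt the polling thread
executor.shutdown();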

Aggregations

Configuration (io.debezium.config.Configuration): 38 usages
Test (org.junit.Test): 21 usages
AbstractConnectorTest (io.debezium.embedded.AbstractConnectorTest): 16 usages
Config (org.apache.kafka.common.config.Config): 15 usages
CommonConnectorConfig (io.debezium.config.CommonConnectorConfig): 10 usages
FixFor (io.debezium.doc.FixFor): 6 usages
ConnectException (org.apache.kafka.connect.errors.ConnectException): 6 usages
ConfigValue (org.apache.kafka.common.config.ConfigValue): 5 usages
HashMap (java.util.HashMap): 4 usages
JsonConverter (org.apache.kafka.connect.json.JsonConverter): 4 usages
SQLException (java.sql.SQLException): 3 usages
Map (java.util.Map): 3 usages
Field (io.debezium.config.Field): 2 usages
SchemaUtil (io.debezium.data.SchemaUtil): 2 usages
VerifyRecord (io.debezium.data.VerifyRecord): 2 usages
CompletionCallback (io.debezium.embedded.EmbeddedEngine.CompletionCallback): 2 usages
EmbeddedConfig (io.debezium.embedded.EmbeddedEngine.EmbeddedConfig): 2 usages
JdbcConnection (io.debezium.jdbc.JdbcConnection): 2 usages
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 2 usages
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 2 usages