
Example 36 with SourceRecord

use of org.apache.kafka.connect.source.SourceRecord in project kafka-connect-kinesis by jcustenborder.

the class RecordConverterTest method test.

@Test
public void test() {
    Record record = new Record();
    final Date expectedDate = new Date();
    final String expectedPartitionKey = "Testing";
    final byte[] expectedData = "Testing data".getBytes(Charsets.UTF_8);
    final String expectedSequenceNumber = "34523452";
    final String expectedTopic = "topic";
    final Struct expectedKey = new Struct(RecordConverter.SCHEMA_KINESIS_KEY).put(RecordConverter.FIELD_PARTITION_KEY, expectedPartitionKey);
    final Struct expectedValue = new Struct(RecordConverter.SCHEMA_KINESIS_VALUE)
            .put(RecordConverter.FIELD_PARTITION_KEY, expectedPartitionKey)
            .put(RecordConverter.FIELD_DATA, expectedData)
            .put(RecordConverter.FIELD_APPROXIMATE_ARRIVAL_TIMESTAMP, expectedDate)
            .put(RecordConverter.FIELD_SEQUENCE_NUMBER, expectedSequenceNumber)
            .put(RecordConverter.FIELD_SHARD_ID, this.config.kinesisShardId)
            .put(RecordConverter.FIELD_STREAM_NAME, this.config.kinesisStreamName);
    final Map<String, Object> sourcePartition = ImmutableMap.of(RecordConverter.FIELD_SHARD_ID, SHARD_ID);
    final Map<String, Object> sourceOffset = ImmutableMap.of(RecordConverter.FIELD_SEQUENCE_NUMBER, expectedSequenceNumber);
    final SourceRecord expectedSourceRecord = new SourceRecord(
            sourcePartition, sourceOffset, expectedTopic, null,
            RecordConverter.SCHEMA_KINESIS_KEY, expectedKey,
            RecordConverter.SCHEMA_KINESIS_VALUE, expectedValue,
            expectedDate.getTime());
    record.setData(ByteBuffer.wrap(expectedData));
    record.setApproximateArrivalTimestamp(expectedDate);
    record.setPartitionKey(expectedPartitionKey);
    record.setSequenceNumber(expectedSequenceNumber);
    SourceRecord actualRecord = this.recordConverter.sourceRecord(this.config.kinesisStreamName, this.config.kinesisShardId, record);
    assertNotNull(actualRecord, "record should not be null.");
    assertSourceRecord(expectedSourceRecord, actualRecord);
}
Also used : Record(com.amazonaws.services.kinesis.model.Record) AssertConnectRecord.assertSourceRecord(com.github.jcustenborder.kafka.connect.utils.AssertConnectRecord.assertSourceRecord) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Date(java.util.Date) Struct(org.apache.kafka.connect.data.Struct) Test(org.junit.jupiter.api.Test)
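
For reference, the nine-argument constructor used for expectedSourceRecord above is Kafka Connect's SourceRecord(sourcePartition, sourceOffset, topic, partition, keySchema, key, valueSchema, value, timestamp). The minimal sketch below labels each argument, using plain string schemas instead of the Kinesis-specific structs; the topic name, partition key, and offset values are illustrative only.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class SourceRecordSketch {
    public static void main(String[] args) {
        // Source partition: identifies *where* the data came from (e.g. a Kinesis shard).
        Map<String, String> sourcePartition = Collections.singletonMap("shardId", "shard-000");
        // Source offset: identifies *how far* the task has read within that partition;
        // Connect persists it so the task can resume after a restart.
        Map<String, String> sourceOffset = Collections.singletonMap("sequenceNumber", "34523452");

        SourceRecord record = new SourceRecord(
                sourcePartition,           // keys the stored offsets
                sourceOffset,              // the offset to persist for this record
                "example-topic",           // destination Kafka topic (illustrative)
                null,                      // topic partition; null lets Connect decide
                Schema.STRING_SCHEMA,      // key schema
                "Testing",                 // key
                Schema.STRING_SCHEMA,      // value schema
                "Testing data",            // value
                System.currentTimeMillis() // record timestamp in milliseconds
        );
        System.out.println(record);
    }
}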

Example 37 with SourceRecord

use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

the class ConnectorOutputTest method runConnector.

/**
 * Run the connector described by the supplied test specification.
 *
 * @param spec the test specification
 * @param callback the function that should be called when the connector is stopped
 */
protected void runConnector(TestSpecification spec, CompletionCallback callback) {
    PreviousContext preRunContext = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
    final Configuration environmentConfig = Configuration.copy(spec.environment()).build();
    final Configuration connectorConfig = spec.config();
    String[] ignorableFieldNames = environmentConfig.getString(ENV_IGNORE_FIELDS, "").split(",");
    final Set<String> ignorableFields = Arrays.stream(ignorableFieldNames).map(String::trim).collect(Collectors.toSet());
    String[] globallyIgnorableFieldNames = globallyIgnorableFieldNames();
    if (globallyIgnorableFieldNames != null && globallyIgnorableFieldNames.length != 0) {
        ignorableFields.addAll(Arrays.stream(globallyIgnorableFieldNames).map(String::trim).collect(Collectors.toSet()));
    }
    final SchemaAndValueConverter keyConverter = new SchemaAndValueConverter(environmentConfig, true);
    final SchemaAndValueConverter valueConverter = new SchemaAndValueConverter(environmentConfig, false);
    final TestData testData = spec.testData();
    // Get any special comparators ...
    final Map<String, RecordValueComparator> comparatorsByFieldName = new HashMap<>();
    addValueComparatorsByFieldPath(comparatorsByFieldName::put);
    final Map<String, RecordValueComparator> comparatorsBySchemaName = new HashMap<>();
    addValueComparatorsBySchemaName(comparatorsBySchemaName::put);
    RuntimeException runError = null;
    CompletionResult problem = new CompletionResult(callback);
    try {
        // Set up the test data ...
        final PreviewIterator<Document> expectedRecords = Iterators.preview(testData.read());
        final Consumer<Document> recorder = testData::write;
        // We need something that will measure the amount of time since our consumer has seen a record ...
        TimeSince timeSinceLastRecord = Threads.timeSince(Clock.SYSTEM);
        // We'll keep the last 10 expected and actual records so that there is some context if they don't match ...
        Queue<SourceRecord> actualRecordHistory = fixedSizeQueue(10);
        Queue<SourceRecord> expectedRecordHistory = fixedSizeQueue(10);
        // Define what happens for each record ...
        ConsumerCompletion result = new ConsumerCompletion();
        Consumer<SourceRecord> consumer = (actualRecord) -> {
            PreviousContext prev = LoggingContext.forConnector(getClass().getSimpleName(), "runner", spec.name());
            try {
                Testing.debug("actual record:    " + SchemaUtil.asString(actualRecord));
                timeSinceLastRecord.reset();
                // Record the actual in the history ...
                actualRecordHistory.add(actualRecord);
                // And possibly hand it to the test's recorder ...
                try {
                    Document jsonRecord = serializeSourceRecord(actualRecord, keyConverter, valueConverter);
                    if (jsonRecord != null)
                        recorder.accept(jsonRecord);
                } catch (IOException e) {
                    String msg = "Error converting JSON to SourceRecord";
                    Testing.debug(msg);
                    throw new ConnectException(msg, e);
                }
                if (expectedRecords != null) {
                    // Get the test's next expected record ...
                    if (!expectedRecords.hasNext()) {
                        // We received an actual record but don't have or expect one ...
                        String msg = "Source record found but nothing expected";
                        result.error();
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    }
                    Document expected = expectedRecords.next();
                    if (isEndCommand(expected)) {
                        result.error();
                        String msg = "Source record was found but not expected: " + SchemaUtil.asString(actualRecord);
                        Testing.debug(msg);
                        throw new MismatchRecordException(msg, actualRecordHistory, expectedRecordHistory);
                    } else if (isCommand(expected)) {
                        Testing.debug("applying command: " + SchemaUtil.asString(expected));
                        applyCommand(expected, result);
                    } else {
                        try {
                            // Otherwise, build a record from the expected and add it to the history ...
                            SourceRecord expectedRecord = rehydrateSourceRecord(expected, keyConverter, valueConverter);
                            expectedRecordHistory.add(expectedRecord);
                            Testing.debug("expected record:  " + SchemaUtil.asString(expectedRecord));
                            // And compare the records ...
                            try {
                                assertSourceRecordMatch(actualRecord, expectedRecord, ignorableFields::contains, comparatorsByFieldName, comparatorsBySchemaName);
                            } catch (AssertionError e) {
                                result.error();
                                String msg = "Source record with key " + SchemaUtil.asString(actualRecord.key()) + " did not match expected record: " + e.getMessage();
                                Testing.debug(msg);
                                throw new MismatchRecordException(e, msg, actualRecordHistory, expectedRecordHistory);
                            }
                        } catch (IOException e) {
                            result.exception();
                            String msg = "Error converting JSON to SourceRecord";
                            Testing.debug(msg);
                            throw new ConnectException(msg, e);
                        }
                    }
                    if (!expectedRecords.hasNext()) {
                        // We expect no more records, so stop the connector ...
                        result.stop();
                        String msg = "Stopping connector after no more expected records found";
                        Testing.debug(msg);
                        throw new StopConnectorException(msg);
                    }
                    // Peek at the next record to see if it is a command ...
                    Document nextExpectedRecord = expectedRecords.peek();
                    if (isCommand(nextExpectedRecord)) {
                        // consume it and apply it ...
                        applyCommand(expectedRecords.next(), result);
                    }
                }
            } finally {
                prev.restore();
            }
        };
        // Set up the configuration for the engine to include the connector configuration and apply as defaults
        // the environment and engine parameters ...
        Configuration engineConfig = Configuration.copy(connectorConfig)
                .withDefault(environmentConfig)
                .withDefault(EmbeddedEngine.ENGINE_NAME, spec.name())
                .withDefault(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH)
                .withDefault(EmbeddedEngine.OFFSET_FLUSH_INTERVAL_MS, 0)
                .build();
        // Create the engine ...
        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(engineConfig)
                .notifying(consumer)
                .using(this.getClass().getClassLoader())
                .using(problem)
                .build();
        long connectorTimeoutInSeconds = environmentConfig.getLong(ENV_CONNECTOR_TIMEOUT_IN_SECONDS, 10);
        // Get ready to run the connector one or more times ...
        do {
            // Each time create a thread that will stop our connector if we don't get enough results
            Thread timeoutThread = Threads.timeout(spec.name() + "-connector-output", connectorTimeoutInSeconds, TimeUnit.SECONDS, timeSinceLastRecord, engine::stop);
            // But plan to stop our timeout thread as soon as the connector completes ...
            result.uponCompletion(timeoutThread::interrupt);
            timeoutThread.start();
            // Run the connector and block until the connector is stopped by the timeout thread or until
            // an exception is thrown within the connector (perhaps by the consumer) ...
            Testing.debug("Starting connector");
            result.reset();
            engine.run();
        } while (result.get() == ExecutionResult.RESTART_REQUESTED);
    } catch (IOException e) {
        runError = new RuntimeException("Error reading test data: " + e.getMessage(), e);
    } catch (RuntimeException t) {
        runError = t;
    } finally {
        // And clean up everything ...
        try {
            testData.close();
        } catch (IOException e) {
            if (runError == null) {
                runError = new RuntimeException("Error closing test data: " + e.getMessage(), e);
            }
        } finally {
            try {
                keyConverter.close();
            } finally {
                try {
                    valueConverter.close();
                } finally {
                    preRunContext.restore();
                }
            }
        }
    }
    if (runError != null) {
        throw runError;
    }
    if (problem.hasError()) {
        Throwable error = problem.error();
        if (error instanceof AssertionError) {
            fail(problem.message());
        } else if (error instanceof MismatchRecordException) {
            MismatchRecordException mismatch = (MismatchRecordException) error;
            LinkedList<SourceRecord> actualHistory = mismatch.getActualRecords();
            LinkedList<SourceRecord> expectedHistory = mismatch.getExpectedRecords();
            Testing.print("");
            Testing.print("FAILURE in connector integration test '" + spec.name() + "' in class " + getClass());
            Testing.print(" actual record:   " + SchemaUtil.asString(actualHistory.getLast()));
            Testing.print(" expected record: " + SchemaUtil.asString(expectedHistory.getLast()));
            Testing.print(mismatch.getMessage());
            Testing.print("");
            AssertionError cause = mismatch.getError();
            if (cause != null) {
                throw cause;
            }
            fail(problem.message());
        } else if (error instanceof RuntimeException) {
            throw (RuntimeException) error;
        } else {
            throw new RuntimeException(error);
        }
    }
}
Also used : Arrays(java.util.Arrays) PreviewIterator(io.debezium.util.Iterators.PreviewIterator) Threads(io.debezium.util.Threads) Schema(org.apache.kafka.connect.data.Schema) LoggingContext(io.debezium.util.LoggingContext) JsonDeserializer(org.apache.kafka.connect.json.JsonDeserializer) Map(java.util.Map) After(org.junit.After) Assert.fail(org.junit.Assert.fail) JsonNode(com.fasterxml.jackson.databind.JsonNode) Path(java.nio.file.Path) DocumentReader(io.debezium.document.DocumentReader) Predicate(java.util.function.Predicate) Set(java.util.Set) Collectors(java.util.stream.Collectors) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ArrayReader(io.debezium.document.ArrayReader) StandardCharsets(java.nio.charset.StandardCharsets) List(java.util.List) Queue(java.util.Queue) JsonConverter(org.apache.kafka.connect.json.JsonConverter) SchemaUtil(io.debezium.data.SchemaUtil) Value(io.debezium.document.Value) CompletionResult(io.debezium.embedded.EmbeddedEngine.CompletionResult) Array(io.debezium.document.Array) RecordValueComparator(io.debezium.data.VerifyRecord.RecordValueComparator) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) PreviousContext(io.debezium.util.LoggingContext.PreviousContext) ArrayList(java.util.ArrayList) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Document(io.debezium.document.Document) BiConsumer(java.util.function.BiConsumer) ArrayWriter(io.debezium.document.ArrayWriter) LinkedList(java.util.LinkedList) Strings(io.debezium.util.Strings) Before(org.junit.Before) OutputStream(java.io.OutputStream) Properties(java.util.Properties) Iterators(io.debezium.util.Iterators) Iterator(java.util.Iterator) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) Configuration(io.debezium.config.Configuration) FileInputStream(java.io.FileInputStream) CompletionCallback(io.debezium.embedded.EmbeddedEngine.CompletionCallback) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) JsonSerializer(org.apache.kafka.connect.json.JsonSerializer) Testing(io.debezium.util.Testing) Paths(java.nio.file.Paths) Collect(io.debezium.util.Collect) ConnectException(org.apache.kafka.connect.errors.ConnectException) VerifyRecord(io.debezium.data.VerifyRecord) Clock(io.debezium.util.Clock) TimeSince(io.debezium.util.Threads.TimeSince) IoUtil(io.debezium.util.IoUtil) InputStream(java.io.InputStream)
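
The callback argument to runConnector is Debezium's EmbeddedEngine.CompletionCallback, whose single handle(success, message, error) method fires once the embedded engine shuts down (see the completionCallback.handle(...) call in Example 40 below). A minimal sketch of a concrete test supplying one is shown here; the subclass, method name, and logging are hypothetical, and TestSpecification is assumed to be the nested type that ConnectorOutputTest exposes to its subclasses (the sketch assumes the class lives alongside ConnectorOutputTest).

import io.debezium.embedded.EmbeddedEngine.CompletionCallback;

// Hypothetical subclass: runConnector(...) is protected, so concrete tests extend ConnectorOutputTest.
public class MyConnectorOutputIT extends ConnectorOutputTest {

    public void runAndReport(TestSpecification spec) {
        // handle(success, message, error) is called exactly once when the engine stops.
        CompletionCallback callback = (success, message, error) -> {
            if (success) {
                System.out.println("Connector completed: " + message);
            } else {
                System.err.println("Connector failed: " + message);
                if (error != null) {
                    error.printStackTrace();
                }
            }
        };
        runConnector(spec, callback);
    }
}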

Example 38 with SourceRecord

use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

the class UnwrapFromEnvelopeTest method testDeleteFrowardConfigured.

@Test
public void testDeleteFrowardConfigured() {
    try (final UnwrapFromEnvelope<SourceRecord> transform = new UnwrapFromEnvelope<>()) {
        final Map<String, String> props = new HashMap<>();
        props.put(DROP_DELETES, "false");
        transform.configure(props);
        final SourceRecord deleteRecord = createDeleteRecord();
        final SourceRecord tombstone = transform.apply(deleteRecord);
        assertThat(tombstone.value()).isNull();
    }
}
Also used : HashMap(java.util.HashMap) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Test(org.junit.Test)

Example 39 with SourceRecord

use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

the class UnwrapFromEnvelopeTest method testIgnoreUnknownRecord.

@Test
public void testIgnoreUnknownRecord() {
    try (final UnwrapFromEnvelope<SourceRecord> transform = new UnwrapFromEnvelope<>()) {
        final Map<String, String> props = new HashMap<>();
        transform.configure(props);
        final SourceRecord unknownRecord = createUnknownRecord();
        assertThat(transform.apply(unknownRecord)).isEqualTo(unknownRecord);
        final SourceRecord unnamedSchemaRecord = createUnknownUnnamedSchemaRecord();
        assertThat(transform.apply(unnamedSchemaRecord)).isEqualTo(unnamedSchemaRecord);
    }
}
Also used : HashMap(java.util.HashMap) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Test(org.junit.Test)

Example 40 with SourceRecord

use of org.apache.kafka.connect.source.SourceRecord in project debezium by debezium.

the class EmbeddedEngine method run.

/**
 * Run this embedded connector and deliver database changes to the registered {@link Consumer}. This method blocks until
 * the connector is stopped.
 * <p>
 * First, the method checks to see if this instance is currently {@link #run() running}, and if so immediately returns.
 * <p>
 * If the configuration is valid, this method starts the connector and starts polling the connector for change events.
 * All messages are delivered in batches to the {@link Consumer} registered with this embedded connector. The batch size,
 * polling frequency, and other parameters are controlled via configuration settings. This continues until this connector is
 * {@link #stop() stopped}.
 * <p>
 * Note that there are two ways to stop a connector running on a thread: calling {@link #stop()} from another thread, or
 * interrupting the thread (e.g., via {@link ExecutorService#shutdownNow()}).
 * <p>
 * This method can be called repeatedly as needed.
 */
@Override
public void run() {
    if (runningThread.compareAndSet(null, Thread.currentThread())) {
        final String engineName = config.getString(ENGINE_NAME);
        final String connectorClassName = config.getString(CONNECTOR_CLASS);
        final Optional<ConnectorCallback> connectorCallback = Optional.ofNullable(this.connectorCallback);
        // Only one thread can be in this part of the method at a time ...
        latch.countUp();
        try {
            if (!config.validateAndRecord(CONNECTOR_FIELDS, logger::error)) {
                fail("Failed to start connector with invalid configuration (see logs for actual errors)");
                return;
            }
            // Instantiate the connector ...
            SourceConnector connector = null;
            try {
                @SuppressWarnings("unchecked") Class<? extends SourceConnector> connectorClass = (Class<SourceConnector>) classLoader.loadClass(connectorClassName);
                connector = connectorClass.newInstance();
            } catch (Throwable t) {
                fail("Unable to instantiate connector class '" + connectorClassName + "'", t);
                return;
            }
            // Instantiate the offset store ...
            final String offsetStoreClassName = config.getString(OFFSET_STORAGE);
            OffsetBackingStore offsetStore = null;
            try {
                @SuppressWarnings("unchecked") Class<? extends OffsetBackingStore> offsetStoreClass = (Class<OffsetBackingStore>) classLoader.loadClass(offsetStoreClassName);
                offsetStore = offsetStoreClass.newInstance();
            } catch (Throwable t) {
                fail("Unable to instantiate OffsetBackingStore class '" + offsetStoreClassName + "'", t);
                return;
            }
            // Initialize the offset store ...
            try {
                offsetStore.configure(workerConfig);
                offsetStore.start();
            } catch (Throwable t) {
                fail("Unable to configure and start the '" + offsetStoreClassName + "' offset backing store", t);
                return;
            }
            // Set up the offset commit policy ...
            if (offsetCommitPolicy == null) {
                offsetCommitPolicy = config.getInstance(EmbeddedEngine.OFFSET_COMMIT_POLICY, OffsetCommitPolicy.class, config);
            }
            // Initialize the connector using a context that does NOT respond to requests to reconfigure tasks ...
            ConnectorContext context = new ConnectorContext() {

                @Override
                public void requestTaskReconfiguration() {
                    // Do nothing ...
                }

                @Override
                public void raiseError(Exception e) {
                    fail(e.getMessage(), e);
                }
            };
            connector.initialize(context);
            OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetStore, engineName, keyConverter, valueConverter);
            OffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetStore, engineName, keyConverter, valueConverter);
            long commitTimeoutMs = config.getLong(OFFSET_COMMIT_TIMEOUT_MS);
            try {
                // Start the connector with the given properties and get the task configurations ...
                connector.start(config.asMap());
                connectorCallback.ifPresent(ConnectorCallback::connectorStarted);
                List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
                Class<? extends Task> taskClass = connector.taskClass();
                SourceTask task = null;
                try {
                    task = (SourceTask) taskClass.newInstance();
                } catch (IllegalAccessException | InstantiationException t) {
                    fail("Unable to instantiate connector's task class '" + taskClass.getName() + "'", t);
                    return;
                }
                try {
                    SourceTaskContext taskContext = () -> offsetReader;
                    task.initialize(taskContext);
                    task.start(taskConfigs.get(0));
                    connectorCallback.ifPresent(ConnectorCallback::taskStarted);
                } catch (Throwable t) {
                    // Mask the passwords ...
                    Configuration config = Configuration.from(taskConfigs.get(0)).withMaskedPasswords();
                    String msg = "Unable to initialize and start connector's task class '" + taskClass.getName() + "' with config: " + config;
                    fail(msg, t);
                    return;
                }
                recordsSinceLastCommit = 0;
                Throwable handlerError = null;
                try {
                    timeOfLastCommitMillis = clock.currentTimeInMillis();
                    boolean keepProcessing = true;
                    List<SourceRecord> changeRecords = null;
                    while (runningThread.get() != null && handlerError == null && keepProcessing) {
                        try {
                            try {
                                logger.debug("Embedded engine is polling task for records on thread " + runningThread.get());
                                // blocks until there are values ...
                                changeRecords = task.poll();
                                logger.debug("Embedded engine returned from polling task for records");
                            } catch (InterruptedException e) {
                                // Interrupted while polling ...
                                logger.debug("Embedded engine interrupted on thread " + runningThread.get() + " while polling the task for records");
                                Thread.interrupted();
                                break;
                            }
                            try {
                                if (changeRecords != null && !changeRecords.isEmpty()) {
                                    logger.debug("Received {} records from the task", changeRecords.size());
                                    // First forward the records to the connector's consumer ...
                                    for (SourceRecord record : changeRecords) {
                                        try {
                                            consumer.accept(record);
                                            task.commitRecord(record);
                                        } catch (StopConnectorException e) {
                                            keepProcessing = false;
                                            // Stop processing any more but first record the offset for this record's
                                            // partition
                                            offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
                                            recordsSinceLastCommit += 1;
                                            break;
                                        } catch (Throwable t) {
                                            handlerError = t;
                                            break;
                                        }
                                        // Record the offset for this record's partition
                                        offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
                                        recordsSinceLastCommit += 1;
                                    }
                                    // Flush the offsets to storage if necessary ...
                                    maybeFlush(offsetWriter, offsetCommitPolicy, commitTimeoutMs, task);
                                } else {
                                    logger.debug("Received no records from the task");
                                }
                            } catch (Throwable t) {
                                // There was some sort of unexpected exception, so we should stop work
                                if (handlerError == null) {
                                    // make sure we capture the error first so that we can report it later
                                    handlerError = t;
                                }
                                break;
                            }
                        } finally {
                            // then try to commit the offsets, since we record them only after the records were handled
                            // by the consumer ...
                            maybeFlush(offsetWriter, offsetCommitPolicy, commitTimeoutMs, task);
                        }
                    }
                } finally {
                    if (handlerError != null) {
                        // There was an error in the handler so make sure it's always captured...
                        fail("Stopping connector after error in the application's handler method: " + handlerError.getMessage(), handlerError);
                    }
                    try {
                        // First stop the task ...
                        logger.debug("Stopping the task and engine");
                        task.stop();
                        connectorCallback.ifPresent(ConnectorCallback::taskStopped);
                        // Always commit offsets that were captured from the source records we actually processed ...
                        commitOffsets(offsetWriter, commitTimeoutMs, task);
                        if (handlerError == null) {
                            // We stopped normally ...
                            succeed("Connector '" + connectorClassName + "' completed normally.");
                        }
                    } catch (Throwable t) {
                        fail("Error while trying to stop the task and commit the offsets", t);
                    }
                }
            } catch (Throwable t) {
                fail("Error while trying to run connector class '" + connectorClassName + "'", t);
            } finally {
                // Close the offset storage and finally the connector ...
                try {
                    offsetStore.stop();
                } catch (Throwable t) {
                    fail("Error while trying to stop the offset store", t);
                } finally {
                    try {
                        connector.stop();
                        connectorCallback.ifPresent(ConnectorCallback::connectorStopped);
                    } catch (Throwable t) {
                        fail("Error while trying to stop connector class '" + connectorClassName + "'", t);
                    }
                }
            }
        } finally {
            latch.countDown();
            runningThread.set(null);
            // after we've "shut down" the engine, fire the completion callback based on the results we collected
            completionCallback.handle(completionResult.success(), completionResult.message(), completionResult.error());
        }
    }
}
Also used : OffsetStorageWriter(org.apache.kafka.connect.storage.OffsetStorageWriter) Configuration(io.debezium.config.Configuration) SourceRecord(org.apache.kafka.connect.source.SourceRecord) OffsetCommitPolicy(io.debezium.embedded.spi.OffsetCommitPolicy) FileOffsetBackingStore(org.apache.kafka.connect.storage.FileOffsetBackingStore) OffsetBackingStore(org.apache.kafka.connect.storage.OffsetBackingStore) KafkaOffsetBackingStore(org.apache.kafka.connect.storage.KafkaOffsetBackingStore) ConnectorContext(org.apache.kafka.connect.connector.ConnectorContext) SourceTaskContext(org.apache.kafka.connect.source.SourceTaskContext) TimeoutException(java.util.concurrent.TimeoutException) ExecutionException(java.util.concurrent.ExecutionException) OffsetStorageReaderImpl(org.apache.kafka.connect.storage.OffsetStorageReaderImpl) SourceConnector(org.apache.kafka.connect.source.SourceConnector) SourceTask(org.apache.kafka.connect.source.SourceTask) OffsetStorageReader(org.apache.kafka.connect.storage.OffsetStorageReader) Map(java.util.Map)
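
Putting Example 40 in context: run() blocks until the engine is stopped, so callers typically build the engine with the fluent builder shown in Example 37, hand it to an executor, and later call stop() from another thread (as the Javadoc above describes). The sketch below follows that pattern; the connector class name and offset-file path are placeholders, and the builder and configuration calls mirror the ones already used in the snippets above.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;

import io.debezium.config.Configuration;
import io.debezium.embedded.EmbeddedEngine;

public class EmbeddedEngineRunner {
    public static void main(String[] args) throws InterruptedException {
        // Minimal configuration; a real deployment adds the connector-specific properties.
        Configuration config = Configuration.create()
                .with(EmbeddedEngine.ENGINE_NAME, "example-engine")
                .with(EmbeddedEngine.CONNECTOR_CLASS, "com.example.MySourceConnector") // placeholder class
                .with(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, "/tmp/offsets.dat")
                .build();

        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(config)
                // Every SourceRecord polled in run() above is handed to this consumer.
                .notifying(changeRecord -> System.out.println("Received: " + changeRecord))
                .build();

        // run() blocks, so execute the engine on its own thread ...
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine);

        // ... let it work for a while, then stop it from this thread.
        Thread.sleep(10_000);
        engine.stop();
        executor.shutdown();
        executor.awaitTermination(30, TimeUnit.SECONDS);
    }
}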

Aggregations

SourceRecord (org.apache.kafka.connect.source.SourceRecord) 308
Test (org.junit.Test) 148
Test (org.junit.jupiter.api.Test) 98
Struct (org.apache.kafka.connect.data.Struct) 68
HashMap (java.util.HashMap) 60
Schema (org.apache.kafka.connect.data.Schema) 45
ThreadedTest (org.apache.kafka.connect.util.ThreadedTest) 27
ParameterizedTest (org.apache.kafka.connect.util.ParameterizedTest) 23
ArrayList (java.util.ArrayList) 22
RetryWithToleranceOperatorTest (org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) 21
Map (java.util.Map) 15
SchemaBuilder (org.apache.kafka.connect.data.SchemaBuilder) 13
ConnectException (org.apache.kafka.connect.errors.ConnectException) 13
Document (org.bson.Document) 13
FixFor (io.debezium.doc.FixFor) 12
List (java.util.List) 12
RecordsForCollection (io.debezium.connector.mongodb.RecordMakers.RecordsForCollection) 11
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord) 11
ConnectHeaders (org.apache.kafka.connect.header.ConnectHeaders) 11
BsonTimestamp (org.bson.BsonTimestamp) 11