Search in sources:

Example 6 with OffsetStorageReaderImpl

Use of org.apache.kafka.connect.storage.OffsetStorageReaderImpl in the Apache Kafka project.

From the Worker class, the buildWorkerTask method:

/**
 * Builds the concrete {@link WorkerTask} (source or sink) for the given task instance.
 *
 * <p>Wires up error-handling metrics, the retry/tolerance operator, converters, the
 * transformation chain, and the Kafka client(s) the task needs, then delegates to the
 * appropriate {@code WorkerSourceTask} / {@code WorkerSinkTask} constructor.
 *
 * @param configState     cluster config snapshot (performs dynamic transformations under the covers)
 * @param connConfig      the connector's configuration
 * @param id              the connector task id
 * @param task            the user-supplied {@link Task} instance to wrap
 * @param statusListener  listener notified of task status changes
 * @param initialState    target state the task should start in
 * @param keyConverter    converter for record keys
 * @param valueConverter  converter for record values
 * @param headerConverter converter for record headers
 * @param loader          classloader to run the task under
 * @return a fully wired {@link WorkerTask}
 * @throws ConnectException if {@code task} is neither a {@link SourceTask} nor a {@link SinkTask}
 */
private WorkerTask buildWorkerTask(ClusterConfigState configState, ConnectorConfig connConfig, ConnectorTaskId id, Task task, TaskStatus.Listener statusListener, TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, ClassLoader loader) {
    ErrorHandlingMetrics errorMetrics = errorHandlingMetrics(id);
    final Class<? extends Connector> connectorClass = plugins.connectorClass(connConfig.getString(ConnectorConfig.CONNECTOR_CLASS_CONFIG));
    RetryWithToleranceOperator toleranceOperator = new RetryWithToleranceOperator(connConfig.errorRetryTimeout(), connConfig.errorMaxDelayInMillis(), connConfig.errorToleranceType(), Time.SYSTEM);
    toleranceOperator.metrics(errorMetrics);
    // Guard clause: reject anything that is not a source or sink task up front.
    if (!(task instanceof SourceTask) && !(task instanceof SinkTask)) {
        log.error("Tasks must be a subclass of either SourceTask or SinkTask and current is {}", task);
        throw new ConnectException("Tasks must be a subclass of either SourceTask or SinkTask");
    }
    if (task instanceof SourceTask) {
        SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connConfig.originalsStrings(), config.topicCreationEnable());
        toleranceOperator.reporters(sourceTaskReporters(id, sourceConfig, errorMetrics));
        TransformationChain<SourceRecord> transformationChain = new TransformationChain<>(sourceConfig.<SourceRecord>transformations(), toleranceOperator);
        log.info("Initializing: {}", transformationChain);
        // Offset reader/writer share the worker's backing store, keyed by connector name.
        CloseableOffsetStorageReader reader = new OffsetStorageReaderImpl(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        OffsetStorageWriter writer = new OffsetStorageWriter(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        Map<String, Object> producerProps = producerConfigs(id, "connector-producer-" + id, config, sourceConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps);
        // Only stand up an admin client when topic auto-creation is both enabled
        // globally and requested by this connector's configuration.
        TopicAdmin topicAdmin = null;
        Map<String, TopicCreationGroup> creationGroups = null;
        if (config.topicCreationEnable() && sourceConfig.usesTopicCreation()) {
            Map<String, Object> adminProps = adminConfigs(id, "connector-adminclient-" + id, config, sourceConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
            topicAdmin = new TopicAdmin(adminProps);
            creationGroups = TopicCreationGroup.configuredGroups(sourceConfig);
        }
        // Note we pass the configState as it performs dynamic transformations under the covers
        return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, producer, topicAdmin, creationGroups, reader, writer, config, configState, metrics, loader, time, toleranceOperator, herder.statusBackingStore(), executor);
    }
    // Must be a SinkTask at this point (guard above excluded everything else).
    TransformationChain<SinkRecord> transformationChain = new TransformationChain<>(connConfig.<SinkRecord>transformations(), toleranceOperator);
    log.info("Initializing: {}", transformationChain);
    SinkConnectorConfig sinkConfig = new SinkConnectorConfig(plugins, connConfig.originalsStrings());
    toleranceOperator.reporters(sinkTaskReporters(id, sinkConfig, errorMetrics, connectorClass));
    WorkerErrantRecordReporter errantRecordReporter = createWorkerErrantRecordReporter(sinkConfig, toleranceOperator, keyConverter, valueConverter, headerConverter);
    Map<String, Object> consumerProps = consumerConfigs(id, config, connConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps);
    return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, configState, metrics, keyConverter, valueConverter, headerConverter, transformationChain, consumer, loader, time, toleranceOperator, errantRecordReporter, herder.statusBackingStore());
}
Also used : OffsetStorageWriter(org.apache.kafka.connect.storage.OffsetStorageWriter) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) TopicCreationGroup(org.apache.kafka.connect.util.TopicCreationGroup) SourceRecord(org.apache.kafka.connect.source.SourceRecord) CloseableOffsetStorageReader(org.apache.kafka.connect.storage.CloseableOffsetStorageReader) ErrorHandlingMetrics(org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics) ConnectException(org.apache.kafka.connect.errors.ConnectException) WorkerErrantRecordReporter(org.apache.kafka.connect.runtime.errors.WorkerErrantRecordReporter) RetryWithToleranceOperator(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator) SinkTask(org.apache.kafka.connect.sink.SinkTask) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) TopicAdmin(org.apache.kafka.connect.util.TopicAdmin) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) OffsetStorageReaderImpl(org.apache.kafka.connect.storage.OffsetStorageReaderImpl) SourceTask(org.apache.kafka.connect.source.SourceTask) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap)

Aggregations

OffsetStorageReaderImpl (org.apache.kafka.connect.storage.OffsetStorageReaderImpl)6 ConnectException (org.apache.kafka.connect.errors.ConnectException)4 SourceRecord (org.apache.kafka.connect.source.SourceRecord)4 SourceTask (org.apache.kafka.connect.source.SourceTask)4 OffsetStorageReader (org.apache.kafka.connect.storage.OffsetStorageReader)4 OffsetStorageWriter (org.apache.kafka.connect.storage.OffsetStorageWriter)4 KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer)3 SinkRecord (org.apache.kafka.connect.sink.SinkRecord)3 SinkTask (org.apache.kafka.connect.sink.SinkTask)3 Configuration (io.debezium.config.Configuration)2 Map (java.util.Map)2 CloseableOffsetStorageReader (org.apache.kafka.connect.storage.CloseableOffsetStorageReader)2 FileOffsetBackingStore (org.apache.kafka.connect.storage.FileOffsetBackingStore)2 EmbeddedConfig (io.debezium.embedded.EmbeddedEngine.EmbeddedConfig)1 OffsetCommitPolicy (io.debezium.embedded.spi.OffsetCommitPolicy)1 HashMap (java.util.HashMap)1 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)1 ConcurrentMap (java.util.concurrent.ConcurrentMap)1 ExecutionException (java.util.concurrent.ExecutionException)1 TimeoutException (java.util.concurrent.TimeoutException)1