
Example 1 with TopicAdmin

Use of org.apache.kafka.connect.util.TopicAdmin in the kafka project by apache.

From the class KafkaStatusBackingStore, the configure method:

@Override
public void configure(final WorkerConfig config) {
    this.statusTopic = config.getString(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG);
    if (this.statusTopic == null || this.statusTopic.trim().length() == 0)
        throw new ConfigException("Must specify topic for connector status.");
    String clusterId = ConnectUtils.lookupKafkaClusterId(config);
    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    // we handle retries in this class
    producerProps.put(ProducerConfig.RETRIES_CONFIG, 0);
    // disable idempotence since retries is forced to 0
    producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);
    ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);
    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
    Map<String, Object> adminProps = new HashMap<>(originals);
    ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
    Supplier<TopicAdmin> adminSupplier;
    if (topicAdminSupplier != null) {
        adminSupplier = topicAdminSupplier;
    } else {
        // Create our own topic admin supplier that we'll close when we're stopped
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        adminSupplier = ownTopicAdmin;
    }
    Map<String, Object> topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).statusStorageTopicSettings() : Collections.emptyMap();
    NewTopic topicDescription = TopicAdmin.defineTopic(statusTopic)
            .config(topicSettings) // apply user-supplied settings first so the calls below can override them as needed
            .compacted()
            .partitions(config.getInt(DistributedConfig.STATUS_STORAGE_PARTITIONS_CONFIG))
            .replicationFactor(config.getShort(DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();
    Callback<ConsumerRecord<String, byte[]>> readCallback = (error, record) -> read(record);
    this.kafkaLog = createKafkaBasedLog(statusTopic, producerProps, consumerProps, readCallback, topicDescription, adminSupplier);
}
Also used : WorkerConfig(org.apache.kafka.connect.runtime.WorkerConfig) KafkaBasedLog(org.apache.kafka.connect.util.KafkaBasedLog) SharedTopicAdmin(org.apache.kafka.connect.util.SharedTopicAdmin) Arrays(java.util.Arrays) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) TopicAdmin(org.apache.kafka.connect.util.TopicAdmin) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) RetriableException(org.apache.kafka.common.errors.RetriableException) Supplier(java.util.function.Supplier) Schema(org.apache.kafka.connect.data.Schema) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) TopicStatus(org.apache.kafka.connect.runtime.TopicStatus) Map(java.util.Map) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) TopicConfig(org.apache.kafka.common.config.TopicConfig) Utils(org.apache.kafka.common.utils.Utils) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Callback(org.apache.kafka.connect.util.Callback) ConnectUtils(org.apache.kafka.connect.util.ConnectUtils) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) AbstractStatus(org.apache.kafka.connect.runtime.AbstractStatus) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) Collection(java.util.Collection) NewTopic(org.apache.kafka.clients.admin.NewTopic) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ConfigException(org.apache.kafka.common.config.ConfigException) DistributedConfig(org.apache.kafka.connect.runtime.distributed.DistributedConfig) Table(org.apache.kafka.connect.util.Table) Objects(java.util.Objects) ConnectorStatus(org.apache.kafka.connect.runtime.ConnectorStatus) List(java.util.List) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Struct(org.apache.kafka.connect.data.Struct) SchemaBuilder(org.apache.kafka.connect.data.SchemaBuilder) Collections(java.util.Collections) TaskStatus(org.apache.kafka.connect.runtime.TaskStatus)
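
A detail worth calling out in this example is the ownership split around the admin client: if a Supplier<TopicAdmin> was injected (typically for testing), the store uses it and leaves its lifecycle to the caller; otherwise it creates a SharedTopicAdmin that it must close itself when stopped. Below is a minimal sketch of that pattern; the class name AdminOwnershipSketch and the stop() shape are illustrative, not the exact upstream code.

import java.util.Map;
import java.util.function.Supplier;

import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.util.SharedTopicAdmin;
import org.apache.kafka.connect.util.TopicAdmin;

public class AdminOwnershipSketch {
    // May be injected by the caller (e.g. in tests); if so, the caller owns its lifecycle.
    private Supplier<TopicAdmin> topicAdminSupplier;
    // Non-null only when this class created the admin itself and must close it.
    private SharedTopicAdmin ownTopicAdmin;

    Supplier<TopicAdmin> adminSupplier(Map<String, Object> adminProps) {
        if (topicAdminSupplier != null) {
            return topicAdminSupplier;
        }
        // SharedTopicAdmin implements both Supplier<TopicAdmin> and AutoCloseable,
        // so it can be handed out as a supplier and closed once on shutdown.
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        return ownTopicAdmin;
    }

    public void stop() {
        // Close only the admin we created; a supplied one belongs to the caller.
        Utils.closeQuietly(ownTopicAdmin, "topic admin");
    }
}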

Example 2 with TopicAdmin

Use of org.apache.kafka.connect.util.TopicAdmin in the kafka project by apache.

From the class Worker, the buildWorkerTask method:

private WorkerTask buildWorkerTask(ClusterConfigState configState,
                                   ConnectorConfig connConfig,
                                   ConnectorTaskId id,
                                   Task task,
                                   TaskStatus.Listener statusListener,
                                   TargetState initialState,
                                   Converter keyConverter,
                                   Converter valueConverter,
                                   HeaderConverter headerConverter,
                                   ClassLoader loader) {
    ErrorHandlingMetrics errorHandlingMetrics = errorHandlingMetrics(id);
    final Class<? extends Connector> connectorClass = plugins.connectorClass(connConfig.getString(ConnectorConfig.CONNECTOR_CLASS_CONFIG));
    RetryWithToleranceOperator retryWithToleranceOperator = new RetryWithToleranceOperator(connConfig.errorRetryTimeout(), connConfig.errorMaxDelayInMillis(), connConfig.errorToleranceType(), Time.SYSTEM);
    retryWithToleranceOperator.metrics(errorHandlingMetrics);
    // Decide which type of worker task we need based on the type of task.
    if (task instanceof SourceTask) {
        SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connConfig.originalsStrings(), config.topicCreationEnable());
        retryWithToleranceOperator.reporters(sourceTaskReporters(id, sourceConfig, errorHandlingMetrics));
        TransformationChain<SourceRecord> transformationChain = new TransformationChain<>(sourceConfig.<SourceRecord>transformations(), retryWithToleranceOperator);
        log.info("Initializing: {}", transformationChain);
        CloseableOffsetStorageReader offsetReader = new OffsetStorageReaderImpl(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetBackingStore, id.connector(), internalKeyConverter, internalValueConverter);
        Map<String, Object> producerProps = producerConfigs(id, "connector-producer-" + id, config, sourceConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps);
        TopicAdmin admin;
        Map<String, TopicCreationGroup> topicCreationGroups;
        if (config.topicCreationEnable() && sourceConfig.usesTopicCreation()) {
            Map<String, Object> adminProps = adminConfigs(id, "connector-adminclient-" + id, config, sourceConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
            admin = new TopicAdmin(adminProps);
            topicCreationGroups = TopicCreationGroup.configuredGroups(sourceConfig);
        } else {
            admin = null;
            topicCreationGroups = null;
        }
        // Note we pass the configState as it performs dynamic transformations under the covers
        return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, producer, admin, topicCreationGroups, offsetReader, offsetWriter, config, configState, metrics, loader, time, retryWithToleranceOperator, herder.statusBackingStore(), executor);
    } else if (task instanceof SinkTask) {
        TransformationChain<SinkRecord> transformationChain = new TransformationChain<>(connConfig.<SinkRecord>transformations(), retryWithToleranceOperator);
        log.info("Initializing: {}", transformationChain);
        SinkConnectorConfig sinkConfig = new SinkConnectorConfig(plugins, connConfig.originalsStrings());
        retryWithToleranceOperator.reporters(sinkTaskReporters(id, sinkConfig, errorHandlingMetrics, connectorClass));
        WorkerErrantRecordReporter workerErrantRecordReporter = createWorkerErrantRecordReporter(sinkConfig, retryWithToleranceOperator, keyConverter, valueConverter, headerConverter);
        Map<String, Object> consumerProps = consumerConfigs(id, config, connConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId);
        KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps);
        return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, configState, metrics, keyConverter, valueConverter, headerConverter, transformationChain, consumer, loader, time, retryWithToleranceOperator, workerErrantRecordReporter, herder.statusBackingStore());
    } else {
        log.error("Tasks must be a subclass of either SourceTask or SinkTask and current is {}", task);
        throw new ConnectException("Tasks must be a subclass of either SourceTask or SinkTask");
    }
}
Also used : OffsetStorageWriter(org.apache.kafka.connect.storage.OffsetStorageWriter) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) TopicCreationGroup(org.apache.kafka.connect.util.TopicCreationGroup) SourceRecord(org.apache.kafka.connect.source.SourceRecord) CloseableOffsetStorageReader(org.apache.kafka.connect.storage.CloseableOffsetStorageReader) ErrorHandlingMetrics(org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics) ConnectException(org.apache.kafka.connect.errors.ConnectException) WorkerErrantRecordReporter(org.apache.kafka.connect.runtime.errors.WorkerErrantRecordReporter) RetryWithToleranceOperator(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator) SinkTask(org.apache.kafka.connect.sink.SinkTask) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) TopicAdmin(org.apache.kafka.connect.util.TopicAdmin) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) OffsetStorageReaderImpl(org.apache.kafka.connect.storage.OffsetStorageReaderImpl) SourceTask(org.apache.kafka.connect.source.SourceTask) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap)
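
Unlike the backing stores, the worker here builds a plain TopicAdmin directly from the admin configs, and only when topic creation is enabled for the source connector. The following is a minimal standalone sketch of that direct-construction path; the class name, topic name, and bootstrap address are placeholders, and createTopics is used on the assumption that it skips topics that already exist, as it does in the Kafka version these examples come from.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.connect.util.TopicAdmin;

public class DirectAdminSketch {
    public static void main(String[] args) {
        Map<String, Object> adminProps = new HashMap<>();
        adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        // TopicAdmin is AutoCloseable; try-with-resources releases the underlying Admin client.
        try (TopicAdmin admin = new TopicAdmin(adminProps)) {
            NewTopic topic = TopicAdmin.defineTopic("example-topic")
                    .compacted()
                    .partitions(1)
                    .replicationFactor((short) 1)
                    .build();
            admin.createTopics(topic); // returns the names of the topics actually created
        }
    }
}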

Example 3 with TopicAdmin

Use of org.apache.kafka.connect.util.TopicAdmin in the kafka project by apache.

From the class KafkaConfigBackingStore, the setupAndCreateKafkaBasedLog method:

// package private for testing
KafkaBasedLog<String, byte[]> setupAndCreateKafkaBasedLog(String topic, final WorkerConfig config) {
    String clusterId = ConnectUtils.lookupKafkaClusterId(config);
    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE);
    ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);
    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
    Map<String, Object> adminProps = new HashMap<>(originals);
    ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
    Supplier<TopicAdmin> adminSupplier;
    if (topicAdminSupplier != null) {
        adminSupplier = topicAdminSupplier;
    } else {
        // Create our own topic admin supplier that we'll close when we're stopped
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        adminSupplier = ownTopicAdmin;
    }
    Map<String, Object> topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).configStorageTopicSettings() : Collections.emptyMap();
    NewTopic topicDescription = TopicAdmin.defineTopic(topic)
            .config(topicSettings) // apply user-supplied settings first so the calls below can override them as needed
            .compacted()
            .partitions(1)
            .replicationFactor(config.getShort(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();
    return createKafkaBasedLog(topic, producerProps, consumerProps, new ConsumeCallback(), topicDescription, adminSupplier);
}
Also used : SharedTopicAdmin(org.apache.kafka.connect.util.SharedTopicAdmin) HashMap(java.util.HashMap) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) DistributedConfig(org.apache.kafka.connect.runtime.distributed.DistributedConfig) TopicAdmin(org.apache.kafka.connect.util.TopicAdmin) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) NewTopic(org.apache.kafka.clients.admin.NewTopic) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) StringSerializer(org.apache.kafka.common.serialization.StringSerializer)
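
The inline comment about calling config() first is the important detail in this example: user-supplied topic settings are applied before compacted(), so the builder's later calls win. Here is a small sketch of that precedence with a deliberately conflicting user setting; the class name is hypothetical, and the printed result assumes compacted() overwrites cleanup.policy, which is what the comment in the source describes.

import java.util.Map;

import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;
import org.apache.kafka.connect.util.TopicAdmin;

public class TopicSettingsPrecedenceSketch {
    public static void main(String[] args) {
        // A user setting that would break a compacted internal topic if it survived.
        Map<String, Object> userSettings = Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, "delete");

        NewTopic topic = TopicAdmin.defineTopic("connect-configs")
                .config(userSettings)         // applied first ...
                .compacted()                  // ... then overridden to cleanup.policy=compact
                .partitions(1)                // the config topic must be a single partition for ordering
                .replicationFactor((short) 3)
                .build();

        System.out.println(topic.configs().get(TopicConfig.CLEANUP_POLICY_CONFIG)); // expected: compact
    }
}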

Example 4 with TopicAdmin

Use of org.apache.kafka.connect.util.TopicAdmin in the kafka project by apache.

From the class KafkaOffsetBackingStore, the configure method:

@Override
public void configure(final WorkerConfig config) {
    String topic = config.getString(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG);
    if (topic == null || topic.trim().length() == 0)
        throw new ConfigException("Offset storage topic must be specified");
    String clusterId = ConnectUtils.lookupKafkaClusterId(config);
    data = new HashMap<>();
    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE);
    ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);
    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
    Map<String, Object> adminProps = new HashMap<>(originals);
    ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
    Supplier<TopicAdmin> adminSupplier;
    if (topicAdminSupplier != null) {
        adminSupplier = topicAdminSupplier;
    } else {
        // Create our own topic admin supplier that we'll close when we're stopped
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        adminSupplier = ownTopicAdmin;
    }
    Map<String, Object> topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).offsetStorageTopicSettings() : Collections.emptyMap();
    NewTopic topicDescription = TopicAdmin.defineTopic(topic)
            .config(topicSettings) // apply user-supplied settings first so the calls below can override them as needed
            .compacted()
            .partitions(config.getInt(DistributedConfig.OFFSET_STORAGE_PARTITIONS_CONFIG))
            .replicationFactor(config.getShort(DistributedConfig.OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();
    offsetLog = createKafkaBasedLog(topic, producerProps, consumerProps, consumedCallback, topicDescription, adminSupplier);
}
Also used : SharedTopicAdmin(org.apache.kafka.connect.util.SharedTopicAdmin) HashMap(java.util.HashMap) DistributedConfig(org.apache.kafka.connect.runtime.distributed.DistributedConfig) ConfigException(org.apache.kafka.common.config.ConfigException) TopicAdmin(org.apache.kafka.connect.util.TopicAdmin) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) NewTopic(org.apache.kafka.clients.admin.NewTopic) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer)
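
All three backing-store examples end by handing their prepared configs to a createKafkaBasedLog helper whose body is not shown in these snippets. A hedged sketch of what it plausibly does in this Kafka version follows: wrap topic creation in an initializer and construct the KafkaBasedLog. Treat the exact KafkaBasedLog constructor signature as an assumption here, since it has changed across versions.

private KafkaBasedLog<byte[], byte[]> createKafkaBasedLog(String topic,
        Map<String, Object> producerProps,
        Map<String, Object> consumerProps,
        Callback<ConsumerRecord<byte[], byte[]>> consumedCallback,
        NewTopic topicDescription,
        Supplier<TopicAdmin> adminSupplier) {
    // The initializer runs when the log starts: it creates the internal topic if it is missing.
    java.util.function.Consumer<TopicAdmin> createTopics =
            admin -> admin.createTopics(topicDescription);
    return new KafkaBasedLog<>(topic, producerProps, consumerProps, adminSupplier,
            consumedCallback, Time.SYSTEM, createTopics);
}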

Aggregations

HashMap (java.util.HashMap): 4 uses
NewTopic (org.apache.kafka.clients.admin.NewTopic): 3 uses
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 3 uses
ByteArraySerializer (org.apache.kafka.common.serialization.ByteArraySerializer): 3 uses
DistributedConfig (org.apache.kafka.connect.runtime.distributed.DistributedConfig): 3 uses
TopicAdmin (org.apache.kafka.connect.util.TopicAdmin): 3 uses
Map (java.util.Map): 2 uses
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2 uses
ConcurrentMap (java.util.concurrent.ConcurrentMap): 2 uses
ConfigException (org.apache.kafka.common.config.ConfigException): 2 uses
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 2 uses
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 2 uses
SharedTopicAdmin (org.apache.kafka.connect.util.SharedTopicAdmin): 2 uses
ArrayList (java.util.ArrayList): 1 use
Arrays (java.util.Arrays): 1 use
Collection (java.util.Collection): 1 use
Collections (java.util.Collections): 1 use
HashSet (java.util.HashSet): 1 use
List (java.util.List): 1 use
Objects (java.util.Objects): 1 use