Example 1 with SharedTopicAdmin

Use of org.apache.kafka.connect.util.SharedTopicAdmin in the apache/kafka project.

From the class KafkaStatusBackingStore, method configure:

@Override
public void configure(final WorkerConfig config) {
    this.statusTopic = config.getString(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG);
    if (this.statusTopic == null || this.statusTopic.trim().length() == 0)
        throw new ConfigException("Must specify topic for connector status.");
    String clusterId = ConnectUtils.lookupKafkaClusterId(config);
    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    // we handle retries in this class
    producerProps.put(ProducerConfig.RETRIES_CONFIG, 0);
    // disable idempotence since retries is forced to 0
    producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);
    ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);
    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
    Map<String, Object> adminProps = new HashMap<>(originals);
    ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
    Supplier<TopicAdmin> adminSupplier;
    if (topicAdminSupplier != null) {
        adminSupplier = topicAdminSupplier;
    } else {
        // Create our own topic admin supplier that we'll close when we're stopped
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        adminSupplier = ownTopicAdmin;
    }
    Map<String, Object> topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).statusStorageTopicSettings() : Collections.emptyMap();
    NewTopic topicDescription = TopicAdmin.defineTopic(statusTopic)
            .config(topicSettings) // config first so that we can override user-supplied settings as needed
            .compacted()
            .partitions(config.getInt(DistributedConfig.STATUS_STORAGE_PARTITIONS_CONFIG))
            .replicationFactor(config.getShort(DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();
    Callback<ConsumerRecord<String, byte[]>> readCallback = (error, record) -> read(record);
    this.kafkaLog = createKafkaBasedLog(statusTopic, producerProps, consumerProps, readCallback, topicDescription, adminSupplier);
}
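
The else-branch above works because SharedTopicAdmin is both a Supplier<TopicAdmin> and an AutoCloseable, so the store can hand out one lazily created admin client and close it on shutdown. A minimal generic sketch of that pattern (the class name LazyCloseableSupplier is hypothetical, not part of Kafka):

import java.util.function.Supplier;

// Hypothetical illustration of the lazy, closeable supplier pattern that
// SharedTopicAdmin appears to follow; not the actual Kafka implementation.
class LazyCloseableSupplier<T extends AutoCloseable> implements Supplier<T>, AutoCloseable {

    private final Supplier<T> factory;
    private T instance;
    private boolean closed;

    LazyCloseableSupplier(Supplier<T> factory) {
        this.factory = factory;
    }

    @Override
    public synchronized T get() {
        if (closed)
            throw new IllegalStateException("supplier already closed");
        if (instance == null)
            instance = factory.get(); // create the shared resource on first use
        return instance;
    }

    @Override
    public synchronized void close() {
        closed = true;
        if (instance != null) {
            try {
                instance.close(); // release the shared resource exactly once
            } catch (Exception e) {
                // best effort; real code would log this
            }
            instance = null;
        }
    }
}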

Example 2 with SharedTopicAdmin

Use of org.apache.kafka.connect.util.SharedTopicAdmin in the apache/kafka project.

From the class MirrorMaker, method addHerder:

private void addHerder(SourceAndTarget sourceAndTarget) {
    log.info("creating herder for " + sourceAndTarget.toString());
    Map<String, String> workerProps = config.workerConfig(sourceAndTarget);
    String advertisedUrl = advertisedBaseUrl + "/" + sourceAndTarget.source();
    String workerId = sourceAndTarget.toString();
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig distributedConfig = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(distributedConfig);
    // Create the admin client to be shared by all backing stores for this herder
    Map<String, Object> adminProps = new HashMap<>(distributedConfig.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, distributedConfig, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(distributedConfig);
    Worker worker = new Worker(workerId, time, plugins, distributedConfig, offsetBackingStore, CLIENT_CONFIG_OVERRIDE_POLICY);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(distributedConfig);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, distributedConfig, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed when the
    // herder is stopped. MirrorMaker has multiple herders, and having the herder own the close responsibility is much easier than
    // tracking the various shared admin objects in this class.
    Herder herder = new DistributedHerder(distributedConfig, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, advertisedUrl, CLIENT_CONFIG_OVERRIDE_POLICY, sharedAdmin);
    herders.put(sourceAndTarget, herder);
}
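
The comment explains the ownership decision: the herder, not MirrorMaker, closes the shared admin. A minimal sketch of the "owner closes trailing AutoCloseables" idea, assuming only that DistributedHerder accepts extra AutoCloseable arguments as shown above (the class name ResourceOwner is hypothetical):

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch: a component that accepts extra closeables and releases
// them during its own shutdown, so callers need not track them separately.
class ResourceOwner implements AutoCloseable {

    private final List<AutoCloseable> uponShutdown;

    ResourceOwner(AutoCloseable... uponShutdown) {
        this.uponShutdown = Arrays.asList(uponShutdown);
    }

    @Override
    public void close() {
        for (AutoCloseable resource : uponShutdown) {
            try {
                resource.close();
            } catch (Exception e) {
                // swallow so one failure does not prevent closing the rest
            }
        }
    }
}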

Example 3 with SharedTopicAdmin

Use of org.apache.kafka.connect.util.SharedTopicAdmin in the apache/kafka project.

From the class ConnectDistributed, method startConnect:

public Connect startConnect(Map<String, String> workerProps) {
    log.info("Scanning for plugin classes. This might take a moment ...");
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig config = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    log.debug("Kafka cluster ID: {}", kafkaClusterId);
    RestServer rest = new RestServer(config);
    rest.initializeServer();
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
    // Create the admin client to be shared by all backing stores.
    Map<String, Object> adminProps = new HashMap<>(config.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, config, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(config);
    ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG), config, ConnectorClientConfigOverridePolicy.class);
    Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(config);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, config, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed when the
    // herder is stopped. This is easier than having to track and own the lifecycle ourselves.
    DistributedHerder herder = new DistributedHerder(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, advertisedUrl.toString(), connectorClientConfigOverridePolicy, sharedAdmin);
    final Connect connect = new Connect(herder, rest);
    log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
    try {
        connect.start();
    } catch (Exception e) {
        log.error("Failed to start Connect", e);
        connect.stop();
        Exit.exit(3);
    }
    return connect;
}
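
For context, startConnect expects the standard distributed worker properties. A hypothetical minimal snippet (the keys are real Connect worker configs, the values are placeholders chosen for illustration):

import java.util.HashMap;
import java.util.Map;

Map<String, String> workerProps = new HashMap<>();
workerProps.put("bootstrap.servers", "localhost:9092");  // Kafka cluster to connect to
workerProps.put("group.id", "connect-cluster");          // distributed worker group
workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
workerProps.put("offset.storage.topic", "connect-offsets"); // internal topics backing the
workerProps.put("config.storage.topic", "connect-configs"); // three stores configured above
workerProps.put("status.storage.topic", "connect-status");
Connect connect = new ConnectDistributed().startConnect(workerProps);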

Example 4 with SharedTopicAdmin

Use of org.apache.kafka.connect.util.SharedTopicAdmin in the apache/kafka project.

From the class KafkaOffsetBackingStore, method configure:

@Override
public void configure(final WorkerConfig config) {
    String topic = config.getString(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG);
    if (topic == null || topic.trim().length() == 0)
        throw new ConfigException("Offset storage topic must be specified");
    String clusterId = ConnectUtils.lookupKafkaClusterId(config);
    data = new HashMap<>();
    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE);
    ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);
    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
    Map<String, Object> adminProps = new HashMap<>(originals);
    ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
    Supplier<TopicAdmin> adminSupplier;
    if (topicAdminSupplier != null) {
        adminSupplier = topicAdminSupplier;
    } else {
        // Create our own topic admin supplier that we'll close when we're stopped
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        adminSupplier = ownTopicAdmin;
    }
    Map<String, Object> topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).offsetStorageTopicSettings() : Collections.emptyMap();
    NewTopic topicDescription = TopicAdmin.defineTopic(topic)
            .config(topicSettings) // config first so that we can override user-supplied settings as needed
            .compacted()
            .partitions(config.getInt(DistributedConfig.OFFSET_STORAGE_PARTITIONS_CONFIG))
            .replicationFactor(config.getShort(DistributedConfig.OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();
    offsetLog = createKafkaBasedLog(topic, producerProps, consumerProps, consumedCallback, topicDescription, adminSupplier);
}
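
All of these stores define their topic the same way: the user-supplied topicSettings go into the builder first, and the required settings such as compaction are applied afterwards so they take precedence. A simplified sketch of that ordering, assuming plain Map put/putAll semantics (TopicSpec is a hypothetical stand-in, not the real TopicAdmin builder):

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in showing why config(...) comes first in the chain:
// later put() calls overwrite earlier ones, so mandatory settings win.
class TopicSpec {

    private final String name;
    private final Map<String, String> configs = new HashMap<>();

    TopicSpec(String name) {
        this.name = name;
    }

    TopicSpec config(Map<String, String> userSettings) {
        configs.putAll(userSettings); // user settings first...
        return this;
    }

    TopicSpec compacted() {
        configs.put("cleanup.policy", "compact"); // ...then required overrides win
        return this;
    }
}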

Example 5 with SharedTopicAdmin

Use of org.apache.kafka.connect.util.SharedTopicAdmin in the apache/kafka project.

From the class KafkaConfigBackingStore, method setupAndCreateKafkaBasedLog:

// package private for testing
KafkaBasedLog<String, byte[]> setupAndCreateKafkaBasedLog(String topic, final WorkerConfig config) {
    String clusterId = ConnectUtils.lookupKafkaClusterId(config);
    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE);
    ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);
    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
    Map<String, Object> adminProps = new HashMap<>(originals);
    ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
    Supplier<TopicAdmin> adminSupplier;
    if (topicAdminSupplier != null) {
        adminSupplier = topicAdminSupplier;
    } else {
        // Create our own topic admin supplier that we'll close when we're stopped
        ownTopicAdmin = new SharedTopicAdmin(adminProps);
        adminSupplier = ownTopicAdmin;
    }
    Map<String, Object> topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).configStorageTopicSettings() : Collections.emptyMap();
    NewTopic topicDescription = TopicAdmin.defineTopic(topic)
            .config(topicSettings) // config first so that we can override user-supplied settings as needed
            .compacted()
            .partitions(1) // a single partition so all workers read config records in the same order
            .replicationFactor(config.getShort(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();
    return createKafkaBasedLog(topic, producerProps, consumerProps, new ConsumeCallback(), topicDescription, adminSupplier);
}
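
Taken together, the examples suggest a simple standalone usage: construct a SharedTopicAdmin from admin properties, hand it out as a Supplier<TopicAdmin>, and close it when done. A hedged sketch based only on the calls visible above:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.util.SharedTopicAdmin;
import org.apache.kafka.connect.util.TopicAdmin;

Map<String, Object> adminProps = new HashMap<>();
adminProps.put("bootstrap.servers", "localhost:9092"); // illustrative value
// try-with-resources works because SharedTopicAdmin is AutoCloseable,
// which is what the herder examples above rely on.
try (SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps)) {
    TopicAdmin admin = sharedAdmin.get(); // lazily creates the underlying admin client
    // ... use admin, e.g. to define or verify internal Connect topics ...
}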

Aggregations

HashMap (java.util.HashMap): 5
DistributedConfig (org.apache.kafka.connect.runtime.distributed.DistributedConfig): 5
SharedTopicAdmin (org.apache.kafka.connect.util.SharedTopicAdmin): 4
NewTopic (org.apache.kafka.clients.admin.NewTopic): 3
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 3
ByteArraySerializer (org.apache.kafka.common.serialization.ByteArraySerializer): 3
ConfigException (org.apache.kafka.common.config.ConfigException): 2
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 2
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 2
Worker (org.apache.kafka.connect.runtime.Worker): 2
WorkerConfigTransformer (org.apache.kafka.connect.runtime.WorkerConfigTransformer): 2
DistributedHerder (org.apache.kafka.connect.runtime.distributed.DistributedHerder): 2
Plugins (org.apache.kafka.connect.runtime.isolation.Plugins): 2
ConfigBackingStore (org.apache.kafka.connect.storage.ConfigBackingStore): 2
Converter (org.apache.kafka.connect.storage.Converter): 2
KafkaConfigBackingStore (org.apache.kafka.connect.storage.KafkaConfigBackingStore): 2
KafkaOffsetBackingStore (org.apache.kafka.connect.storage.KafkaOffsetBackingStore): 2
KafkaStatusBackingStore (org.apache.kafka.connect.storage.KafkaStatusBackingStore): 2
StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore): 2
TopicAdmin (org.apache.kafka.connect.util.TopicAdmin): 2