Example 26 with Plugins

Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.

Class MirrorMaker, method addHerder:

private void addHerder(SourceAndTarget sourceAndTarget) {
    // Note: time, config, advertisedBaseUrl, herders, and CLIENT_CONFIG_OVERRIDE_POLICY are MirrorMaker fields.
    log.info("creating herder for " + sourceAndTarget.toString());
    Map<String, String> workerProps = config.workerConfig(sourceAndTarget);
    String advertisedUrl = advertisedBaseUrl + "/" + sourceAndTarget.source();
    String workerId = sourceAndTarget.toString();
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig distributedConfig = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(distributedConfig);
    // Create the admin client to be shared by all backing stores for this herder
    Map<String, Object> adminProps = new HashMap<>(distributedConfig.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, distributedConfig, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(distributedConfig);
    Worker worker = new Worker(workerId, time, plugins, distributedConfig, offsetBackingStore, CLIENT_CONFIG_OVERRIDE_POLICY);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(distributedConfig);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, distributedConfig, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed when the
    // herder is stopped. MirrorMaker has multiple herders, and having the herder own the close responsibility is much easier than
    // tracking the various shared admin objects in this class.
    Herder herder = new DistributedHerder(distributedConfig, time, worker, kafkaClusterId,
            statusBackingStore, configBackingStore, advertisedUrl, CLIENT_CONFIG_OVERRIDE_POLICY, sharedAdmin);
    herders.put(sourceAndTarget, herder);
}
Also used: KafkaStatusBackingStore (org.apache.kafka.connect.storage.KafkaStatusBackingStore), StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore), DistributedHerder (org.apache.kafka.connect.runtime.distributed.DistributedHerder), SharedTopicAdmin (org.apache.kafka.connect.util.SharedTopicAdmin), HashMap (java.util.HashMap), DistributedConfig (org.apache.kafka.connect.runtime.distributed.DistributedConfig), WorkerConfigTransformer (org.apache.kafka.connect.runtime.WorkerConfigTransformer), ConfigBackingStore (org.apache.kafka.connect.storage.ConfigBackingStore), KafkaConfigBackingStore (org.apache.kafka.connect.storage.KafkaConfigBackingStore), Worker (org.apache.kafka.connect.runtime.Worker), Converter (org.apache.kafka.connect.storage.Converter), KafkaOffsetBackingStore (org.apache.kafka.connect.storage.KafkaOffsetBackingStore), Herder (org.apache.kafka.connect.runtime.Herder), Plugins (org.apache.kafka.connect.runtime.isolation.Plugins)
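
Both this method and the startConnect method in the next example open with the same two-step plugin-isolation handshake. A minimal sketch of that handshake on its own, assuming only a worker properties map (the plugin.path directory below is made up):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.isolation.Plugins;

public class PluginsHandshakeSketch {
    public static void main(String[] args) {
        Map<String, String> workerProps = new HashMap<>();
        // plugin.path is the standard Connect worker setting; this directory is hypothetical.
        workerProps.put("plugin.path", "/opt/connect/plugins");
        // Scans plugin.path and builds isolated classloaders for the discovered plugins.
        Plugins plugins = new Plugins(workerProps);
        // Swaps the thread's context classloader for the delegating plugin classloader,
        // so later class lookups resolve against the scanned plugins first.
        plugins.compareAndSwapWithDelegatingLoader();
    }
}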

Example 27 with Plugins

Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.

Class ConnectDistributed, method startConnect:

public Connect startConnect(Map<String, String> workerProps) {
    // Note: time and initStart are ConnectDistributed fields; initStart was captured when startup began.
    log.info("Scanning for plugin classes. This might take a moment ...");
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig config = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    log.debug("Kafka cluster ID: {}", kafkaClusterId);
    RestServer rest = new RestServer(config);
    rest.initializeServer();
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
    // Create the admin client to be shared by all backing stores.
    Map<String, Object> adminProps = new HashMap<>(config.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, config, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(config);
    ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(
            config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG),
            config, ConnectorClientConfigOverridePolicy.class);
    Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(config);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, config, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed when the
    // herder is stopped. This is easier than having to track and own the lifecycle ourselves.
    DistributedHerder herder = new DistributedHerder(config, time, worker, kafkaClusterId,
            statusBackingStore, configBackingStore, advertisedUrl.toString(),
            connectorClientConfigOverridePolicy, sharedAdmin);
    final Connect connect = new Connect(herder, rest);
    log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
    try {
        connect.start();
    } catch (Exception e) {
        log.error("Failed to start Connect", e);
        connect.stop();
        Exit.exit(3);
    }
    return connect;
}
Also used: KafkaStatusBackingStore (org.apache.kafka.connect.storage.KafkaStatusBackingStore), StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore), DistributedHerder (org.apache.kafka.connect.runtime.distributed.DistributedHerder), SharedTopicAdmin (org.apache.kafka.connect.util.SharedTopicAdmin), HashMap (java.util.HashMap), DistributedConfig (org.apache.kafka.connect.runtime.distributed.DistributedConfig), Connect (org.apache.kafka.connect.runtime.Connect), ConnectorClientConfigOverridePolicy (org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy), URI (java.net.URI), WorkerConfigTransformer (org.apache.kafka.connect.runtime.WorkerConfigTransformer), ConfigBackingStore (org.apache.kafka.connect.storage.ConfigBackingStore), KafkaConfigBackingStore (org.apache.kafka.connect.storage.KafkaConfigBackingStore), RestServer (org.apache.kafka.connect.runtime.rest.RestServer), Worker (org.apache.kafka.connect.runtime.Worker), Converter (org.apache.kafka.connect.storage.Converter), KafkaOffsetBackingStore (org.apache.kafka.connect.storage.KafkaOffsetBackingStore), Plugins (org.apache.kafka.connect.runtime.isolation.Plugins)
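
For context, this method is invoked from ConnectDistributed's command-line entry point. A simplified driver sketch, assuming the worker properties file path arrives as the first command-line argument (an approximation of what ConnectDistributed.main does, not a verbatim copy):

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.cli.ConnectDistributed;
import org.apache.kafka.connect.runtime.Connect;

public class ConnectDistributedDriverSketch {
    public static void main(String[] args) throws Exception {
        // Load worker properties from the file named by args[0], if one was given.
        Map<String, String> workerProps = args.length > 0
                ? Utils.propsToStringMap(Utils.loadProps(args[0]))
                : Collections.emptyMap();
        ConnectDistributed connectDistributed = new ConnectDistributed();
        Connect connect = connectDistributed.startConnect(workerProps);
        // Block until the worker is shut down.
        connect.awaitStop();
    }
}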

Example 28 with Plugins

Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.

Class ConnectorConfig, method enrich:

/**
 * Returns an enriched {@link ConfigDef}, building upon the supplied {@code baseConfigDef} and using the
 * current configuration specified in {@code props} as input.
 * <p>
 * {@code requireFullConfig} specifies whether missing required config values should cause an exception to be thrown.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static ConfigDef enrich(Plugins plugins, ConfigDef baseConfigDef, Map<String, String> props, boolean requireFullConfig) {
    ConfigDef newDef = new ConfigDef(baseConfigDef);
    new EnrichablePlugin<Transformation<?>>("Transformation", TRANSFORMS_CONFIG, TRANSFORMS_GROUP, (Class) Transformation.class, props, requireFullConfig) {

        @SuppressWarnings("rawtypes")
        @Override
        protected Set<PluginDesc<Transformation<?>>> plugins() {
            return plugins.transformations();
        }

        @Override
        protected ConfigDef initialConfigDef() {
            // All Transformations get these config parameters implicitly
            return super.initialConfigDef()
                    .define(PredicatedTransformation.PREDICATE_CONFIG, Type.STRING, "", Importance.MEDIUM,
                            "The alias of a predicate used to determine whether to apply this transformation.")
                    .define(PredicatedTransformation.NEGATE_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM,
                            "Whether the configured predicate should be negated.");
        }

        @Override
        protected Stream<Map.Entry<String, ConfigDef.ConfigKey>> configDefsForClass(String typeConfig) {
            return super.configDefsForClass(typeConfig).filter(entry -> {
                // The implicit parameters mask any from the transformer with the same name
                if (PredicatedTransformation.PREDICATE_CONFIG.equals(entry.getKey()) || PredicatedTransformation.NEGATE_CONFIG.equals(entry.getKey())) {
                    log.warn("Transformer config {} is masked by implicit config of that name", entry.getKey());
                    return false;
                } else {
                    return true;
                }
            });
        }

        @Override
        protected ConfigDef config(Transformation<?> transformation) {
            return transformation.config();
        }

        @Override
        protected void validateProps(String prefix) {
            String prefixedNegate = prefix + PredicatedTransformation.NEGATE_CONFIG;
            String prefixedPredicate = prefix + PredicatedTransformation.PREDICATE_CONFIG;
            if (props.containsKey(prefixedNegate) && !props.containsKey(prefixedPredicate)) {
                throw new ConfigException("Config '" + prefixedNegate + "' was provided but there is no config '"
                        + prefixedPredicate + "' defining a predicate to be negated.");
            }
        }
    }.enrich(newDef);
    new EnrichablePlugin<Predicate<?>>("Predicate", PREDICATES_CONFIG, PREDICATES_GROUP, (Class) Predicate.class, props, requireFullConfig) {

        @Override
        protected Set<PluginDesc<Predicate<?>>> plugins() {
            return plugins.predicates();
        }

        @Override
        protected ConfigDef config(Predicate<?> predicate) {
            return predicate.config();
        }
    }.enrich(newDef);
    return newDef;
}
Also used: Plugins (org.apache.kafka.connect.runtime.isolation.Plugins), Range.atLeast (org.apache.kafka.common.config.ConfigDef.Range.atLeast), Type (org.apache.kafka.common.config.ConfigDef.Type), LoggerFactory (org.slf4j.LoggerFactory), PluginDesc (org.apache.kafka.connect.runtime.isolation.PluginDesc), ArrayList (java.util.ArrayList), Transformation (org.apache.kafka.connect.transforms.Transformation), HashSet (java.util.HashSet), ToleranceType (org.apache.kafka.connect.runtime.errors.ToleranceType), Locale (java.util.Locale), Width (org.apache.kafka.common.config.ConfigDef.Width), Map (java.util.Map), Importance (org.apache.kafka.common.config.ConfigDef.Importance), LinkedHashSet (java.util.LinkedHashSet), ConfigDef (org.apache.kafka.common.config.ConfigDef), Utils (org.apache.kafka.common.utils.Utils), ValidString.in (org.apache.kafka.common.config.ConfigDef.ValidString.in), Logger (org.slf4j.Logger), ConnectRecord (org.apache.kafka.connect.connector.ConnectRecord), Set (java.util.Set), ConfigException (org.apache.kafka.common.config.ConfigException), Collectors (java.util.stream.Collectors), List (java.util.List), NonEmptyStringWithoutControlChars.nonEmptyStringWithoutControlChars (org.apache.kafka.common.config.ConfigDef.NonEmptyStringWithoutControlChars.nonEmptyStringWithoutControlChars), Stream (java.util.stream.Stream), Predicate (org.apache.kafka.connect.transforms.predicates.Predicate), AbstractConfig (org.apache.kafka.common.config.AbstractConfig), ConnectException (org.apache.kafka.connect.errors.ConnectException), Modifier (java.lang.reflect.Modifier), Collections (java.util.Collections)
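
As a usage sketch: enrich is what expands a flat transforms entry into per-alias config keys, including the implicit predicate and negate keys defined above. The alias and transform below are illustrative, and a live Plugins instance is assumed so the alias's type can be resolved against the scanned transformation plugins:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.runtime.ConnectorConfig;
import org.apache.kafka.connect.runtime.isolation.Plugins;

public class EnrichSketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("transforms", "route");
        props.put("transforms.route.type", "org.apache.kafka.connect.transforms.RegexRouter");
        props.put("transforms.route.regex", "(.*)");
        props.put("transforms.route.replacement", "prefix-$1");
        // Empty worker props: only classpath plugins are scanned.
        Plugins plugins = new Plugins(new HashMap<>());
        // The enriched def now defines transforms.route.* keys, including the implicit
        // transforms.route.predicate and transforms.route.negate parameters.
        ConfigDef enriched = ConnectorConfig.enrich(plugins, ConnectorConfig.configDef(), props, false);
        System.out.println(enriched.names());
    }
}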

Example 29 with Plugins

Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.

Class TransformationConfigTest, method testEmbeddedConfigHoistField:

@Test
public void testEmbeddedConfigHoistField() {
    // Validate that we can construct a Connector config containing the extended config for the transform
    HashMap<String, String> connProps = new HashMap<>();
    connProps.put("name", "foo");
    connProps.put("connector.class", MockConnector.class.getName());
    connProps.put("transforms", "example");
    connProps.put("transforms.example.type", HoistField.Value.class.getName());
    connProps.put("transforms.example.field", "field");
    // Safe when we're only constructing the config
    Plugins plugins = null;
    new ConnectorConfig(plugins, connProps);
}
Also used: HashMap (java.util.HashMap), MockConnector (org.apache.kafka.connect.tools.MockConnector), Plugins (org.apache.kafka.connect.runtime.isolation.Plugins), Test (org.junit.Test)

Example 30 with Plugins

Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.

Class TransformationConfigTest, method testEmbeddedConfigRegexRouter:

@Test
public void testEmbeddedConfigRegexRouter() {
    // Validate that we can construct a Connector config containing the extended config for the transform
    HashMap<String, String> connProps = new HashMap<>();
    connProps.put("name", "foo");
    connProps.put("connector.class", MockConnector.class.getName());
    connProps.put("transforms", "example");
    connProps.put("transforms.example.type", RegexRouter.class.getName());
    connProps.put("transforms.example.regex", "(.*)");
    connProps.put("transforms.example.replacement", "prefix-$1");
    // Safe when we're only constructing the config
    Plugins plugins = null;
    new ConnectorConfig(plugins, connProps);
}
Also used: HashMap (java.util.HashMap), RegexRouter (org.apache.kafka.connect.transforms.RegexRouter), MockConnector (org.apache.kafka.connect.tools.MockConnector), Plugins (org.apache.kafka.connect.runtime.isolation.Plugins), Test (org.junit.Test)
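
These embedded-config tests repeat the same four lines of setup. A possible refactor, not present in the original tests, hoists that shared boilerplate into a helper (the class and method names are made up); each test would then add only its transform-specific keys:

import java.util.HashMap;

import org.apache.kafka.connect.tools.MockConnector;

public class TransformationConfigTestSupport {
    // Hypothetical helper capturing the shared setup from the tests above.
    static HashMap<String, String> baseTransformProps(String transformClass) {
        HashMap<String, String> connProps = new HashMap<>();
        connProps.put("name", "foo");
        connProps.put("connector.class", MockConnector.class.getName());
        connProps.put("transforms", "example");
        connProps.put("transforms.example.type", transformClass);
        return connProps;
    }
}

The RegexRouter test above would then reduce to calling baseTransformProps(RegexRouter.class.getName()) and adding its regex and replacement entries.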

Aggregations

Plugins (org.apache.kafka.connect.runtime.isolation.Plugins): 34 uses
HashMap (java.util.HashMap): 27 uses
MockConnector (org.apache.kafka.connect.tools.MockConnector): 24 uses
Test (org.junit.Test): 24 uses
Worker (org.apache.kafka.connect.runtime.Worker): 7 uses
URI (java.net.URI): 4 uses
Connect (org.apache.kafka.connect.runtime.Connect): 4 uses
RestServer (org.apache.kafka.connect.runtime.rest.RestServer): 4 uses
DistributedConfig (org.apache.kafka.connect.runtime.distributed.DistributedConfig): 3 uses
DistributedHerder (org.apache.kafka.connect.runtime.distributed.DistributedHerder): 3 uses
StandaloneConfig (org.apache.kafka.connect.runtime.standalone.StandaloneConfig): 3 uses
ConfigBackingStore (org.apache.kafka.connect.storage.ConfigBackingStore): 3 uses
Converter (org.apache.kafka.connect.storage.Converter): 3 uses
KafkaConfigBackingStore (org.apache.kafka.connect.storage.KafkaConfigBackingStore): 3 uses
KafkaOffsetBackingStore (org.apache.kafka.connect.storage.KafkaOffsetBackingStore): 3 uses
KafkaStatusBackingStore (org.apache.kafka.connect.storage.KafkaStatusBackingStore): 3 uses
StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore): 3 uses
MockTime (org.apache.kafka.common.utils.MockTime): 2 uses
Time (org.apache.kafka.common.utils.Time): 2 uses
Herder (org.apache.kafka.connect.runtime.Herder): 2 uses