Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.
The class MirrorMaker, method addHerder.
private void addHerder(SourceAndTarget sourceAndTarget) {
    log.info("creating herder for " + sourceAndTarget.toString());
    Map<String, String> workerProps = config.workerConfig(sourceAndTarget);
    String advertisedUrl = advertisedBaseUrl + "/" + sourceAndTarget.source();
    String workerId = sourceAndTarget.toString();
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig distributedConfig = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(distributedConfig);
    // Create the admin client to be shared by all backing stores for this herder
    Map<String, Object> adminProps = new HashMap<>(distributedConfig.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, distributedConfig, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(distributedConfig);
    Worker worker = new Worker(workerId, time, plugins, distributedConfig, offsetBackingStore, CLIENT_CONFIG_OVERRIDE_POLICY);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(distributedConfig);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, distributedConfig, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed
    // when the herder is stopped. MirrorMaker has multiple herders, and having the herder own the close
    // responsibility is much easier than tracking the various shared admin objects in this class.
    Herder herder = new DistributedHerder(distributedConfig, time, worker, kafkaClusterId, statusBackingStore,
            configBackingStore, advertisedUrl, CLIENT_CONFIG_OVERRIDE_POLICY, sharedAdmin);
    herders.put(sourceAndTarget, herder);
}
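For context, a minimal sketch of how the MirrorMaker constructor drives addHerder, once per replication flow. The clusterPairs() accessor on MirrorMakerConfig matches recent Kafka versions, but treat it as an assumption if yours differs:

// Sketch: one herder per enabled source->target pair, roughly as MirrorMaker's
// constructor does. config is the MirrorMakerConfig for this process.
for (SourceAndTarget sourceAndTarget : config.clusterPairs()) {
    addHerder(sourceAndTarget);
}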
Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.
The class ConnectDistributed, method startConnect.
public Connect startConnect(Map<String, String> workerProps) {
    log.info("Scanning for plugin classes. This might take a moment ...");
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig config = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    log.debug("Kafka cluster ID: {}", kafkaClusterId);
    RestServer rest = new RestServer(config);
    rest.initializeServer();
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
    // Create the admin client to be shared by all backing stores.
    Map<String, Object> adminProps = new HashMap<>(config.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, config, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(config);
    ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(
            config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG),
            config, ConnectorClientConfigOverridePolicy.class);
    Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(config);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, config, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed
    // when the herder is stopped. This is easier than having to track and own the lifecycle ourselves.
    DistributedHerder herder = new DistributedHerder(config, time, worker, kafkaClusterId, statusBackingStore,
            configBackingStore, advertisedUrl.toString(), connectorClientConfigOverridePolicy, sharedAdmin);
    final Connect connect = new Connect(herder, rest);
    log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
    try {
        connect.start();
    } catch (Exception e) {
        log.error("Failed to start Connect", e);
        connect.stop();
        Exit.exit(3);
    }
    return connect;
}
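A hedged sketch of how this method is typically launched; ConnectDistributed.main() in Kafka does roughly this, with Utils coming from org.apache.kafka.common.utils:

// Load worker properties from a file and start the distributed worker.
Map<String, String> workerProps = Utils.propsToStringMap(Utils.loadProps("connect-distributed.properties"));
Connect connect = new ConnectDistributed().startConnect(workerProps);
// Block until the worker is shut down (e.g. via SIGTERM).
connect.awaitStop();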
Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.
The class ConnectorConfig, method enrich.
/**
 * Returns an enriched {@link ConfigDef} built on top of {@code baseConfigDef}, using the current configuration
 * specified in {@code props} as input.
 * <p>
 * {@code requireFullConfig} specifies whether required config values that are missing should cause an exception to be thrown.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static ConfigDef enrich(Plugins plugins, ConfigDef baseConfigDef, Map<String, String> props, boolean requireFullConfig) {
    ConfigDef newDef = new ConfigDef(baseConfigDef);
    new EnrichablePlugin<Transformation<?>>("Transformation", TRANSFORMS_CONFIG, TRANSFORMS_GROUP,
            (Class) Transformation.class, props, requireFullConfig) {

        @SuppressWarnings("rawtypes")
        @Override
        protected Set<PluginDesc<Transformation<?>>> plugins() {
            return plugins.transformations();
        }

        @Override
        protected ConfigDef initialConfigDef() {
            // All Transformations get these config parameters implicitly
            return super.initialConfigDef()
                    .define(PredicatedTransformation.PREDICATE_CONFIG, Type.STRING, "", Importance.MEDIUM,
                            "The alias of a predicate used to determine whether to apply this transformation.")
                    .define(PredicatedTransformation.NEGATE_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM,
                            "Whether the configured predicate should be negated.");
        }

        @Override
        protected Stream<Map.Entry<String, ConfigDef.ConfigKey>> configDefsForClass(String typeConfig) {
            return super.configDefsForClass(typeConfig)
                    .filter(entry -> {
                        // The implicit parameters mask any from the transformer with the same name
                        if (PredicatedTransformation.PREDICATE_CONFIG.equals(entry.getKey())
                                || PredicatedTransformation.NEGATE_CONFIG.equals(entry.getKey())) {
                            log.warn("Transformer config {} is masked by implicit config of that name", entry.getKey());
                            return false;
                        } else {
                            return true;
                        }
                    });
        }

        @Override
        protected ConfigDef config(Transformation<?> transformation) {
            return transformation.config();
        }

        @Override
        protected void validateProps(String prefix) {
            String prefixedNegate = prefix + PredicatedTransformation.NEGATE_CONFIG;
            String prefixedPredicate = prefix + PredicatedTransformation.PREDICATE_CONFIG;
            if (props.containsKey(prefixedNegate) && !props.containsKey(prefixedPredicate)) {
                throw new ConfigException("Config '" + prefixedNegate + "' was provided "
                        + "but there is no config '" + prefixedPredicate + "' defining a predicate to be negated.");
            }
        }
    }.enrich(newDef);

    new EnrichablePlugin<Predicate<?>>("Predicate", PREDICATES_CONFIG, PREDICATES_GROUP,
            (Class) Predicate.class, props, requireFullConfig) {

        @Override
        protected Set<PluginDesc<Predicate<?>>> plugins() {
            return plugins.predicates();
        }

        @Override
        protected ConfigDef config(Predicate<?> predicate) {
            return predicate.config();
        }
    }.enrich(newDef);
    return newDef;
}
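To illustrate what enrich() produces, a hedged sketch: given a connector config declaring one transform alias, the enriched ConfigDef gains keys prefixed with that alias. The property names follow the documented "transforms.<alias>.*" pattern; ConnectorConfig.configDef() supplies the base definition:

// Sketch: enrich a base connector ConfigDef with a declared transform alias.
Plugins plugins = new Plugins(new HashMap<>());
Map<String, String> props = new HashMap<>();
props.put("transforms", "route");
props.put("transforms.route.type", "org.apache.kafka.connect.transforms.RegexRouter");
ConfigDef enriched = ConnectorConfig.enrich(plugins, ConnectorConfig.configDef(), props, false);
// enriched now defines per-alias keys taken from RegexRouter.config(), such as
// "transforms.route.regex" and "transforms.route.replacement", plus the implicit
// "transforms.route.predicate" and "transforms.route.negate" added in initialConfigDef() above.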
Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.
The class TransformationConfigTest, method testEmbeddedConfigHoistField.
@Test
public void testEmbeddedConfigHoistField() {
    // Validate that we can construct a Connector config containing the extended config for the transform
    HashMap<String, String> connProps = new HashMap<>();
    connProps.put("name", "foo");
    connProps.put("connector.class", MockConnector.class.getName());
    connProps.put("transforms", "example");
    connProps.put("transforms.example.type", HoistField.Value.class.getName());
    connProps.put("transforms.example.field", "field");
    // Safe when we're only constructing the config
    Plugins plugins = null;
    new ConnectorConfig(plugins, connProps);
}
Use of org.apache.kafka.connect.runtime.isolation.Plugins in project kafka by apache.
The class TransformationConfigTest, method testEmbeddedConfigRegexRouter.
@Test
public void testEmbeddedConfigRegexRouter() {
    // Validate that we can construct a Connector config containing the extended config for the transform
    HashMap<String, String> connProps = new HashMap<>();
    connProps.put("name", "foo");
    connProps.put("connector.class", MockConnector.class.getName());
    connProps.put("transforms", "example");
    connProps.put("transforms.example.type", RegexRouter.class.getName());
    connProps.put("transforms.example.regex", "(.*)");
    connProps.put("transforms.example.replacement", "prefix-$1");
    // Safe when we're only constructing the config
    Plugins plugins = null;
    new ConnectorConfig(plugins, connProps);
}
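Both tests follow the same pattern, which could be captured in a shared helper. A hypothetical sketch (the helper name and signature are inventions for illustration, not Kafka test code):

// Hypothetical helper, not part of Kafka's test suite: builds the minimal connector
// props for a single transform alias and verifies that ConnectorConfig construction
// succeeds. A null Plugins is tolerated because construction does not perform plugin lookup.
private void assertEmbeddedConfigConstructs(Class<?> transformType, Map<String, String> transformProps) {
    HashMap<String, String> connProps = new HashMap<>();
    connProps.put("name", "foo");
    connProps.put("connector.class", MockConnector.class.getName());
    connProps.put("transforms", "example");
    connProps.put("transforms.example.type", transformType.getName());
    connProps.putAll(transformProps);
    new ConnectorConfig(null, connProps);
}

// Usage, mirroring the tests above:
// assertEmbeddedConfigConstructs(HoistField.Value.class,
//         Collections.singletonMap("transforms.example.field", "field"));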