Search in sources :

Example 1 with ConnectorClientConfigOverridePolicy

Use of org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy in the Apache Kafka project.

From the class Worker, method connectorClientConfigOverrides:

/**
 * Extracts the client config overrides for a connector (all configs with the given prefix)
 * and validates them against the worker's configured override policy.
 *
 * @param id the connector task id; only the connector name is passed to the policy
 * @param connConfig the connector's configuration, scanned for prefixed override properties
 * @param connectorClass the connector implementation class, forwarded to the policy request
 * @param clientConfigPrefix prefix identifying client overrides (e.g. "producer.override.")
 * @param connectorType whether the connector is a source or a sink
 * @param clientType the Kafka client (producer, consumer, or admin) the overrides apply to
 * @param connectorClientConfigOverridePolicy policy that decides which overrides are permitted
 * @return the map of client config overrides that passed validation
 * @throws ConnectException if the policy reports any override as not allowed
 */
private static Map<String, Object> connectorClientConfigOverrides(ConnectorTaskId id, ConnectorConfig connConfig, Class<? extends Connector> connectorClass, String clientConfigPrefix, ConnectorType connectorType, ConnectorClientConfigRequest.ClientType clientType, ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy) {
    Map<String, Object> clientOverrides = connConfig.originalsWithPrefix(clientConfigPrefix);
    ConnectorClientConfigRequest connectorClientConfigRequest = new ConnectorClientConfigRequest(id.connector(), connectorType, connectorClass, clientOverrides, clientType);
    List<ConfigValue> configValues = connectorClientConfigOverridePolicy.validate(connectorClientConfigRequest);
    // Collect only the config values the policy flagged with at least one error message.
    // Idiomatic isEmpty() checks instead of size() > 0.
    List<ConfigValue> errorConfigs = configValues.stream().filter(configValue -> !configValue.errorMessages().isEmpty()).collect(Collectors.toList());
    // These should be caught when the herder validates the connector configuration, but just in case
    if (!errorConfigs.isEmpty()) {
        throw new ConnectException("Client Config Overrides not allowed " + errorConfigs);
    }
    return clientOverrides;
}
Also used : LoggingContext(org.apache.kafka.connect.util.LoggingContext) SinkUtils(org.apache.kafka.connect.util.SinkUtils) JsonConverterConfig(org.apache.kafka.connect.json.JsonConverterConfig) Plugins(org.apache.kafka.connect.runtime.isolation.Plugins) LoggerFactory(org.slf4j.LoggerFactory) ConnectorType(org.apache.kafka.connect.health.ConnectorType) OffsetStorageWriter(org.apache.kafka.connect.storage.OffsetStorageWriter) ErrorHandlingMetrics(org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics) CloseableOffsetStorageReader(org.apache.kafka.connect.storage.CloseableOffsetStorageReader) ErrorReporter(org.apache.kafka.connect.runtime.errors.ErrorReporter) Converter(org.apache.kafka.connect.storage.Converter) Map(java.util.Map) DeadLetterQueueReporter(org.apache.kafka.connect.runtime.errors.DeadLetterQueueReporter) OffsetBackingStore(org.apache.kafka.connect.storage.OffsetBackingStore) OffsetStorageReader(org.apache.kafka.connect.storage.OffsetStorageReader) ClassLoaderUsage(org.apache.kafka.connect.runtime.isolation.Plugins.ClassLoaderUsage) ConnectUtils(org.apache.kafka.connect.util.ConnectUtils) TopicCreationGroup(org.apache.kafka.connect.util.TopicCreationGroup) Time(org.apache.kafka.common.utils.Time) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ConfigValue(org.apache.kafka.common.config.ConfigValue) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) List(java.util.List) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) Task(org.apache.kafka.connect.connector.Task) JsonConverter(org.apache.kafka.connect.json.JsonConverter) ConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) 
SourceTask(org.apache.kafka.connect.source.SourceTask) OffsetStorageReaderImpl(org.apache.kafka.connect.storage.OffsetStorageReaderImpl) Connector(org.apache.kafka.connect.connector.Connector) ConfigProvider(org.apache.kafka.common.config.provider.ConfigProvider) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) TopicAdmin(org.apache.kafka.connect.util.TopicAdmin) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) LogReporter(org.apache.kafka.connect.runtime.errors.LogReporter) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) HeaderConverter(org.apache.kafka.connect.storage.HeaderConverter) WorkerErrantRecordReporter(org.apache.kafka.connect.runtime.errors.WorkerErrantRecordReporter) MetricGroup(org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) ExecutorService(java.util.concurrent.ExecutorService) SinkTask(org.apache.kafka.connect.sink.SinkTask) Utils(org.apache.kafka.common.utils.Utils) Callback(org.apache.kafka.connect.util.Callback) Logger(org.slf4j.Logger) ConnectorClientConfigRequest(org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) MetricNameTemplate(org.apache.kafka.common.MetricNameTemplate) TimeUnit(java.util.concurrent.TimeUnit) ConnectException(org.apache.kafka.connect.errors.ConnectException) RetryWithToleranceOperator(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator) Collections(java.util.Collections) ConnectorClientConfigRequest(org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest) ConfigValue(org.apache.kafka.common.config.ConfigValue) ConnectException(org.apache.kafka.connect.errors.ConnectException)

Example 2 with ConnectorClientConfigOverridePolicy

Use of org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy in the Apache Kafka project.

From the class ConnectStandalone, method main:

/**
 * Entry point for the standalone Connect worker.
 * Usage: the first argument is the worker properties file; each remaining argument is a
 * connector properties file whose connector is created at startup.
 * Exit codes: 1 for bad usage, 3 for a connector startup failure, 2 for any other error.
 */
public static void main(String[] args) {
    if (args.length < 2 || Arrays.asList(args).contains("--help")) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }
    try {
        Time time = Time.SYSTEM;
        log.info("Kafka Connect standalone worker initializing ...");
        long initStart = time.hiResClockMs();
        WorkerInfo initInfo = new WorkerInfo();
        initInfo.logAll();
        String workerPropsFile = args[0];
        // An empty file name yields an empty worker config rather than a load failure.
        Map<String, String> workerProps = !workerPropsFile.isEmpty() ? Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap();
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        // Install the delegating classloader so plugin classes resolve correctly from here on.
        plugins.compareAndSwapWithDelegatingLoader();
        StandaloneConfig config = new StandaloneConfig(workerProps);
        String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", kafkaClusterId);
        RestServer rest = new RestServer(config);
        rest.initializeServer();
        URI advertisedUrl = rest.advertisedUrl();
        String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
        // The override policy is a pluggable class named by the worker config; it governs
        // which per-connector client config overrides are allowed.
        ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG), config, ConnectorClientConfigOverridePolicy.class);
        Worker worker = new Worker(workerId, time, plugins, config, new FileOffsetBackingStore(), connectorClientConfigOverridePolicy);
        Herder herder = new StandaloneHerder(worker, kafkaClusterId, connectorClientConfigOverridePolicy);
        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect standalone worker initialization took {}ms", time.hiResClockMs() - initStart);
        try {
            connect.start();
            // Create each connector sequentially, blocking on the callback so failures
            // surface before the next connector is submitted.
            for (final String connectorPropsFile : Arrays.copyOfRange(args, 1, args.length)) {
                Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorPropsFile));
                FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>((error, info) -> {
                    if (error != null)
                        log.error("Failed to create job for {}", connectorPropsFile);
                    else
                        log.info("Created connector {}", info.result().name());
                });
                herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, cb);
                cb.get();
            }
        } catch (Throwable t) {
            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
Also used : Connect(org.apache.kafka.connect.runtime.Connect) WorkerInfo(org.apache.kafka.connect.runtime.WorkerInfo) Time(org.apache.kafka.common.utils.Time) ConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy) URI(java.net.URI) FileOffsetBackingStore(org.apache.kafka.connect.storage.FileOffsetBackingStore) RestServer(org.apache.kafka.connect.runtime.rest.RestServer) StandaloneHerder(org.apache.kafka.connect.runtime.standalone.StandaloneHerder) Worker(org.apache.kafka.connect.runtime.Worker) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Herder(org.apache.kafka.connect.runtime.Herder) StandaloneHerder(org.apache.kafka.connect.runtime.standalone.StandaloneHerder) FutureCallback(org.apache.kafka.connect.util.FutureCallback) Plugins(org.apache.kafka.connect.runtime.isolation.Plugins)

Example 3 with ConnectorClientConfigOverridePolicy

Use of org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy in the Apache Kafka project.

From the class ConnectDistributed, method startConnect:

/**
 * Builds and starts a distributed Connect worker from the given worker properties:
 * plugin scanning, REST server, shared admin client, backing stores, worker, and herder.
 * Exits the process with code 3 if startup fails; otherwise returns the running Connect.
 * NOTE(review): {@code time} and {@code initStart} appear to be instance fields defined
 * outside this snippet — confirm against the enclosing class.
 */
public Connect startConnect(Map<String, String> workerProps) {
    log.info("Scanning for plugin classes. This might take a moment ...");
    Plugins plugins = new Plugins(workerProps);
    // Install the delegating classloader so plugin classes resolve correctly from here on.
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig config = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    log.debug("Kafka cluster ID: {}", kafkaClusterId);
    RestServer rest = new RestServer(config);
    rest.initializeServer();
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
    // Create the admin client to be shared by all backing stores.
    Map<String, Object> adminProps = new HashMap<>(config.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, config, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(config);
    // Pluggable policy governing which per-connector client config overrides are allowed.
    ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG), config, ConnectorClientConfigOverridePolicy.class);
    Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(config);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, config, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed when the
    // herder is stopped. This is easier than having to track and own the lifecycle ourselves.
    DistributedHerder herder = new DistributedHerder(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, advertisedUrl.toString(), connectorClientConfigOverridePolicy, sharedAdmin);
    final Connect connect = new Connect(herder, rest);
    log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
    try {
        connect.start();
    } catch (Exception e) {
        log.error("Failed to start Connect", e);
        connect.stop();
        Exit.exit(3);
    }
    return connect;
}
Also used : KafkaStatusBackingStore(org.apache.kafka.connect.storage.KafkaStatusBackingStore) StatusBackingStore(org.apache.kafka.connect.storage.StatusBackingStore) DistributedHerder(org.apache.kafka.connect.runtime.distributed.DistributedHerder) SharedTopicAdmin(org.apache.kafka.connect.util.SharedTopicAdmin) HashMap(java.util.HashMap) DistributedConfig(org.apache.kafka.connect.runtime.distributed.DistributedConfig) Connect(org.apache.kafka.connect.runtime.Connect) ConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy) URI(java.net.URI) WorkerConfigTransformer(org.apache.kafka.connect.runtime.WorkerConfigTransformer) KafkaStatusBackingStore(org.apache.kafka.connect.storage.KafkaStatusBackingStore) ConfigBackingStore(org.apache.kafka.connect.storage.ConfigBackingStore) KafkaConfigBackingStore(org.apache.kafka.connect.storage.KafkaConfigBackingStore) KafkaConfigBackingStore(org.apache.kafka.connect.storage.KafkaConfigBackingStore) RestServer(org.apache.kafka.connect.runtime.rest.RestServer) Worker(org.apache.kafka.connect.runtime.Worker) Converter(org.apache.kafka.connect.storage.Converter) KafkaOffsetBackingStore(org.apache.kafka.connect.storage.KafkaOffsetBackingStore) Plugins(org.apache.kafka.connect.runtime.isolation.Plugins)

Example 4 with ConnectorClientConfigOverridePolicy

Use of org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy in the Apache Kafka project.

From the class AbstractHerderTest, method createConfigValidationHerder:

/**
 * Builds a partially mocked AbstractHerder wired with the given override policy, plus the
 * EasyMock expectations needed for connector config validation tests.
 * NOTE(review): expectations are set in record phase here; callers presumably invoke
 * replay() before use — confirm against the test class.
 *
 * @param connectorClass the connector class to validate; must have a no-arg constructor
 * @param connectorClientConfigOverridePolicy policy passed to the herder constructor
 * @param countOfCallingNewConnector expected number of plugins.newConnector/classloader-swap
 *        calls; when 0 those expectations are not recorded at all
 * @return the partially mocked herder (only generation() is mocked)
 */
private AbstractHerder createConfigValidationHerder(Class<? extends Connector> connectorClass, ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy, int countOfCallingNewConnector) {
    ConfigBackingStore configStore = strictMock(ConfigBackingStore.class);
    StatusBackingStore statusStore = strictMock(StatusBackingStore.class);
    AbstractHerder herder = partialMockBuilder(AbstractHerder.class).withConstructor(Worker.class, String.class, String.class, StatusBackingStore.class, ConfigBackingStore.class, ConnectorClientConfigOverridePolicy.class).withArgs(worker, workerId, kafkaClusterId, statusStore, configStore, connectorClientConfigOverridePolicy).addMockedMethod("generation").createMock();
    EasyMock.expect(herder.generation()).andStubReturn(generation);
    // Call to validateConnectorConfig
    EasyMock.expect(worker.configTransformer()).andReturn(transformer).times(2);
    final Capture<Map<String, String>> configCapture = EasyMock.newCapture();
    // Identity transform: return whatever config map was passed in.
    EasyMock.expect(transformer.transform(EasyMock.capture(configCapture))).andAnswer(configCapture::getValue);
    EasyMock.expect(worker.getPlugins()).andStubReturn(plugins);
    final Connector connector;
    try {
        connector = connectorClass.getConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException("Couldn't create connector", e);
    }
    if (countOfCallingNewConnector > 0) {
        EasyMock.expect(plugins.newConnector(connectorClass.getName())).andReturn(connector).times(countOfCallingNewConnector);
        EasyMock.expect(plugins.compareAndSwapLoaders(connector)).andReturn(classLoader).times(countOfCallingNewConnector);
    }
    return herder;
}
Also used : StatusBackingStore(org.apache.kafka.connect.storage.StatusBackingStore) SourceConnector(org.apache.kafka.connect.source.SourceConnector) Connector(org.apache.kafka.connect.connector.Connector) AllConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy) PrincipalConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.PrincipalConnectorClientConfigOverridePolicy) ConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy) NoneConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.NoneConnectorClientConfigOverridePolicy) Map(java.util.Map) HashMap(java.util.HashMap) ConfigBackingStore(org.apache.kafka.connect.storage.ConfigBackingStore)

Aggregations

ConnectorClientConfigOverridePolicy (org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy)4 HashMap (java.util.HashMap)3 Plugins (org.apache.kafka.connect.runtime.isolation.Plugins)3 URI (java.net.URI)2 Map (java.util.Map)2 Time (org.apache.kafka.common.utils.Time)2 Connector (org.apache.kafka.connect.connector.Connector)2 Connect (org.apache.kafka.connect.runtime.Connect)2 Worker (org.apache.kafka.connect.runtime.Worker)2 ConfigBackingStore (org.apache.kafka.connect.storage.ConfigBackingStore)2 StatusBackingStore (org.apache.kafka.connect.storage.StatusBackingStore)2 ArrayList (java.util.ArrayList)1 Collection (java.util.Collection)1 Collections (java.util.Collections)1 List (java.util.List)1 Set (java.util.Set)1 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)1 ConcurrentMap (java.util.concurrent.ConcurrentMap)1 ExecutorService (java.util.concurrent.ExecutorService)1 Executors (java.util.concurrent.Executors)1