Use of org.apache.kafka.connect.storage.ConfigBackingStore in project apache-kafka-on-k8s by banzaicloud.
The class ConnectDistributed, method main().
public static void main(String[] args) throws Exception {
    if (args.length < 1) {
        log.info("Usage: ConnectDistributed worker.properties");
        Exit.exit(1);
    }
    try {
        Time time = Time.SYSTEM;
        log.info("Kafka Connect distributed worker initializing ...");
        long initStart = time.hiResClockMs();
        WorkerInfo initInfo = new WorkerInfo();
        initInfo.logAll();
        String workerPropsFile = args[0];
        Map<String, String> workerProps = !workerPropsFile.isEmpty()
                ? Utils.propsToStringMap(Utils.loadProps(workerPropsFile))
                : Collections.<String, String>emptyMap();
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        DistributedConfig config = new DistributedConfig(workerProps);
        String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", kafkaClusterId);
        RestServer rest = new RestServer(config);
        URI advertisedUrl = rest.advertisedUrl();
        String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
        KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
        offsetBackingStore.configure(config);
        Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore);
        Converter internalValueConverter = worker.getInternalValueConverter();
        StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter);
        statusBackingStore.configure(config);
        ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, config);
        DistributedHerder herder = new DistributedHerder(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, advertisedUrl.toString());
        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
        try {
            connect.start();
        } catch (Exception e) {
            log.error("Failed to start Connect", e);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
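For reference, the first thing main() does with its argument is load worker.properties through Utils.loadProps and flatten it with Utils.propsToStringMap. The following standalone sketch isolates just that loading step; the default path config/connect-distributed.properties is the sample file shipped with Kafka and is used here only as an assumed fallback.
import java.util.Map;

import org.apache.kafka.common.utils.Utils;

public class WorkerPropsSketch {
    public static void main(String[] args) throws Exception {
        // Fall back to the sample file shipped with Kafka if no path is given (assumption for illustration).
        String workerPropsFile = args.length > 0 ? args[0] : "config/connect-distributed.properties";
        Map<String, String> workerProps = Utils.propsToStringMap(Utils.loadProps(workerPropsFile));
        // Print the flattened map that DistributedConfig would later validate.
        workerProps.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}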
Use of org.apache.kafka.connect.storage.ConfigBackingStore in project apache-kafka-on-k8s by banzaicloud.
The class AbstractHerderTest, method createConfigValidationHerder().
private AbstractHerder createConfigValidationHerder(Class<? extends Connector> connectorClass) {
    ConfigBackingStore configStore = strictMock(ConfigBackingStore.class);
    StatusBackingStore statusStore = strictMock(StatusBackingStore.class);
    AbstractHerder herder = partialMockBuilder(AbstractHerder.class)
            .withConstructor(Worker.class, String.class, String.class, StatusBackingStore.class, ConfigBackingStore.class)
            .withArgs(worker, workerId, kafkaClusterId, statusStore, configStore)
            .addMockedMethod("generation")
            .createMock();
    EasyMock.expect(herder.generation()).andStubReturn(generation);
    // Call to validateConnectorConfig
    EasyMock.expect(worker.getPlugins()).andStubReturn(plugins);
    final Connector connector;
    try {
        connector = connectorClass.newInstance();
    } catch (InstantiationException | IllegalAccessException e) {
        throw new RuntimeException("Couldn't create connector", e);
    }
    EasyMock.expect(plugins.newConnector(connectorClass.getName())).andReturn(connector);
    EasyMock.expect(plugins.compareAndSwapLoaders(connector)).andReturn(classLoader);
    return herder;
}
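The helper above relies on EasyMock's partial-mock builder: the real constructor runs, but generation() is replaced with a stub. A minimal, self-contained sketch of that pattern, using a hypothetical Service class standing in for AbstractHerder, could look like this:
import org.easymock.EasyMock;

public class PartialMockSketch {
    // Hypothetical class standing in for AbstractHerder.
    public static class Service {
        public String describe() {
            return "generation=" + generation();
        }
        public int generation() {
            return -1;
        }
    }

    public static void main(String[] args) {
        // Only generation() is mocked; describe() keeps its real implementation.
        Service service = EasyMock.partialMockBuilder(Service.class)
                .addMockedMethod("generation")
                .createMock();
        EasyMock.expect(service.generation()).andStubReturn(5);
        EasyMock.replay(service);
        System.out.println(service.describe()); // prints "generation=5"
        EasyMock.verify(service);
    }
}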
Use of org.apache.kafka.connect.storage.ConfigBackingStore in project kafka by apache.
The class MirrorMaker, method addHerder().
private void addHerder(SourceAndTarget sourceAndTarget) {
    log.info("creating herder for " + sourceAndTarget.toString());
    Map<String, String> workerProps = config.workerConfig(sourceAndTarget);
    String advertisedUrl = advertisedBaseUrl + "/" + sourceAndTarget.source();
    String workerId = sourceAndTarget.toString();
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig distributedConfig = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(distributedConfig);
    // Create the admin client to be shared by all backing stores for this herder
    Map<String, Object> adminProps = new HashMap<>(distributedConfig.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, distributedConfig, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(distributedConfig);
    Worker worker = new Worker(workerId, time, plugins, distributedConfig, offsetBackingStore, CLIENT_CONFIG_OVERRIDE_POLICY);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(distributedConfig);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, distributedConfig, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed
    // when the herder is stopped. MirrorMaker has multiple herders, and having the herder own the close
    // responsibility is much easier than tracking the various shared admin objects in this class.
    Herder herder = new DistributedHerder(distributedConfig, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, advertisedUrl, CLIENT_CONFIG_OVERRIDE_POLICY, sharedAdmin);
    herders.put(sourceAndTarget, herder);
}
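MirrorMaker calls addHerder once per replication direction, so each SourceAndTarget key is an ordered pair of cluster aliases. The sketch below shows one hedged way such pairs could be enumerated for a two-cluster setup; the aliases "primary" and "backup" are placeholders, and the real driver derives them from the clusters list in its MirrorMaker configuration.
import java.util.Arrays;
import java.util.List;

import org.apache.kafka.connect.mirror.SourceAndTarget;

public class HerderPairsSketch {
    public static void main(String[] args) {
        List<String> clusters = Arrays.asList("primary", "backup");   // placeholder aliases
        for (String source : clusters) {
            for (String target : clusters) {
                if (!source.equals(target)) {
                    // One herder per direction; addHerder(pair) would be invoked here.
                    SourceAndTarget pair = new SourceAndTarget(source, target);
                    System.out.println("would create herder for " + pair);
                }
            }
        }
    }
}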
Use of org.apache.kafka.connect.storage.ConfigBackingStore in project kafka by apache.
The class ConnectDistributed, method startConnect().
public Connect startConnect(Map<String, String> workerProps) {
    log.info("Scanning for plugin classes. This might take a moment ...");
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig config = new DistributedConfig(workerProps);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    log.debug("Kafka cluster ID: {}", kafkaClusterId);
    RestServer rest = new RestServer(config);
    rest.initializeServer();
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
    // Create the admin client to be shared by all backing stores.
    Map<String, Object> adminProps = new HashMap<>(config.originals());
    ConnectUtils.addMetricsContextProperties(adminProps, config, kafkaClusterId);
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps);
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin);
    offsetBackingStore.configure(config);
    ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(
            config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG),
            config, ConnectorClientConfigOverridePolicy.class);
    Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy);
    WorkerConfigTransformer configTransformer = worker.configTransformer();
    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter, sharedAdmin);
    statusBackingStore.configure(config);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(internalValueConverter, config, configTransformer, sharedAdmin);
    // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed
    // when the herder is stopped. This is easier than having to track and own the lifecycle ourselves.
    DistributedHerder herder = new DistributedHerder(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, advertisedUrl.toString(), connectorClientConfigOverridePolicy, sharedAdmin);
    final Connect connect = new Connect(herder, rest);
    log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
    try {
        connect.start();
    } catch (Exception e) {
        log.error("Failed to start Connect", e);
        connect.stop();
        Exit.exit(3);
    }
    return connect;
}
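Because startConnect is public, the same bootstrap can be driven in-process, which is roughly what embedded integration-test harnesses do. A minimal sketch, assuming a broker on localhost:9092; every property value below is a placeholder assumption, not taken from the snippet above:
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.cli.ConnectDistributed;
import org.apache.kafka.connect.runtime.Connect;

public class EmbeddedWorkerSketch {
    public static void main(String[] args) {
        Map<String, String> workerProps = new HashMap<>();
        workerProps.put("bootstrap.servers", "localhost:9092");          // assumed local broker
        workerProps.put("group.id", "embedded-connect");
        workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        workerProps.put("config.storage.topic", "connect-configs");      // placeholder topic names
        workerProps.put("offset.storage.topic", "connect-offsets");
        workerProps.put("status.storage.topic", "connect-status");

        Connect connect = new ConnectDistributed().startConnect(workerProps);
        // Block until the worker is shut down via Ctrl-C or the REST shutdown endpoint.
        connect.awaitStop();
    }
}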
Use of org.apache.kafka.connect.storage.ConfigBackingStore in project kafka by apache.
The class AbstractHerderTest, method createConfigValidationHerder().
private AbstractHerder createConfigValidationHerder(Class<? extends Connector> connectorClass,
                                                    ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy,
                                                    int countOfCallingNewConnector) {
    ConfigBackingStore configStore = strictMock(ConfigBackingStore.class);
    StatusBackingStore statusStore = strictMock(StatusBackingStore.class);
    AbstractHerder herder = partialMockBuilder(AbstractHerder.class)
            .withConstructor(Worker.class, String.class, String.class, StatusBackingStore.class, ConfigBackingStore.class, ConnectorClientConfigOverridePolicy.class)
            .withArgs(worker, workerId, kafkaClusterId, statusStore, configStore, connectorClientConfigOverridePolicy)
            .addMockedMethod("generation")
            .createMock();
    EasyMock.expect(herder.generation()).andStubReturn(generation);
    // Call to validateConnectorConfig
    EasyMock.expect(worker.configTransformer()).andReturn(transformer).times(2);
    final Capture<Map<String, String>> configCapture = EasyMock.newCapture();
    EasyMock.expect(transformer.transform(EasyMock.capture(configCapture))).andAnswer(configCapture::getValue);
    EasyMock.expect(worker.getPlugins()).andStubReturn(plugins);
    final Connector connector;
    try {
        connector = connectorClass.getConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException("Couldn't create connector", e);
    }
    if (countOfCallingNewConnector > 0) {
        EasyMock.expect(plugins.newConnector(connectorClass.getName())).andReturn(connector).times(countOfCallingNewConnector);
        EasyMock.expect(plugins.compareAndSwapLoaders(connector)).andReturn(classLoader).times(countOfCallingNewConnector);
    }
    return herder;
}
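The transform stub above is the standard EasyMock "echo" idiom: capture the argument and answer with it, so config transformation becomes a no-op in the test. A standalone sketch of that idiom, using a hypothetical Transformer interface in place of WorkerConfigTransformer:
import java.util.Collections;
import java.util.Map;

import org.easymock.Capture;
import org.easymock.EasyMock;

public class CaptureEchoSketch {
    // Hypothetical collaborator standing in for WorkerConfigTransformer.
    public interface Transformer {
        Map<String, String> transform(Map<String, String> config);
    }

    public static void main(String[] args) {
        Transformer transformer = EasyMock.mock(Transformer.class);
        Capture<Map<String, String>> configCapture = EasyMock.newCapture();
        // Whatever map is passed in is captured and returned unchanged.
        EasyMock.expect(transformer.transform(EasyMock.capture(configCapture)))
                .andAnswer(configCapture::getValue);
        EasyMock.replay(transformer);

        Map<String, String> input = Collections.singletonMap("tasks.max", "1");
        System.out.println(transformer.transform(input).equals(input)); // true
        EasyMock.verify(transformer);
    }
}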