Use of org.apache.kafka.connect.runtime.Herder in project apache-kafka-on-k8s by banzaicloud: class StandaloneHerderTest, method expectAdd.
// Records the mock expectations for successfully creating a connector of the given
// type (source or sink): the worker starts the connector and reports it running,
// one task config is generated, and that single task is started.
private void expectAdd(SourceSink sourceSink) throws Exception {
Map<String, String> connectorProps = connectorConfig(sourceSink);
// Source and sink connectors are parsed with their respective config classes.
ConnectorConfig connConfig = sourceSink == SourceSink.SOURCE ? new SourceConnectorConfig(plugins, connectorProps) : new SinkConnectorConfig(plugins, connectorProps);
worker.startConnector(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(connectorProps), EasyMock.anyObject(HerderConnectorContext.class), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED));
// startConnector reports success...
EasyMock.expectLastCall().andReturn(true);
// ...and the worker subsequently sees the connector as running.
EasyMock.expect(worker.isRunning(CONNECTOR_NAME)).andReturn(true);
// The creation callback is expected to receive exactly this ConnectorInfo
// (connector name, its config, and the single task id 0).
ConnectorInfo connInfo = new ConnectorInfo(CONNECTOR_NAME, connectorProps, Arrays.asList(new ConnectorTaskId(CONNECTOR_NAME, 0)), SourceSink.SOURCE == sourceSink ? ConnectorType.SOURCE : ConnectorType.SINK);
createCallback.onCompletion(null, new Herder.Created<>(true, connInfo));
EasyMock.expectLastCall();
// And we should instantiate the tasks. For a sink task, we should see added properties for the input topic partitions
Map<String, String> generatedTaskProps = taskConfig(sourceSink);
EasyMock.expect(worker.connectorTaskConfigs(CONNECTOR_NAME, connConfig)).andReturn(singletonList(generatedTaskProps));
worker.startTask(new ConnectorTaskId(CONNECTOR_NAME, 0), connectorConfig(sourceSink), generatedTaskProps, herder, TargetState.STARTED);
// startTask reports success as well.
EasyMock.expectLastCall().andReturn(true);
// Type lookups may happen any number of times during the add flow.
EasyMock.expect(herder.connectorTypeForClass(BogusSourceConnector.class.getName())).andReturn(ConnectorType.SOURCE).anyTimes();
EasyMock.expect(herder.connectorTypeForClass(BogusSinkConnector.class.getName())).andReturn(ConnectorType.SINK).anyTimes();
worker.isSinkConnector(CONNECTOR_NAME);
// Answer matches the connector type under test.
PowerMock.expectLastCall().andReturn(sourceSink == SourceSink.SINK);
}
Use of org.apache.kafka.connect.runtime.Herder in project apache-kafka-on-k8s by banzaicloud: class ConnectStandalone, method main.
/**
 * Entry point for a standalone Connect worker.
 *
 * <p>args[0] names the worker properties file; every remaining argument names a
 * connector properties file whose connector is created synchronously at startup.
 * Exits with 1 on bad usage, 3 on a connector creation failure, 2 on any other
 * startup error.
 */
public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }
    try {
        Time clock = Time.SYSTEM;
        log.info("Kafka Connect standalone worker initializing ...");
        long startedAtMs = clock.hiResClockMs();
        WorkerInfo workerInfo = new WorkerInfo();
        workerInfo.logAll();
        String workerPropsPath = args[0];
        // An empty path means "no worker properties file": start from an empty config map.
        Map<String, String> workerProps = workerPropsPath.isEmpty() ? Collections.<String, String>emptyMap() : Utils.propsToStringMap(Utils.loadProps(workerPropsPath));
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        StandaloneConfig config = new StandaloneConfig(workerProps);
        String clusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", clusterId);
        RestServer rest = new RestServer(config);
        URI advertised = rest.advertisedUrl();
        // The worker id is the REST-advertised host:port.
        String workerId = advertised.getHost() + ":" + advertised.getPort();
        Worker worker = new Worker(workerId, clock, plugins, config, new FileOffsetBackingStore());
        Herder herder = new StandaloneHerder(worker, clusterId);
        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect standalone worker initialization took {}ms", clock.hiResClockMs() - startedAtMs);
        try {
            connect.start();
            // Create each connector in command-line order, waiting for each to finish.
            for (final String connectorFile : Arrays.copyOfRange(args, 1, args.length)) {
                Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorFile));
                FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>((error, created) -> {
                    if (error != null)
                        log.error("Failed to create job for {}", connectorFile);
                    else
                        log.info("Created connector {}", created.result().name());
                });
                herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, cb);
                cb.get();
            }
        } catch (Throwable t) {
            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
Use of org.apache.kafka.connect.runtime.Herder in project kafka by apache: class MirrorMaker, method stop.
/**
 * Stops this MirrorMaker instance. Idempotent: only the first caller (the one
 * that flips the {@code shutdown} flag) performs the shutdown work.
 *
 * <p>Fix: previously an exception from one {@code herder.stop()} aborted the
 * loop, leaving the remaining herders running and their {@code stopLatch}
 * counts undecremented, so awaiters of the latch could hang forever. Each
 * herder is now stopped best-effort, and the latch is always counted down.
 */
public void stop() {
    boolean wasShuttingDown = shutdown.getAndSet(true);
    if (!wasShuttingDown) {
        log.info("Kafka MirrorMaker stopping");
        for (Herder herder : herders.values()) {
            try {
                herder.stop();
            } catch (Throwable t) {
                // Keep going: one failed herder must not prevent the others
                // from being stopped during shutdown.
                log.error("Failed to stop herder", t);
            } finally {
                stopLatch.countDown();
            }
        }
        log.info("Kafka MirrorMaker stopped.");
    }
}
Use of org.apache.kafka.connect.runtime.Herder in project kafka by apache: class ConnectStandalone, method main.
/**
 * Command-line entry point for a standalone Connect worker.
 *
 * <p>args[0] names the worker properties file; each remaining argument names a
 * connector properties file whose connector is created synchronously at startup.
 * Exits with 1 on bad usage or {@code --help}, 3 on a connector creation
 * failure, 2 on any other startup error.
 */
public static void main(String[] args) {
    // Print usage and bail out on too few arguments or an explicit help request.
    if (args.length < 2 || Arrays.asList(args).contains("--help")) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }
    try {
        Time clock = Time.SYSTEM;
        log.info("Kafka Connect standalone worker initializing ...");
        long startedAtMs = clock.hiResClockMs();
        WorkerInfo workerInfo = new WorkerInfo();
        workerInfo.logAll();
        String workerPropsPath = args[0];
        // An empty path means "no worker properties file": start from an empty config map.
        Map<String, String> workerProps;
        if (workerPropsPath.isEmpty()) {
            workerProps = Collections.emptyMap();
        } else {
            workerProps = Utils.propsToStringMap(Utils.loadProps(workerPropsPath));
        }
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        StandaloneConfig config = new StandaloneConfig(workerProps);
        String clusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", clusterId);
        RestServer rest = new RestServer(config);
        rest.initializeServer();
        URI advertised = rest.advertisedUrl();
        // The worker id is the REST-advertised host:port.
        String workerId = advertised.getHost() + ":" + advertised.getPort();
        // The override policy governs which client configs individual connectors may override.
        ConnectorClientConfigOverridePolicy overridePolicy = plugins.newPlugin(config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG), config, ConnectorClientConfigOverridePolicy.class);
        Worker worker = new Worker(workerId, clock, plugins, config, new FileOffsetBackingStore(), overridePolicy);
        Herder herder = new StandaloneHerder(worker, clusterId, overridePolicy);
        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect standalone worker initialization took {}ms", clock.hiResClockMs() - startedAtMs);
        try {
            connect.start();
            // Create each connector in command-line order, waiting for each to finish.
            for (final String connectorPropsPath : Arrays.copyOfRange(args, 1, args.length)) {
                Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorPropsPath));
                FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>((error, created) -> {
                    if (error == null)
                        log.info("Created connector {}", created.result().name());
                    else
                        log.error("Failed to create job for {}", connectorPropsPath);
                });
                herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, cb);
                cb.get();
            }
        } catch (Throwable t) {
            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
Use of org.apache.kafka.connect.runtime.Herder in project kafka by apache: class DistributedHerderTest, method testDestroyConnector.
// Verifies that deleting a connector removes its config from the config backing
// store, completes the caller's callback with Created(false, null), deletes the
// connector's active topic records from the status store, and triggers a rejoin
// once the removal is reflected back through the config update listener.
@Test
public void testDestroyConnector() throws Exception {
EasyMock.expect(member.memberId()).andStubReturn("leader");
EasyMock.expect(member.currentProtocolVersion()).andStubReturn(CONNECT_PROTOCOL_V0);
// Start with one connector
EasyMock.expect(worker.getPlugins()).andReturn(plugins);
expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList());
expectPostRebalanceCatchup(SNAPSHOT);
// Capture the start callback so the mock can complete the async connector start.
Capture<Callback<TargetState>> onStart = newCapture();
worker.startConnector(EasyMock.eq(CONN1), EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED), capture(onStart));
PowerMock.expectLastCall().andAnswer(() -> {
onStart.getValue().onCompletion(null, TargetState.STARTED);
return true;
});
EasyMock.expect(worker.isRunning(CONN1)).andReturn(true);
EasyMock.expect(worker.connectorTaskConfigs(CONN1, conn1SinkConfig)).andReturn(TASK_CONFIGS);
// And delete the connector
member.wakeup();
PowerMock.expectLastCall();
configBackingStore.removeConnectorConfig(CONN1);
PowerMock.expectLastCall();
// Deletion completes the caller's callback with created=false and no info.
putConnectorCallback.onCompletion(null, new Herder.Created<>(false, null));
PowerMock.expectLastCall();
member.poll(EasyMock.anyInt());
PowerMock.expectLastCall();
// The change eventually is reflected to the config topic and the deleted connector and
// tasks are revoked
member.wakeup();
PowerMock.expectLastCall();
// The connector's two active topics must be cleaned out of the status store;
// times(2) because the cleanup runs on both the delete and the revocation path.
TopicStatus fooStatus = new TopicStatus(FOO_TOPIC, CONN1, 0, time.milliseconds());
TopicStatus barStatus = new TopicStatus(BAR_TOPIC, CONN1, 0, time.milliseconds());
EasyMock.expect(statusBackingStore.getAllTopics(EasyMock.eq(CONN1))).andReturn(new HashSet<>(Arrays.asList(fooStatus, barStatus))).times(2);
statusBackingStore.deleteTopic(EasyMock.eq(CONN1), EasyMock.eq(FOO_TOPIC));
PowerMock.expectLastCall().times(2);
statusBackingStore.deleteTopic(EasyMock.eq(CONN1), EasyMock.eq(BAR_TOPIC));
PowerMock.expectLastCall().times(2);
expectRebalance(Arrays.asList(CONN1), Arrays.asList(TASK1), ConnectProtocol.Assignment.NO_ERROR, 2, Collections.emptyList(), Collections.emptyList(), 0);
expectPostRebalanceCatchup(ClusterConfigState.EMPTY);
member.requestRejoin();
PowerMock.expectLastCall();
PowerMock.replayAll();
// Exercise: request deletion, then drive the herder through two ticks.
herder.deleteConnectorConfig(CONN1, putConnectorCallback);
herder.tick();
time.sleep(1000L);
assertStatistics("leaderUrl", false, 3, 1, 100, 1000L);
// read updated config that removes the connector
configUpdateListener.onConnectorConfigRemove(CONN1);
herder.configState = ClusterConfigState.EMPTY;
herder.tick();
time.sleep(1000L);
assertStatistics("leaderUrl", true, 3, 1, 100, 2100L);
PowerMock.verifyAll();
}
Aggregations