Search in sources :

Example 1 with Herder

use of org.apache.kafka.connect.runtime.Herder in project apache-kafka-on-k8s by banzaicloud.

The class StandaloneHerderTest, method expectAdd:

/**
 * Primes the mocks for a successful connector creation: the worker starts the
 * connector, reports it running, and then instantiates its single task.
 *
 * @param sourceSink whether to set expectations for a source or a sink connector
 */
private void expectAdd(SourceSink sourceSink) throws Exception {
    Map<String, String> config = connectorConfig(sourceSink);
    final ConnectorConfig parsedConfig;
    if (sourceSink == SourceSink.SOURCE) {
        parsedConfig = new SourceConnectorConfig(plugins, config);
    } else {
        parsedConfig = new SinkConnectorConfig(plugins, config);
    }
    // The worker is asked to start the connector and reports success.
    worker.startConnector(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(config), EasyMock.anyObject(HerderConnectorContext.class), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED));
    EasyMock.expectLastCall().andReturn(true);
    EasyMock.expect(worker.isRunning(CONNECTOR_NAME)).andReturn(true);
    // The creation callback should be completed with the new connector's info.
    ConnectorType type = sourceSink == SourceSink.SOURCE ? ConnectorType.SOURCE : ConnectorType.SINK;
    ConnectorInfo info = new ConnectorInfo(CONNECTOR_NAME, config, Arrays.asList(new ConnectorTaskId(CONNECTOR_NAME, 0)), type);
    createCallback.onCompletion(null, new Herder.Created<>(true, info));
    EasyMock.expectLastCall();
    // And we should instantiate the tasks. For a sink task, we should see added properties for the input topic partitions
    Map<String, String> taskProps = taskConfig(sourceSink);
    EasyMock.expect(worker.connectorTaskConfigs(CONNECTOR_NAME, parsedConfig)).andReturn(singletonList(taskProps));
    worker.startTask(new ConnectorTaskId(CONNECTOR_NAME, 0), connectorConfig(sourceSink), taskProps, herder, TargetState.STARTED);
    EasyMock.expectLastCall().andReturn(true);
    EasyMock.expect(herder.connectorTypeForClass(BogusSourceConnector.class.getName())).andReturn(ConnectorType.SOURCE).anyTimes();
    EasyMock.expect(herder.connectorTypeForClass(BogusSinkConnector.class.getName())).andReturn(ConnectorType.SINK).anyTimes();
    worker.isSinkConnector(CONNECTOR_NAME);
    PowerMock.expectLastCall().andReturn(sourceSink == SourceSink.SINK);
}
Also used : SourceConnectorConfig(org.apache.kafka.connect.runtime.SourceConnectorConfig) ConnectorInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) ConnectorConfig(org.apache.kafka.connect.runtime.ConnectorConfig) SinkConnectorConfig(org.apache.kafka.connect.runtime.SinkConnectorConfig) HerderConnectorContext(org.apache.kafka.connect.runtime.HerderConnectorContext) Herder(org.apache.kafka.connect.runtime.Herder)

Example 2 with Herder

use of org.apache.kafka.connect.runtime.Herder in project apache-kafka-on-k8s by banzaicloud.

The class ConnectStandalone, method main:

/**
 * Entry point for a standalone Connect worker: loads the worker config, builds
 * the worker/herder/REST stack, submits each connector config given on the
 * command line, and then blocks until shutdown.
 *
 * @param args worker properties file followed by one or more connector properties files
 */
public static void main(String[] args) throws Exception {
    // Require at least the worker config and one connector config.
    if (args.length < 2) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }
    try {
        Time time = Time.SYSTEM;
        log.info("Kafka Connect standalone worker initializing ...");
        long startMs = time.hiResClockMs();
        WorkerInfo workerInfo = new WorkerInfo();
        workerInfo.logAll();
        String workerConfigFile = args[0];
        Map<String, String> workerProps;
        if (workerConfigFile.isEmpty())
            workerProps = Collections.<String, String>emptyMap();
        else
            workerProps = Utils.propsToStringMap(Utils.loadProps(workerConfigFile));
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        StandaloneConfig config = new StandaloneConfig(workerProps);
        String clusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", clusterId);
        RestServer rest = new RestServer(config);
        // The worker id is derived from the REST server's advertised address.
        URI advertisedUrl = rest.advertisedUrl();
        String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
        Worker worker = new Worker(workerId, time, plugins, config, new FileOffsetBackingStore());
        Herder herder = new StandaloneHerder(worker, clusterId);
        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect standalone worker initialization took {}ms", time.hiResClockMs() - startMs);
        try {
            connect.start();
            // Submit each connector config in order, waiting for each creation to finish.
            for (final String connectorConfigFile : Arrays.copyOfRange(args, 1, args.length)) {
                Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorConfigFile));
                FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>((error, info) -> {
                    if (error == null)
                        log.info("Created connector {}", info.result().name());
                    else
                        log.error("Failed to create job for {}", connectorConfigFile);
                });
                herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, cb);
                cb.get();
            }
        } catch (Throwable t) {
            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
Also used : ConnectorInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo) Connect(org.apache.kafka.connect.runtime.Connect) WorkerInfo(org.apache.kafka.connect.runtime.WorkerInfo) Time(org.apache.kafka.common.utils.Time) URI(java.net.URI) FileOffsetBackingStore(org.apache.kafka.connect.storage.FileOffsetBackingStore) RestServer(org.apache.kafka.connect.runtime.rest.RestServer) StandaloneHerder(org.apache.kafka.connect.runtime.standalone.StandaloneHerder) Worker(org.apache.kafka.connect.runtime.Worker) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Herder(org.apache.kafka.connect.runtime.Herder) StandaloneHerder(org.apache.kafka.connect.runtime.standalone.StandaloneHerder) FutureCallback(org.apache.kafka.connect.util.FutureCallback) Plugins(org.apache.kafka.connect.runtime.isolation.Plugins)

Example 3 with Herder

use of org.apache.kafka.connect.runtime.Herder in project kafka by apache.

The class MirrorMaker, method stop:

/**
 * Stops all herders. Idempotent: only the first caller performs the shutdown;
 * concurrent or repeated calls are no-ops.
 */
public void stop() {
    boolean wasShuttingDown = shutdown.getAndSet(true);
    if (!wasShuttingDown) {
        log.info("Kafka MirrorMaker stopping");
        for (Herder herder : herders.values()) {
            try {
                herder.stop();
            } catch (Throwable t) {
                // Keep stopping the remaining herders even if one fails; letting the
                // exception escape the loop would leave herders running and their
                // stopLatch counts never decremented, so waiters could hang forever.
                log.error("Error stopping herder", t);
            } finally {
                stopLatch.countDown();
            }
        }
        log.info("Kafka MirrorMaker stopped.");
    }
}
Also used : Herder(org.apache.kafka.connect.runtime.Herder) DistributedHerder(org.apache.kafka.connect.runtime.distributed.DistributedHerder)

Example 4 with Herder

use of org.apache.kafka.connect.runtime.Herder in project kafka by apache.

The class ConnectStandalone, method main:

/**
 * Entry point for a standalone Connect worker: loads the worker config, builds
 * the worker/herder/REST stack (including the client-config override policy),
 * submits each connector config from the command line, and blocks until shutdown.
 *
 * @param args worker properties file followed by one or more connector properties files
 */
public static void main(String[] args) {
    // Print usage and exit when invoked incorrectly or with --help.
    if (args.length < 2 || Arrays.asList(args).contains("--help")) {
        log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
        Exit.exit(1);
    }
    try {
        Time time = Time.SYSTEM;
        log.info("Kafka Connect standalone worker initializing ...");
        long initStartMs = time.hiResClockMs();
        WorkerInfo workerInfo = new WorkerInfo();
        workerInfo.logAll();
        String workerPropsPath = args[0];
        Map<String, String> workerProps = workerPropsPath.isEmpty()
                ? Collections.emptyMap()
                : Utils.propsToStringMap(Utils.loadProps(workerPropsPath));
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        StandaloneConfig config = new StandaloneConfig(workerProps);
        String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", kafkaClusterId);
        // The REST server is initialized first so its advertised URL can form the worker id.
        RestServer restServer = new RestServer(config);
        restServer.initializeServer();
        URI advertisedUrl = restServer.advertisedUrl();
        String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
        ConnectorClientConfigOverridePolicy overridePolicy = plugins.newPlugin(config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG), config, ConnectorClientConfigOverridePolicy.class);
        Worker worker = new Worker(workerId, time, plugins, config, new FileOffsetBackingStore(), overridePolicy);
        Herder herder = new StandaloneHerder(worker, kafkaClusterId, overridePolicy);
        final Connect connect = new Connect(herder, restServer);
        log.info("Kafka Connect standalone worker initialization took {}ms", time.hiResClockMs() - initStartMs);
        try {
            connect.start();
            // Submit each connector config in order, waiting for each creation to finish.
            for (final String connectorPropsFile : Arrays.copyOfRange(args, 1, args.length)) {
                Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorPropsFile));
                FutureCallback<Herder.Created<ConnectorInfo>> createCallback = new FutureCallback<>((error, info) -> {
                    if (error == null)
                        log.info("Created connector {}", info.result().name());
                    else
                        log.error("Failed to create job for {}", connectorPropsFile);
                });
                herder.putConnectorConfig(connectorProps.get(ConnectorConfig.NAME_CONFIG), connectorProps, false, createCallback);
                createCallback.get();
            }
        } catch (Throwable t) {
            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
        // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
        connect.awaitStop();
    } catch (Throwable t) {
        log.error("Stopping due to error", t);
        Exit.exit(2);
    }
}
Also used : Connect(org.apache.kafka.connect.runtime.Connect) WorkerInfo(org.apache.kafka.connect.runtime.WorkerInfo) Time(org.apache.kafka.common.utils.Time) ConnectorClientConfigOverridePolicy(org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy) URI(java.net.URI) FileOffsetBackingStore(org.apache.kafka.connect.storage.FileOffsetBackingStore) RestServer(org.apache.kafka.connect.runtime.rest.RestServer) StandaloneHerder(org.apache.kafka.connect.runtime.standalone.StandaloneHerder) Worker(org.apache.kafka.connect.runtime.Worker) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) Herder(org.apache.kafka.connect.runtime.Herder) StandaloneHerder(org.apache.kafka.connect.runtime.standalone.StandaloneHerder) FutureCallback(org.apache.kafka.connect.util.FutureCallback) Plugins(org.apache.kafka.connect.runtime.isolation.Plugins)

Example 5 with Herder

use of org.apache.kafka.connect.runtime.Herder in project kafka by apache.

The class DistributedHerderTest, method testDestroyConnector:

@Test
public void testDestroyConnector() throws Exception {
    // Scenario: the herder joins as leader with one assigned connector (CONN1),
    // the connector is deleted, and the follow-up rebalance revokes it.
    EasyMock.expect(member.memberId()).andStubReturn("leader");
    EasyMock.expect(member.currentProtocolVersion()).andStubReturn(CONNECT_PROTOCOL_V0);
    // Start with one connector
    EasyMock.expect(worker.getPlugins()).andReturn(plugins);
    expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList());
    expectPostRebalanceCatchup(SNAPSHOT);
    // Capture the start callback so the mock can simulate a successful startup.
    Capture<Callback<TargetState>> onStart = newCapture();
    worker.startConnector(EasyMock.eq(CONN1), EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED), capture(onStart));
    PowerMock.expectLastCall().andAnswer(() -> {
        onStart.getValue().onCompletion(null, TargetState.STARTED);
        return true;
    });
    EasyMock.expect(worker.isRunning(CONN1)).andReturn(true);
    EasyMock.expect(worker.connectorTaskConfigs(CONN1, conn1SinkConfig)).andReturn(TASK_CONFIGS);
    // And delete the connector
    member.wakeup();
    PowerMock.expectLastCall();
    configBackingStore.removeConnectorConfig(CONN1);
    PowerMock.expectLastCall();
    // Deletion completes with Created(false, null): nothing was created.
    putConnectorCallback.onCompletion(null, new Herder.Created<>(false, null));
    PowerMock.expectLastCall();
    member.poll(EasyMock.anyInt());
    PowerMock.expectLastCall();
    // The change eventually is reflected to the config topic and the deleted connector and
    // tasks are revoked
    member.wakeup();
    PowerMock.expectLastCall();
    // The deleted connector's active-topic status records must be purged.
    // Each expectation is set times(2) — presumably once per tick() below; confirm
    // against the herder's topic-status cleanup path.
    TopicStatus fooStatus = new TopicStatus(FOO_TOPIC, CONN1, 0, time.milliseconds());
    TopicStatus barStatus = new TopicStatus(BAR_TOPIC, CONN1, 0, time.milliseconds());
    EasyMock.expect(statusBackingStore.getAllTopics(EasyMock.eq(CONN1))).andReturn(new HashSet<>(Arrays.asList(fooStatus, barStatus))).times(2);
    statusBackingStore.deleteTopic(EasyMock.eq(CONN1), EasyMock.eq(FOO_TOPIC));
    PowerMock.expectLastCall().times(2);
    statusBackingStore.deleteTopic(EasyMock.eq(CONN1), EasyMock.eq(BAR_TOPIC));
    PowerMock.expectLastCall().times(2);
    // Second rebalance revokes CONN1 and TASK1 with no error.
    expectRebalance(Arrays.asList(CONN1), Arrays.asList(TASK1), ConnectProtocol.Assignment.NO_ERROR, 2, Collections.emptyList(), Collections.emptyList(), 0);
    expectPostRebalanceCatchup(ClusterConfigState.EMPTY);
    member.requestRejoin();
    PowerMock.expectLastCall();
    PowerMock.replayAll();
    // Exercise: request deletion, then tick to process the herder's event loop.
    herder.deleteConnectorConfig(CONN1, putConnectorCallback);
    herder.tick();
    time.sleep(1000L);
    assertStatistics("leaderUrl", false, 3, 1, 100, 1000L);
    // read updated config that removes the connector
    configUpdateListener.onConnectorConfigRemove(CONN1);
    herder.configState = ClusterConfigState.EMPTY;
    herder.tick();
    time.sleep(1000L);
    assertStatistics("leaderUrl", true, 3, 1, 100, 2100L);
    PowerMock.verifyAll();
}
Also used : FutureCallback(org.apache.kafka.connect.util.FutureCallback) Callback(org.apache.kafka.connect.util.Callback) TopicStatus(org.apache.kafka.connect.runtime.TopicStatus) Herder(org.apache.kafka.connect.runtime.Herder) HashSet(java.util.HashSet) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)

Aggregations

Herder (org.apache.kafka.connect.runtime.Herder)19 ConnectorInfo (org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo)14 FutureCallback (org.apache.kafka.connect.util.FutureCallback)14 Test (org.junit.Test)13 PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest)13 Connector (org.apache.kafka.connect.connector.Connector)10 SinkConnector (org.apache.kafka.connect.sink.SinkConnector)10 SourceConnector (org.apache.kafka.connect.source.SourceConnector)10 WorkerConnector (org.apache.kafka.connect.runtime.WorkerConnector)9 Callback (org.apache.kafka.connect.util.Callback)8 ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId)7 HashMap (java.util.HashMap)5 HerderConnectorContext (org.apache.kafka.connect.runtime.HerderConnectorContext)5 Map (java.util.Map)4 ClusterConfigState (org.apache.kafka.connect.runtime.distributed.ClusterConfigState)4 RestartPlan (org.apache.kafka.connect.runtime.RestartPlan)3 RestartRequest (org.apache.kafka.connect.runtime.RestartRequest)3 SourceConnectorConfig (org.apache.kafka.connect.runtime.SourceConnectorConfig)3 Worker (org.apache.kafka.connect.runtime.Worker)3 DistributedHerder (org.apache.kafka.connect.runtime.distributed.DistributedHerder)3