Usage example of org.apache.kafka.connect.cli.ConnectDistributed from the strimzi/strimzi-kafka-operator project:
the startup method of the ConnectCluster class.
/**
 * Starts {@code numNodes} Kafka Connect workers in distributed mode, one per port.
 * Each worker runs on its own (non-daemon) thread; this method blocks until every
 * worker has finished starting before returning.
 *
 * @throws InterruptedException if interrupted while waiting for a worker to start
 */
public void startup() throws InterruptedException {
    for (int i = 0; i < numNodes; i++) {
        Map<String, String> workerProps = new HashMap<>();
        workerProps.put("listeners", "http://localhost:" + getPort(i));
        workerProps.put("plugin.path", String.join(",", pluginPath));
        // Object.toString() yields a per-cluster-instance identifier, giving each
        // ConnectCluster its own Connect group.id — presumably intentional; TODO confirm.
        workerProps.put("group.id", toString());
        workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        workerProps.put("key.converter.schemas.enable", "false");
        workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        workerProps.put("value.converter.schemas.enable", "false");
        workerProps.put("offset.storage.topic", getClass().getSimpleName() + "-offsets");
        workerProps.put("offset.storage.replication.factor", "3");
        workerProps.put("config.storage.topic", getClass().getSimpleName() + "-config");
        workerProps.put("config.storage.replication.factor", "3");
        workerProps.put("status.storage.topic", getClass().getSimpleName() + "-status");
        workerProps.put("status.storage.replication.factor", "3");
        workerProps.put("bootstrap.servers", brokerList);
        CountDownLatch l = new CountDownLatch(1);
        Thread thread = new Thread(() -> {
            ConnectDistributed connectDistributed = new ConnectDistributed();
            Connect connect = connectDistributed.startConnect(workerProps);
            // Register the instance BEFORE releasing the latch: the original code
            // counted down first, so startup() could return while connectInstances
            // was still missing this worker (an ordering/visibility race). Adding
            // before countDown() also puts the add inside the latch's happens-before.
            connectInstances.add(connect);
            l.countDown();
            // Keep the thread alive until the worker is stopped elsewhere.
            connect.awaitStop();
        });
        // Non-daemon so the worker keeps the JVM alive until it is stopped.
        thread.setDaemon(false);
        thread.start();
        l.await();
    }
}
Usage example of org.apache.kafka.connect.cli.ConnectDistributed from the strimzi/strimzi-kafka-operator project:
the beforeEach method of the KafkaConnectApiTest class.
/**
 * Spins up a single distributed Connect worker before each test and blocks
 * until the worker has started. The worker runs on its own non-daemon thread
 * and is expected to be stopped by the corresponding teardown.
 *
 * @throws IOException          if the temporary plugin directory cannot be created
 * @throws InterruptedException if interrupted while waiting for the worker to start
 */
@BeforeEach
public void beforeEach() throws IOException, InterruptedException {
    // Worker configuration for a one-node distributed Connect cluster.
    Map<String, String> config = new HashMap<>();
    config.put("listeners", "http://localhost:" + PORT);
    File pluginDir = Files.createTempDirectory(getClass().getSimpleName()).toFile();
    config.put("plugin.path", pluginDir.toString());
    config.put("group.id", toString());
    config.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
    config.put("key.converter.schemas.enable", "false");
    config.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
    config.put("value.converter.schemas.enable", "false");
    config.put("offset.storage.topic", getClass().getSimpleName() + "-offsets");
    config.put("config.storage.topic", getClass().getSimpleName() + "-config");
    config.put("status.storage.topic", getClass().getSimpleName() + "-status");
    config.put("bootstrap.servers", cluster.getBootstrapServers());

    CountDownLatch started = new CountDownLatch(1);
    Thread worker = new Thread(() -> {
        // Assign the field before releasing the latch so the test body sees it.
        connect = new ConnectDistributed().startConnect(config);
        started.countDown();
        // Keep this thread alive until the worker is stopped in teardown.
        connect.awaitStop();
    });
    worker.setDaemon(false);
    worker.start();
    started.await();
}
Usage example of org.apache.kafka.connect.cli.ConnectDistributed from the strimzi/strimzi project:
the startup method of the ConnectCluster class.
/**
 * Starts {@code numNodes} Kafka Connect workers in distributed mode, one per port.
 * Each worker runs on its own (non-daemon) thread; this method blocks until every
 * worker has finished starting before returning.
 *
 * @throws InterruptedException if interrupted while waiting for a worker to start
 */
public void startup() throws InterruptedException {
    for (int i = 0; i < numNodes; i++) {
        Map<String, String> workerProps = new HashMap<>();
        workerProps.put("listeners", "http://localhost:" + getPort(i));
        workerProps.put("plugin.path", String.join(",", pluginPath));
        // Object.toString() yields a per-cluster-instance identifier, giving each
        // ConnectCluster its own Connect group.id — presumably intentional; TODO confirm.
        workerProps.put("group.id", toString());
        workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        workerProps.put("key.converter.schemas.enable", "false");
        workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        workerProps.put("value.converter.schemas.enable", "false");
        workerProps.put("offset.storage.topic", getClass().getSimpleName() + "-offsets");
        workerProps.put("offset.storage.replication.factor", "3");
        workerProps.put("config.storage.topic", getClass().getSimpleName() + "-config");
        workerProps.put("config.storage.replication.factor", "3");
        workerProps.put("status.storage.topic", getClass().getSimpleName() + "-status");
        workerProps.put("status.storage.replication.factor", "3");
        workerProps.put("bootstrap.servers", brokerList);
        CountDownLatch l = new CountDownLatch(1);
        Thread thread = new Thread(() -> {
            ConnectDistributed connectDistributed = new ConnectDistributed();
            Connect connect = connectDistributed.startConnect(workerProps);
            // Register the instance BEFORE releasing the latch: the original code
            // counted down first, so startup() could return while connectInstances
            // was still missing this worker (an ordering/visibility race). Adding
            // before countDown() also puts the add inside the latch's happens-before.
            connectInstances.add(connect);
            l.countDown();
            // Keep the thread alive until the worker is stopped elsewhere.
            connect.awaitStop();
        });
        // Non-daemon so the worker keeps the JVM alive until it is stopped.
        thread.setDaemon(false);
        thread.start();
        l.await();
    }
}
Usage example of org.apache.kafka.connect.cli.ConnectDistributed from the strimzi/strimzi project:
the beforeEach method of the KafkaConnectApiTest class.
/**
 * Spins up a single distributed Connect worker before each test and blocks
 * until the worker has started. The worker runs on its own non-daemon thread
 * and is expected to be stopped by the corresponding teardown.
 *
 * @throws IOException          if the temporary plugin directory cannot be created
 * @throws InterruptedException if interrupted while waiting for the worker to start
 */
@BeforeEach
public void beforeEach() throws IOException, InterruptedException {
    // Worker configuration for a one-node distributed Connect cluster.
    Map<String, String> config = new HashMap<>();
    config.put("listeners", "http://localhost:" + PORT);
    File pluginDir = Files.createTempDirectory(getClass().getSimpleName()).toFile();
    config.put("plugin.path", pluginDir.toString());
    config.put("group.id", toString());
    config.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
    config.put("key.converter.schemas.enable", "false");
    config.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
    config.put("value.converter.schemas.enable", "false");
    config.put("offset.storage.topic", getClass().getSimpleName() + "-offsets");
    config.put("config.storage.topic", getClass().getSimpleName() + "-config");
    config.put("status.storage.topic", getClass().getSimpleName() + "-status");
    config.put("bootstrap.servers", cluster.getBootstrapServers());

    CountDownLatch started = new CountDownLatch(1);
    Thread worker = new Thread(() -> {
        // Assign the field before releasing the latch so the test body sees it.
        connect = new ConnectDistributed().startConnect(config);
        started.countDown();
        // Keep this thread alive until the worker is stopped in teardown.
        connect.awaitStop();
    });
    worker.setDaemon(false);
    worker.start();
    started.await();
}
Aggregations