Use of io.strimzi.test.container.StrimziKafkaCluster in project strimzi by strimzi: class KafkaConnectApiTest, method before().
@BeforeAll
public static void before() throws IOException {
    vertx = Vertx.vertx();
    final Map<String, String> kafkaClusterConfiguration = new HashMap<>();
    kafkaClusterConfiguration.put("zookeeper.connect", "zookeeper:2181");
    cluster = new StrimziKafkaCluster(3, 1, kafkaClusterConfiguration);
    cluster.start();
}
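This @BeforeAll starts a three-broker cluster once before the Connect API tests run. A matching teardown would stop the cluster and close Vert.x; a minimal sketch, assuming the same static vertx and cluster fields, might look like:

@AfterAll
public static void after() {
    // Stop the embedded Kafka cluster and release Vert.x resources
    cluster.stop();
    vertx.close();
}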
Use of io.strimzi.test.container.StrimziKafkaCluster in project strimzi by strimzi: class TopicOperatorMockTest, method setup().
@BeforeEach
public void setup(VertxTestContext context) throws Exception {
    // Create the cluster in @BeforeEach rather than @BeforeAll: once the checkpoints that caused premature
    // success were fixed, tests started failing because the topic "my-topic" already existed, and deleting
    // topics at the end of each test occasionally timed out. Recreating the cluster for each test avoids
    // that shared state.
    Map<String, String> config = new HashMap<>();
    config.put("zookeeper.connect", "zookeeper:2181");
    kafkaCluster = new StrimziKafkaCluster(1, 1, config);
    kafkaCluster.start();

    MockKube mockKube = new MockKube();
    mockKube.withCustomResourceDefinition(Crds.kafkaTopic(), KafkaTopic.class, KafkaTopicList.class, KafkaTopic::getStatus, KafkaTopic::setStatus);
    kubeClient = mockKube.build();

    adminClient = AdminClient.create(Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.getBootstrapServers()));

    Config topicConfig = new Config(Map.of(
        Config.KAFKA_BOOTSTRAP_SERVERS.key, kafkaCluster.getBootstrapServers(),
        Config.ZOOKEEPER_CONNECT.key, kafkaCluster.getZookeeper().getHost() + ":" + kafkaCluster.getZookeeper().getFirstMappedPort(),
        Config.ZOOKEEPER_CONNECTION_TIMEOUT_MS.key, "30000",
        Config.NAMESPACE.key, "myproject",
        Config.CLIENT_ID.key, "myproject-client-id",
        Config.FULL_RECONCILIATION_INTERVAL_MS.key, "10000"));
    session = new Session(kubeClient, topicConfig);

    Checkpoint async = context.checkpoint();
    vertx.deployVerticle(session, ar -> {
        if (ar.succeeded()) {
            deploymentId = ar.result();
            topicsConfigWatcher = session.topicConfigsWatcher;
            topicWatcher = session.topicWatcher;
            topicsWatcher = session.topicsWatcher;
            metrics = session.metricsRegistry;
            metrics.forEachMeter(meter -> metrics.remove(meter));
            async.flag();
        } else {
            ar.cause().printStackTrace();
            context.failNow(new Throwable("Failed to deploy session"));
        }
    });
    if (!context.awaitCompletion(60, TimeUnit.SECONDS)) {
        context.failNow(new Throwable("Test timeout"));
    }

    int timeout = 30_000;
    waitFor("Topic watcher not started", 1_000, timeout, () -> this.topicWatcher.started());
    waitFor("Topic configs watcher not started", 1_000, timeout, () -> this.topicsConfigWatcher.started());
    waitFor("Topics watcher not started", 1_000, timeout, () -> this.topicsWatcher.started());
}
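A per-test teardown normally undeploys the Session verticle and releases the per-test resources; a minimal sketch, assuming the fields created above, could be:

@AfterEach
public void teardown(VertxTestContext context) {
    // Undeploy the Session verticle, then close the admin client and stop the per-test cluster
    vertx.undeploy(deploymentId, ar -> {
        adminClient.close();
        kafkaCluster.stop();
        context.completeNow();
    });
}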
Use of io.strimzi.test.container.StrimziKafkaCluster in project strimzi by strimzi: class TopicOperatorReplicationIT, method beforeAll().
@BeforeAll
public void beforeAll() throws Exception {
    kafkaCluster = new StrimziKafkaCluster(numKafkaBrokers(), numKafkaBrokers(), kafkaClusterConfig());
    kafkaCluster.start();

    setupKubeCluster();
    setup(kafkaCluster);
    startTopicOperator(kafkaCluster);
}
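Here the broker count and extra broker configuration come from helper methods defined by the test class. The method names appear in the snippet above, but the bodies and values below are hypothetical, shown only to illustrate what beforeAll() expects:

// Hypothetical helpers, not taken from the source.
protected int numKafkaBrokers() {
    return 3; // a replication test needs more than one broker
}

protected Map<String, String> kafkaClusterConfig() {
    Map<String, String> config = new HashMap<>();
    config.put("zookeeper.connect", "zookeeper:2181");
    return config;
}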