Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi by strimzi.
The class TopicOperatorMockTest, method setup.
@BeforeEach
public void setup(VertxTestContext context) throws Exception {
    // Create the cluster in @BeforeEach rather than @BeforeAll: once the checkpoints causing premature success
    // were fixed, tests started failing because the topic "my-topic" already existed, and deleting the topics at
    // the end of each test occasionally timed out. Recreating the cluster for each test avoids the shared state.
    Map<String, String> config = new HashMap<>();
    config.put("zookeeper.connect", "zookeeper:2181");
    kafkaCluster = new StrimziKafkaCluster(1, 1, config);
    kafkaCluster.start();

    // Configure the Kubernetes Mock
    mockKube = new MockKube2.MockKube2Builder(client)
            .withKafkaTopicCrd()
            .build();
    mockKube.start();

    // Configure the namespace
    client.getConfiguration().setNamespace(NAMESPACE);

    adminClient = AdminClient.create(Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.getBootstrapServers()));

    Config topicConfig = new Config(Map.of(
            Config.KAFKA_BOOTSTRAP_SERVERS.key, kafkaCluster.getBootstrapServers(),
            Config.ZOOKEEPER_CONNECT.key, kafkaCluster.getZookeeper().getHost() + ":" + kafkaCluster.getZookeeper().getFirstMappedPort(),
            Config.ZOOKEEPER_CONNECTION_TIMEOUT_MS.key, "30000",
            Config.NAMESPACE.key, NAMESPACE,
            Config.CLIENT_ID.key, "myproject-client-id",
            Config.FULL_RECONCILIATION_INTERVAL_MS.key, "10000"));
    session = new Session(client, topicConfig);

    Checkpoint async = context.checkpoint();
    vertx.deployVerticle(session, ar -> {
        if (ar.succeeded()) {
            deploymentId = ar.result();
            topicsConfigWatcher = session.topicConfigsWatcher;
            topicWatcher = session.topicWatcher;
            topicsWatcher = session.topicsWatcher;
            metrics = session.metricsRegistry;
            metrics.forEachMeter(meter -> metrics.remove(meter));
            async.flag();
        } else {
            ar.cause().printStackTrace();
            context.failNow(new Throwable("Failed to deploy session"));
        }
    });
    if (!context.awaitCompletion(60, TimeUnit.SECONDS)) {
        context.failNow(new Throwable("Test timeout"));
    }

    int timeout = 30_000;
    waitFor("Topic watcher not started", 1_000, timeout, () -> this.topicWatcher.started());
    waitFor("Topic configs watcher not started", 1_000, timeout, () -> this.topicsConfigWatcher.started());
    waitFor("Topics watcher not started", 1_000, timeout, () -> this.topicsWatcher.started());
}
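The setup above leaves a running Kafka cluster, a running Kubernetes mock, an Admin client and a deployed Session verticle behind, so the matching teardown has to release all of them. The following @AfterEach is only a minimal sketch of that cleanup, assuming the same field names; the exact shutdown order used in the real TopicOperatorMockTest may differ.
@AfterEach
public void teardown(VertxTestContext context) {
    Checkpoint async = context.checkpoint();
    // Undeploy the Session verticle first so it stops watching topics before its backends go away
    vertx.undeploy(deploymentId, ar -> {
        if (adminClient != null) {
            adminClient.close();
        }
        mockKube.stop();       // shut down the Kubernetes mock
        kafkaCluster.stop();   // shut down the single-broker Kafka cluster and its ZooKeeper
        async.flag();
    });
}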
Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi by strimzi.
The class ConnectorMockTest, method testConnectorResourceMetricsMoveConnectToOtherOperator.
// MockKube2 does not support the "In" selector => https://github.com/strimzi/strimzi-kafka-operator/issues/6740
@Disabled
@Test
void testConnectorResourceMetricsMoveConnectToOtherOperator(VertxTestContext context) {
    String connectName1 = "cluster1";
    String connectName2 = "cluster2";
    String connectorName1 = "connector1";
    String connectorName2 = "connector2";

    when(kafkaConnectOperator.selector()).thenReturn(Optional.of(new LabelSelector(null, Map.of("foo", "bar"))));

    KafkaConnect kafkaConnect1 = new KafkaConnectBuilder()
            .withNewMetadata()
                .withNamespace(NAMESPACE)
                .withName(connectName1)
                .addToLabels("foo", "bar")
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build();
    KafkaConnect kafkaConnect2 = new KafkaConnectBuilder(kafkaConnect1)
            .editMetadata().withName(connectName2).endMetadata()
            .build();

    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect1);
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect2);
    waitForConnectReady(connectName1);
    waitForConnectReady(connectName2);

    KafkaConnector connector1 = defaultKafkaConnectorBuilder()
            .editMetadata()
                .withName(connectorName1)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName1)
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")
            .endMetadata()
            .build();
    KafkaConnector connector2 = defaultKafkaConnectorBuilder()
            .editMetadata()
                .withName(connectorName2)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName2)
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")
            .endMetadata()
            .build();

    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector1);
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector2);
    waitForConnectorPaused(connectorName1);
    waitForConnectorPaused(connectorName2);

    MeterRegistry meterRegistry = metricsProvider.meterRegistry();
    Tags tags = Tags.of("kind", KafkaConnector.RESOURCE_KIND, "namespace", NAMESPACE);

    Promise<Void> reconciled1 = Promise.promise();
    Promise<Void> reconciled2 = Promise.promise();
    kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled1.complete());

    Checkpoint async = context.checkpoint();
    reconciled1.future().onComplete(context.succeeding(v -> context.verify(() -> {
        Gauge resources = meterRegistry.get("strimzi.resources").tags(tags).gauge();
        assertThat(resources.value(), is(2.0));
        Gauge resourcesPaused = meterRegistry.get("strimzi.resources.paused").tags(tags).gauge();
        assertThat(resourcesPaused.value(), is(2.0));

        Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName2)
                .edit(ctr -> new KafkaConnectBuilder(ctr).editMetadata().addToLabels("foo", "baz").endMetadata().build());
        waitForConnectReady(connectName1);

        kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled2.complete());
        reconciled2.future().onComplete(context.succeeding(v1 -> context.verify(() -> {
            assertThat(resources.value(), is(1.0));
            assertThat(resourcesPaused.value(), is(1.0));
            async.flag();
        })));
    })));
}
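For context on the @Disabled annotation: issue 6740 concerns label selector requirements that use the "In" operator, which the mock Kubernetes server behind MockKube2 cannot evaluate. The snippet below is only an illustration of what such a selector looks like when built with the fabric8 LabelSelectorBuilder; the key and values are made up and this code does not appear in the Strimzi tests.
LabelSelector inSelector = new LabelSelectorBuilder()
        .addNewMatchExpression()
            .withKey("foo")
            .withOperator("In")
            .withValues("bar", "baz")   // matches resources whose "foo" label has either value
        .endMatchExpression()
        .build();
// A mocked operator selector built this way is the kind of thing the disabled tests would need MockKube2 to support
when(kafkaConnectOperator.selector()).thenReturn(Optional.of(inSelector));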
Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi by strimzi.
The class ConnectorMockTest, method testConnectorResourceMetricsConnectDeletion.
// MockKube2 does not support the "In" selector => https://github.com/strimzi/strimzi-kafka-operator/issues/6740
@Disabled
@Test
void testConnectorResourceMetricsConnectDeletion(VertxTestContext context) {
    String connectName = "cluster";
    String connectorName1 = "connector1";
    String connectorName2 = "connector2";

    when(kafkaConnectOperator.selector()).thenReturn(Optional.of(new LabelSelector(null, Map.of("foo", "bar"))));

    KafkaConnect kafkaConnect = new KafkaConnectBuilder()
            .withNewMetadata()
                .withNamespace(NAMESPACE)
                .withName(connectName)
                .addToLabels("foo", "bar")
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build();

    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect);
    waitForConnectReady(connectName);

    KafkaConnector connector1 = defaultKafkaConnectorBuilder()
            .editMetadata()
                .withName(connectorName1)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")
            .endMetadata()
            .build();
    KafkaConnector connector2 = new KafkaConnectorBuilder(connector1)
            .editMetadata().withName(connectorName2).endMetadata()
            .build();

    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector1);
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector2);
    waitForConnectorPaused(connectorName1);
    waitForConnectorPaused(connectorName2);

    MeterRegistry meterRegistry = metricsProvider.meterRegistry();
    Tags tags = Tags.of("kind", KafkaConnector.RESOURCE_KIND, "namespace", NAMESPACE);

    Promise<Void> reconciled1 = Promise.promise();
    Promise<Void> reconciled2 = Promise.promise();
    kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled1.complete());

    Checkpoint async = context.checkpoint();
    reconciled1.future().onComplete(context.succeeding(v -> context.verify(() -> {
        Gauge resources = meterRegistry.get("strimzi.resources").tags(tags).gauge();
        assertThat(resources.value(), is(2.0));
        Gauge resourcesPaused = meterRegistry.get("strimzi.resources.paused").tags(tags).gauge();
        assertThat(resourcesPaused.value(), is(2.0));

        Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).delete(kafkaConnect);
        waitForConnectDeleted(connectName);

        kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled2.complete());
        reconciled2.future().onComplete(context.succeeding(v1 -> context.verify(() -> {
            assertThat(resources.value(), is(0.0));
            assertThat(resourcesPaused.value(), is(0.0));
            async.flag();
        })));
    })));
}
Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi by strimzi.
The class JbodStorageMockTest, method init.
@BeforeEach
void init() {
    this.volumes = new ArrayList<>(2);
    volumes.add(new PersistentClaimStorageBuilder().withId(0).withDeleteClaim(true).withSize("100Gi").build());
    volumes.add(new PersistentClaimStorageBuilder().withId(1).withDeleteClaim(false).withSize("100Gi").build());
    this.kafka = new KafkaBuilder()
            .withNewMetadata().withNamespace(NAMESPACE).withName(NAME).endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withListeners(new GenericKafkaListenerBuilder().withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build())
                    .withNewJbodStorage().withVolumes(volumes).endJbodStorage()
                .endKafka()
                .withNewZookeeper().withReplicas(1).endZookeeper()
            .endSpec()
            .build();
    // Configure the Kubernetes Mock
    mockKube = new MockKube2.MockKube2Builder(client)
            .withKafkaCrd().withInitialKafkas(kafka).withStrimziPodSetCrd()
            .withDeploymentController().withPodController().withStatefulSetController().withServiceController()
            .build();
    mockKube.start();
    PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16);
    // Creating the Kafka operator
    ResourceOperatorSupplier ros = new ResourceOperatorSupplier(this.vertx, this.client,
            ResourceUtils.zookeeperLeaderFinder(this.vertx, this.client), ResourceUtils.adminClientProvider(),
            ResourceUtils.zookeeperScalerProvider(), ResourceUtils.metricsProvider(), pfa, 60_000L);
    this.operator = new KafkaAssemblyOperator(this.vertx, pfa, new MockCertManager(), new PasswordGenerator(10, "a", "a"),
            ros, ResourceUtils.dummyClusterOperatorConfig(VERSIONS, 2_000));
}
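With this fixture in place, a typical JbodStorageMockTest case reconciles the Kafka custom resource and then inspects the PersistentVolumeClaims the operator created for the two JBOD volumes. The sketch below is illustrative only: the test name, the reconciliation trigger string and the expected claim count are assumptions, not the actual Strimzi test code.
@Test
public void testJbodPvcsCreated(VertxTestContext context) {
    Checkpoint async = context.checkpoint();
    operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // 2 JBOD volumes x 3 Kafka replicas should yield 6 claims (assumed expectation for illustration)
            List<PersistentVolumeClaim> pvcs = client.persistentVolumeClaims().inNamespace(NAMESPACE).list().getItems();
            assertThat(pvcs.size(), is(6));
            async.flag();
        })));
}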
Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi by strimzi.
The class KafkaUpgradeDowngradeMockTest, method initialize.
private Future<Void> initialize(VertxTestContext context, Kafka initialKafka) {
    // Configure the Kubernetes Mock
    mockKube = new MockKube2.MockKube2Builder(client)
            .withKafkaCrd().withInitialKafkas(initialKafka).withStrimziPodSetCrd()
            .withStatefulSetController().withPodController().withServiceController().withDeploymentController()
            .build();
    mockKube.start();
    ResourceOperatorSupplier supplier = new ResourceOperatorSupplier(vertx, client,
            ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(),
            ResourceUtils.zookeeperScalerProvider(), ResourceUtils.metricsProvider(), pfa, 2_000);
    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
    operator = new KafkaAssemblyOperator(vertx, pfa, new MockCertManager(), new PasswordGenerator(10, "a", "a"), supplier, config);
    LOGGER.info("Reconciling initially -> create");
    return operator.reconcile(new Reconciliation("initial-reconciliation", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME));
}
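A test in KafkaUpgradeDowngradeMockTest would typically call initialize() with a Kafka resource pinned to one version, then change spec.kafka.version and reconcile again to drive the rolling upgrade. The following is a rough sketch of that flow under stated assumptions: the kafkaWithVersion helper and the concrete version strings are hypothetical and only stand in for whatever the real test uses.
@Test
public void testUpgrade(VertxTestContext context) {
    // Hypothetical helper that builds a Kafka CR with spec.kafka.version set to the given version
    Kafka initialKafka = kafkaWithVersion("3.0.0");
    Checkpoint async = context.checkpoint();
    initialize(context, initialKafka)
        .compose(v -> {
            // Bump the version on the existing custom resource and reconcile again
            Crds.kafkaOperation(client).inNamespace(NAMESPACE).withName(CLUSTER_NAME)
                .edit(k -> new KafkaBuilder(k).editSpec().editKafka().withVersion("3.1.0").endKafka().endSpec().build());
            return operator.reconcile(new Reconciliation("upgrade", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME));
        })
        .onComplete(context.succeeding(v -> async.flag()));
}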