Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi-kafka-operator by strimzi.
The class JbodStorageMockTest, method init().
@BeforeEach
void init() {
this.volumes = new ArrayList<>(2);
volumes.add(new PersistentClaimStorageBuilder().withId(0).withDeleteClaim(true).withSize("100Gi").build());
volumes.add(new PersistentClaimStorageBuilder().withId(1).withDeleteClaim(false).withSize("100Gi").build());
this.kafka = new KafkaBuilder()
        .withNewMetadata().withNamespace(NAMESPACE).withName(NAME).endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder().withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build())
                .withNewJbodStorage().withVolumes(volumes).endJbodStorage()
            .endKafka()
            .withNewZookeeper().withReplicas(1).endZookeeper()
        .endSpec()
        .build();
// Configure the Kubernetes Mock
mockKube = new MockKube2.MockKube2Builder(client)
        .withKafkaCrd()
        .withInitialKafkas(kafka)
        .withStrimziPodSetCrd()
        .withDeploymentController()
        .withPodController()
        .withStatefulSetController()
        .withServiceController()
        .build();
mockKube.start();
PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16);
// creating the Kafka operator
ResourceOperatorSupplier ros = new ResourceOperatorSupplier(this.vertx, this.client,
        ResourceUtils.zookeeperLeaderFinder(this.vertx, this.client), ResourceUtils.adminClientProvider(),
        ResourceUtils.zookeeperScalerProvider(), ResourceUtils.metricsProvider(), pfa, 60_000L);
this.operator = new KafkaAssemblyOperator(this.vertx, pfa, new MockCertManager(),
        new PasswordGenerator(10, "a", "a"), ros, ResourceUtils.dummyClusterOperatorConfig(VERSIONS, 2_000));
}
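The two snippets below are not part of the original class; they are a minimal sketch of how this setup is typically exercised, assuming the usual strimzi test types (Reconciliation, Kafka.RESOURCE_KIND, VertxTestContext) and Hamcrest assertions are available. The PVC naming convention used in the assertion is also an assumption, not taken from the original test.
// Hypothetical test method built on the init() above (sketch only, not from the original class).
// Assumed imports: io.strimzi.operator.common.Reconciliation, io.fabric8.kubernetes.api.model.PersistentVolumeClaim.
@Test
public void testJbodPvcsCreated(VertxTestContext context) {
    operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                // Assuming the default broker PVC naming data-<volume id>-<cluster>-kafka-<pod index>:
                // 2 JBOD volumes * 3 broker replicas = 6 claims expected in the mock cluster.
                long brokerPvcs = client.persistentVolumeClaims().inNamespace(NAMESPACE).list().getItems().stream()
                        .filter(pvc -> pvc.getMetadata().getName().contains("-kafka-"))
                        .count();
                assertThat(brokerPvcs, is(6L));
                context.completeNow();
            })));
}

// MockKube2 is started in init(); a matching tear-down keeps the mock API server and its controllers from leaking between tests.
@AfterEach
public void afterEach() {
    mockKube.stop();
}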
Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi-kafka-operator by strimzi.
The class KafkaAssemblyOperatorMockTest, method init().
/*
 * init() is equivalent to a @BeforeEach method.
 * Since this is a parameterized test, the test parameters are only available once the test starts,
 * so this method must be called at the beginning of each test.
 */
private void init(Params params) {
setFields(params);
cluster = new KafkaBuilder()
        .withNewMetadata().withName(CLUSTER_NAME).withNamespace(NAMESPACE).withLabels(singletonMap("foo", "bar")).endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(kafkaReplicas)
                .withStorage(kafkaStorage)
                .withListeners(new GenericKafkaListenerBuilder().withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build(),
                        new GenericKafkaListenerBuilder().withName("tls").withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build())
                .withResources(resources)
            .endKafka()
            .withNewZookeeper().withReplicas(zkReplicas).withStorage(zkStorage).endZookeeper()
            .withNewEntityOperator()
                .withNewTopicOperator().endTopicOperator()
                .withNewUserOperator().endUserOperator()
            .endEntityOperator()
        .endSpec()
        .build();
// Configure the Kubernetes Mock
mockKube = new MockKube2.MockKube2Builder(client)
        .withKafkaCrd()
        .withInitialKafkas(cluster)
        .withStrimziPodSetCrd()
        .withDeploymentController()
        .withPodController()
        .withStatefulSetController()
        .withServiceController()
        .build();
mockKube.start();
PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, kubernetesVersion);
ResourceOperatorSupplier supplier = supplierWithMocks();
ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
operator = new KafkaAssemblyOperator(vertx, pfa, new MockCertManager(), new PasswordGenerator(10, "a", "a"), supplier, config);
}
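A hedged sketch of how a parameterized test consumes this init(Params): the @MethodSource name "data", the test name, and the assertion are assumptions for illustration, not taken from the original class.
@ParameterizedTest
@MethodSource("data")  // assumed name of the parameter source in this class
public void exampleReconcileTest(Params params, VertxTestContext context) {
    init(params);  // parameters are only available once the test method runs
    operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                // e.g. verify that the operator created resources in the mock cluster via the Fabric8 client
                assertThat(client.secrets().inNamespace(NAMESPACE).list().getItems().isEmpty(), is(false));
                context.completeNow();
            })));
}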
Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi-kafka-operator by strimzi.
The class ConnectorMockTest, method testConnectorResourceMetricsMoveConnectToOtherOperator().
// MockKube2 does not support the "In" label selector operator (https://github.com/strimzi/strimzi-kafka-operator/issues/6740); see the selector sketch after this method.
@Disabled
@Test
void testConnectorResourceMetricsMoveConnectToOtherOperator(VertxTestContext context) {
String connectName1 = "cluster1";
String connectName2 = "cluster2";
String connectorName1 = "connector1";
String connectorName2 = "connector2";
when(kafkaConnectOperator.selector()).thenReturn(Optional.of(new LabelSelector(null, Map.of("foo", "bar"))));
KafkaConnect kafkaConnect1 = new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(connectName1)
            .addToLabels("foo", "bar")
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec().withReplicas(1).endSpec()
        .build();
KafkaConnect kafkaConnect2 = new KafkaConnectBuilder(kafkaConnect1).editMetadata().withName(connectName2).endMetadata().build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect1);
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect2);
waitForConnectReady(connectName1);
waitForConnectReady(connectName2);
KafkaConnector connector1 = defaultKafkaConnectorBuilder()
        .editMetadata().withName(connectorName1).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName1)
        .addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true").endMetadata()
        .build();
KafkaConnector connector2 = defaultKafkaConnectorBuilder()
        .editMetadata().withName(connectorName2).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName2)
        .addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true").endMetadata()
        .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector1);
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector2);
waitForConnectorPaused(connectorName1);
waitForConnectorPaused(connectorName2);
MeterRegistry meterRegistry = metricsProvider.meterRegistry();
Tags tags = Tags.of("kind", KafkaConnector.RESOURCE_KIND, "namespace", NAMESPACE);
Promise<Void> reconciled1 = Promise.promise();
Promise<Void> reconciled2 = Promise.promise();
kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled1.complete());
Checkpoint async = context.checkpoint();
reconciled1.future().onComplete(context.succeeding(v -> context.verify(() -> {
Gauge resources = meterRegistry.get("strimzi.resources").tags(tags).gauge();
assertThat(resources.value(), is(2.0));
Gauge resourcesPaused = meterRegistry.get("strimzi.resources.paused").tags(tags).gauge();
assertThat(resourcesPaused.value(), is(2.0));
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName2)
        .edit(ctr -> new KafkaConnectBuilder(ctr).editMetadata().addToLabels("foo", "baz").endMetadata().build());
waitForConnectReady(connectName1);
kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled2.complete());
reconciled2.future().onComplete(context.succeeding(v1 -> context.verify(() -> {
assertThat(resources.value(), is(1.0));
assertThat(resourcesPaused.value(), is(1.0));
async.flag();
})));
})));
}
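For context on the @Disabled note above: a set-based "In" requirement, which the mock API server cannot evaluate per issue 6740, would be built like this with the Fabric8 model. This is an illustration only; the test itself stubs the operator's selector with plain matchLabels (new LabelSelector(null, Map.of("foo", "bar"))).
// Illustration of an "In" label selector expression (io.fabric8.kubernetes.api.model types).
LabelSelector inSelector = new LabelSelectorBuilder()
        .withMatchExpressions(new LabelSelectorRequirementBuilder()
                .withKey("foo")
                .withOperator("In")
                .withValues("bar", "baz")
                .build())
        .build();
// Listing resources filtered by such a selector against MockKube2's mock server is what these disabled tests would need.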
Use of io.strimzi.test.mockkube2.MockKube2 in project strimzi-kafka-operator by strimzi.
The class ConnectorMockTest, method testConnectorResourceMetricsConnectDeletion().
// MockKube2 does not support "In" selector => https://github.com/strimzi/strimzi-kafka-operator/issues/6740
@Disabled
@Test
void testConnectorResourceMetricsConnectDeletion(VertxTestContext context) {
String connectName = "cluster";
String connectorName1 = "connector1";
String connectorName2 = "connector2";
when(kafkaConnectOperator.selector()).thenReturn(Optional.of(new LabelSelector(null, Map.of("foo", "bar"))));
KafkaConnect kafkaConnect = new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(connectName)
            .addToLabels("foo", "bar")
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec().withReplicas(1).endSpec()
        .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect);
waitForConnectReady(connectName);
KafkaConnector connector1 = defaultKafkaConnectorBuilder()
        .editMetadata().withName(connectorName1).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
        .addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true").endMetadata()
        .build();
KafkaConnector connector2 = new KafkaConnectorBuilder(connector1).editMetadata().withName(connectorName2).endMetadata().build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector1);
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector2);
waitForConnectorPaused(connectorName1);
waitForConnectorPaused(connectorName2);
MeterRegistry meterRegistry = metricsProvider.meterRegistry();
Tags tags = Tags.of("kind", KafkaConnector.RESOURCE_KIND, "namespace", NAMESPACE);
Promise<Void> reconciled1 = Promise.promise();
Promise<Void> reconciled2 = Promise.promise();
kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled1.complete());
Checkpoint async = context.checkpoint();
reconciled1.future().onComplete(context.succeeding(v -> context.verify(() -> {
Gauge resources = meterRegistry.get("strimzi.resources").tags(tags).gauge();
assertThat(resources.value(), is(2.0));
Gauge resourcesPaused = meterRegistry.get("strimzi.resources.paused").tags(tags).gauge();
assertThat(resourcesPaused.value(), is(2.0));
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).delete(kafkaConnect);
waitForConnectDeleted(connectName);
kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled2.complete());
reconciled2.future().onComplete(context.succeeding(v1 -> context.verify(() -> {
assertThat(resources.value(), is(0.0));
assertThat(resourcesPaused.value(), is(0.0));
async.flag();
})));
})));
}
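The waitForConnectReady, waitForConnectorPaused, and waitForConnectDeleted calls are helpers defined elsewhere in ConnectorMockTest. Below is a hedged sketch of how such a condition-based wait could be written with the Fabric8 client's waitUntilCondition; the real helpers may differ, and the "ReconciliationPaused" condition type, helper name, and timeout are assumptions.
// Hypothetical polling helper (sketch only, not the original implementation).
// Assumed import: java.util.concurrent.TimeUnit.
private void waitForConnectorCondition(String connectorName, String conditionType) {
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName)
            .waitUntilCondition(connector -> connector != null
                            && connector.getStatus() != null
                            && connector.getStatus().getConditions().stream()
                                    .anyMatch(c -> conditionType.equals(c.getType()) && "True".equals(c.getStatus())),
                    30, TimeUnit.SECONDS);
}
// e.g. waitForConnectorCondition("connector1", "ReconciliationPaused");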