Example 46 with StrimziPodSet

Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.

From the class KafkaAssemblyOperatorTest, method updateCluster.

@SuppressWarnings({ "checkstyle:NPathComplexity", "checkstyle:JavaNCSS", "checkstyle:MethodLength" })
private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kafka updatedAssembly) {
    KafkaCluster originalKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS);
    KafkaCluster updatedKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedAssembly, VERSIONS);
    ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS);
    ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedAssembly, VERSIONS);
    EntityOperator originalEntityOperator = EntityOperator.fromCrd(new Reconciliation("test", originalAssembly.getKind(), originalAssembly.getMetadata().getNamespace(), originalAssembly.getMetadata().getName()), originalAssembly, VERSIONS, true);
    KafkaExporter originalKafkaExporter = KafkaExporter.fromCrd(new Reconciliation("test", originalAssembly.getKind(), originalAssembly.getMetadata().getNamespace(), originalAssembly.getMetadata().getName()), originalAssembly, VERSIONS);
    CruiseControl originalCruiseControl = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS, updatedKafkaCluster.getStorage());
    // create CM, Service, headless service, statefulset and so on
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift);
    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
    var mockKafkaOps = supplier.kafkaOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    ServiceOperator mockServiceOps = supplier.serviceOperations;
    StatefulSetOperator mockStsOps = supplier.stsOperations;
    PvcOperator mockPvcOps = supplier.pvcOperations;
    PodOperator mockPodOps = supplier.podOperations;
    DeploymentOperator mockDepOps = supplier.deploymentOperations;
    SecretOperator mockSecretOps = supplier.secretOperations;
    NetworkPolicyOperator mockPolicyOps = supplier.networkPolicyOperator;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    NodeOperator mockNodeOps = supplier.nodeOperator;
    IngressOperator mockIngressOps = supplier.ingressOperations;
    RouteOperator mockRouteOps = supplier.routeOperations;
    var mockPodSetOps = supplier.strimziPodSetOperator;
    String clusterName = updatedAssembly.getMetadata().getName();
    String clusterNamespace = updatedAssembly.getMetadata().getNamespace();
    Map<String, PersistentVolumeClaim> zkPvcs = createPvcs(clusterNamespace, originalZookeeperCluster.getStorage(), originalZookeeperCluster.getReplicas(), (replica, storageId) -> AbstractModel.VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(clusterName, replica));
    zkPvcs.putAll(createPvcs(clusterNamespace, updatedZookeeperCluster.getStorage(), updatedZookeeperCluster.getReplicas(), (replica, storageId) -> AbstractModel.VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(clusterName, replica)));
    Map<String, PersistentVolumeClaim> kafkaPvcs = createPvcs(clusterNamespace, originalKafkaCluster.getStorage(), originalKafkaCluster.getReplicas(), (replica, storageId) -> {
        String name = VolumeUtils.createVolumePrefix(storageId, false);
        return name + "-" + KafkaResources.kafkaPodName(clusterName, replica);
    });
    kafkaPvcs.putAll(createPvcs(clusterNamespace, updatedKafkaCluster.getStorage(), updatedKafkaCluster.getReplicas(), (replica, storageId) -> {
        String name = VolumeUtils.createVolumePrefix(storageId, false);
        return name + "-" + KafkaResources.kafkaPodName(clusterName, replica);
    }));
    when(mockPvcOps.get(eq(clusterNamespace), ArgumentMatchers.startsWith("data-"))).thenAnswer(invocation -> {
        String pvcName = invocation.getArgument(1);
        if (pvcName.contains(originalZookeeperCluster.getName())) {
            return zkPvcs.get(pvcName);
        } else if (pvcName.contains(originalKafkaCluster.getName())) {
            return kafkaPvcs.get(pvcName);
        }
        return null;
    });
    when(mockPvcOps.getAsync(eq(clusterNamespace), ArgumentMatchers.startsWith("data-"))).thenAnswer(invocation -> {
        String pvcName = invocation.getArgument(1);
        if (pvcName.contains(originalZookeeperCluster.getName())) {
            return Future.succeededFuture(zkPvcs.get(pvcName));
        } else if (pvcName.contains(originalKafkaCluster.getName())) {
            return Future.succeededFuture(kafkaPvcs.get(pvcName));
        }
        return Future.succeededFuture(null);
    });
    when(mockPvcOps.listAsync(eq(clusterNamespace), ArgumentMatchers.any(Labels.class))).thenAnswer(invocation -> {
        Labels labels = invocation.getArgument(1);
        if (labels.toMap().get(Labels.STRIMZI_NAME_LABEL).contains("kafka")) {
            return Future.succeededFuture(new ArrayList<>(kafkaPvcs.values()));
        } else if (labels.toMap().get(Labels.STRIMZI_NAME_LABEL).contains("zookeeper")) {
            return Future.succeededFuture(new ArrayList<>(zkPvcs.values()));
        }
        return Future.succeededFuture(Collections.EMPTY_LIST);
    });
    when(mockPvcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
    // Mock CM get
    when(mockKafkaOps.get(clusterNamespace, clusterName)).thenReturn(updatedAssembly);
    when(mockKafkaOps.getAsync(eq(clusterNamespace), eq(clusterName))).thenReturn(Future.succeededFuture(updatedAssembly));
    when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture());
    when(mockPodSetOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new StrimziPodSet())));
    when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
    ConfigMap metricsCm = new ConfigMapBuilder().withNewMetadata().withName("metrics-cm").endMetadata().withData(singletonMap("metrics-config.yml", "")).build();
    ConfigMap metricsAndLoggingCm = originalKafkaCluster.generateSharedConfigurationConfigMap(new MetricsAndLogging(metricsCm, null), Map.of(), Map.of(), false);
    when(mockCmOps.get(clusterNamespace, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName))).thenReturn(metricsAndLoggingCm);
    when(mockCmOps.getAsync(clusterNamespace, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName))).thenReturn(Future.succeededFuture(metricsAndLoggingCm));
    ConfigMap zkMetricsCm = new ConfigMapBuilder().withNewMetadata().withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName)).withNamespace(clusterNamespace).endMetadata().withData(singletonMap(AbstractModel.ANCILLARY_CM_KEY_METRICS, TestUtils.toYamlString(METRICS_CONFIG))).build();
    when(mockCmOps.get(clusterNamespace, KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName))).thenReturn(zkMetricsCm);
    ConfigMap logCm = new ConfigMapBuilder().withNewMetadata().withName(KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).withNamespace(clusterNamespace).endMetadata().withData(singletonMap(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG, updatedKafkaCluster.loggingConfiguration(LOG_KAFKA_CONFIG, null))).build();
    when(mockCmOps.get(clusterNamespace, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName))).thenReturn(logCm);
    ConfigMap zklogsCm = new ConfigMapBuilder().withNewMetadata().withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName)).withNamespace(clusterNamespace).endMetadata().withData(singletonMap(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG, updatedZookeeperCluster.loggingConfiguration(LOG_ZOOKEEPER_CONFIG, null))).build();
    when(mockCmOps.get(clusterNamespace, KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName))).thenReturn(zklogsCm);
    when(mockCmOps.getAsync(clusterNamespace, metricsCMName)).thenReturn(Future.succeededFuture(metricsCM));
    when(mockCmOps.getAsync(clusterNamespace, differentMetricsCMName)).thenReturn(Future.succeededFuture(metricsCM));
    when(mockCmOps.listAsync(clusterNamespace, updatedKafkaCluster.getSelectorLabels())).thenReturn(Future.succeededFuture(List.of()));
    // Mock pod ops
    when(mockPodOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockPodOps.listAsync(anyString(), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
    // Mock node ops
    when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
    // Mock Service gets
    Set<Service> expectedServices = new HashSet<>();
    expectedServices.add(updatedKafkaCluster.generateService());
    expectedServices.add(updatedKafkaCluster.generateHeadlessService());
    expectedServices.addAll(updatedKafkaCluster.generateExternalBootstrapServices());
    int replicas = updatedKafkaCluster.getReplicas();
    for (int i = 0; i < replicas; i++) {
        expectedServices.addAll(updatedKafkaCluster.generateExternalServices(i));
    }
    Map<String, Service> expectedServicesMap = expectedServices.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> s));
    when(mockServiceOps.endpointReadiness(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockServiceOps.get(eq(clusterNamespace), anyString())).thenAnswer(i -> Future.succeededFuture(expectedServicesMap.get(i.<String>getArgument(1))));
    when(mockServiceOps.getAsync(eq(clusterNamespace), anyString())).thenAnswer(i -> {
        Service svc = expectedServicesMap.get(i.<String>getArgument(1));
        if (svc != null && "NodePort".equals(svc.getSpec().getType())) {
            svc.getSpec().getPorts().get(0).setNodePort(32000);
        }
        return Future.succeededFuture(svc);
    });
    when(mockServiceOps.listAsync(eq(clusterNamespace), any(Labels.class))).thenReturn(Future.succeededFuture(asList(originalKafkaCluster.generateService(), originalKafkaCluster.generateHeadlessService())));
    when(mockServiceOps.hasNodePort(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    // Ingress mocks
    when(mockIngressOps.listAsync(eq(clusterNamespace), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
    // Route Mocks
    if (openShift) {
        Set<Route> expectedRoutes = new HashSet<>(originalKafkaCluster.generateExternalBootstrapRoutes());
        for (int i = 0; i < replicas; i++) {
            expectedRoutes.addAll(originalKafkaCluster.generateExternalRoutes(i));
        }
        Map<String, Route> expectedRoutesMap = expectedRoutes.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> s));
        when(mockRouteOps.get(eq(clusterNamespace), anyString())).thenAnswer(i -> Future.succeededFuture(expectedRoutesMap.get(i.<String>getArgument(1))));
        when(mockRouteOps.getAsync(eq(clusterNamespace), anyString())).thenAnswer(i -> {
            Route rt = expectedRoutesMap.get(i.<String>getArgument(1));
            if (rt != null) {
                RouteStatus st = new RouteStatusBuilder().withIngress(new RouteIngressBuilder().withHost("host").build()).build();
                rt.setStatus(st);
            }
            return Future.succeededFuture(rt);
        });
        when(mockRouteOps.listAsync(eq(clusterNamespace), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
        when(mockRouteOps.hasAddress(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    }
    // Mock Secret gets
    when(mockSecretOps.list(anyString(), any())).thenReturn(emptyList());
    when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.kafkaJmxSecretName(clusterName))).thenReturn(Future.succeededFuture(originalKafkaCluster.generateJmxSecret(null)));
    when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.zookeeperJmxSecretName(clusterName))).thenReturn(Future.succeededFuture(originalZookeeperCluster.generateJmxSecret(null)));
    when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.zookeeperSecretName(clusterName))).thenReturn(Future.succeededFuture());
    when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.kafkaSecretName(clusterName))).thenReturn(Future.succeededFuture());
    when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.entityTopicOperatorSecretName(clusterName))).thenReturn(Future.succeededFuture());
    when(mockSecretOps.getAsync(clusterNamespace, KafkaExporterResources.secretName(clusterName))).thenReturn(Future.succeededFuture());
    when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.clusterCaCertificateSecretName(clusterName))).thenReturn(Future.succeededFuture(new Secret()));
    when(mockSecretOps.getAsync(clusterNamespace, ClusterOperator.secretName(clusterName))).thenReturn(Future.succeededFuture(new Secret()));
    when(mockSecretOps.getAsync(clusterNamespace, CruiseControlResources.secretName(clusterName))).thenReturn(Future.succeededFuture());
    // Mock NetworkPolicy get
    when(mockPolicyOps.get(clusterNamespace, KafkaResources.kafkaNetworkPolicyName(clusterName))).thenReturn(originalKafkaCluster.generateNetworkPolicy(null, null));
    when(mockPolicyOps.get(clusterNamespace, KafkaResources.zookeeperNetworkPolicyName(clusterName))).thenReturn(originalZookeeperCluster.generateNetworkPolicy(null, null));
    // Mock PodDisruptionBudget get
    when(mockPdbOps.get(clusterNamespace, KafkaResources.kafkaStatefulSetName(clusterName))).thenReturn(originalKafkaCluster.generatePodDisruptionBudget());
    when(mockPdbOps.get(clusterNamespace, KafkaResources.zookeeperStatefulSetName(clusterName))).thenReturn(originalZookeeperCluster.generatePodDisruptionBudget());
    // Mock StatefulSet get
    when(mockStsOps.get(eq(clusterNamespace), eq(KafkaResources.kafkaStatefulSetName(clusterName)))).thenReturn(originalKafkaCluster.generateStatefulSet(openShift, null, null, null));
    when(mockStsOps.get(eq(clusterNamespace), eq(KafkaResources.zookeeperStatefulSetName(clusterName)))).thenReturn(originalZookeeperCluster.generateStatefulSet(openShift, null, null));
    // Mock Deployment get
    if (originalEntityOperator != null) {
        when(mockDepOps.get(clusterNamespace, KafkaResources.entityOperatorDeploymentName(clusterName))).thenReturn(originalEntityOperator.generateDeployment(true, null, null));
        when(mockDepOps.getAsync(clusterNamespace, KafkaResources.entityOperatorDeploymentName(clusterName))).thenReturn(Future.succeededFuture(originalEntityOperator.generateDeployment(true, null, null)));
        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    }
    if (originalCruiseControl != null) {
        when(mockDepOps.get(clusterNamespace, CruiseControlResources.deploymentName(clusterName))).thenReturn(originalCruiseControl.generateDeployment(true, null, null));
        when(mockDepOps.getAsync(clusterNamespace, KafkaResources.entityOperatorDeploymentName(clusterName))).thenReturn(Future.succeededFuture(originalCruiseControl.generateDeployment(true, null, null)));
        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    }
    if (metrics) {
        when(mockDepOps.get(clusterNamespace, KafkaExporterResources.deploymentName(clusterName))).thenReturn(originalKafkaExporter.generateDeployment(true, null, null));
        when(mockDepOps.getAsync(clusterNamespace, KafkaExporterResources.deploymentName(clusterName))).thenReturn(Future.succeededFuture(originalKafkaExporter.generateDeployment(true, null, null)));
        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    }
    // Mock CM patch
    Set<String> metricsCms = set();
    doAnswer(invocation -> {
        metricsCms.add(invocation.getArgument(1));
        return Future.succeededFuture();
    }).when(mockCmOps).reconcile(any(), eq(clusterNamespace), any(), any());
    Set<String> logCms = set();
    doAnswer(invocation -> {
        logCms.add(invocation.getArgument(1));
        return Future.succeededFuture();
    }).when(mockCmOps).reconcile(any(), eq(clusterNamespace), any(), any());
    // Mock Service patch (both service and headless service)
    ArgumentCaptor<String> patchedServicesCaptor = ArgumentCaptor.forClass(String.class);
    when(mockServiceOps.reconcile(any(), eq(clusterNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    // Mock Secrets patch
    when(mockSecretOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture());
    // Mock NetworkPolicy patch
    when(mockPolicyOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture());
    // Mock PodDisruptionBudget patch
    when(mockPdbOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture());
    // Mock StatefulSet patch
    when(mockStsOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.zookeeperStatefulSetName(clusterName)), any())).thenAnswer(invocation -> {
        StatefulSet sts = invocation.getArgument(3);
        return Future.succeededFuture(ReconcileResult.patched(sts));
    });
    when(mockStsOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.kafkaStatefulSetName(clusterName)), any())).thenAnswer(invocation -> {
        StatefulSet sts = invocation.getArgument(3);
        return Future.succeededFuture(ReconcileResult.patched(sts));
    });
    when(mockStsOps.getAsync(eq(clusterNamespace), eq(KafkaResources.zookeeperStatefulSetName(clusterName)))).thenReturn(Future.succeededFuture(originalZookeeperCluster.generateStatefulSet(openShift, null, null)));
    when(mockStsOps.getAsync(eq(clusterNamespace), eq(KafkaResources.kafkaStatefulSetName(clusterName)))).thenReturn(Future.succeededFuture());
    // Mock StatefulSet scaleUp
    // ArgumentCaptor<String> scaledUpCaptor = ArgumentCaptor.forClass(String.class);
    when(mockStsOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
    // Mock StatefulSet scaleDown
    // ArgumentCaptor<String> scaledDownCaptor = ArgumentCaptor.forClass(String.class);
    when(mockStsOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
    // Mock Deployment patch
    ArgumentCaptor<String> depCaptor = ArgumentCaptor.forClass(String.class);
    when(mockDepOps.reconcile(any(), anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture());
    KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), certManager, passwordGenerator, supplier, config);
    // Now try to update a KafkaCluster based on this CM
    Checkpoint async = context.checkpoint();
    ops.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, clusterNamespace, clusterName), updatedAssembly).onComplete(context.succeeding(v -> context.verify(() -> {
        // rolling restart
        Set<String> expectedRollingRestarts = set();
        if (StatefulSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, originalKafkaCluster.generateStatefulSet(openShift, null, null, null), updatedKafkaCluster.generateStatefulSet(openShift, null, null, null)))) {
            expectedRollingRestarts.add(originalKafkaCluster.getName());
        }
        if (StatefulSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, originalZookeeperCluster.generateStatefulSet(openShift, null, null), updatedZookeeperCluster.generateStatefulSet(openShift, null, null)))) {
            expectedRollingRestarts.add(originalZookeeperCluster.getName());
        }
        // Check that ZK scale-up happens when it should
        boolean zkScaledUp = updatedAssembly.getSpec().getZookeeper().getReplicas() > originalAssembly.getSpec().getZookeeper().getReplicas();
        verify(mockStsOps, times(zkScaledUp ? 1 : 0)).scaleUp(any(), eq(clusterNamespace), eq(KafkaResources.zookeeperStatefulSetName(clusterName)), anyInt());
        // No metrics config => no CMs created
        verify(mockCmOps, never()).createOrUpdate(any(), any());
        async.flag();
    })));
}
Also used : RouteIngressBuilder(io.fabric8.openshift.api.model.RouteIngressBuilder) JmxTransQueryTemplateBuilder(io.strimzi.api.kafka.model.template.JmxTransQueryTemplateBuilder) ArgumentMatchers(org.mockito.ArgumentMatchers) KafkaExporterResources(io.strimzi.api.kafka.model.KafkaExporterResources) KafkaJmxOptions(io.strimzi.api.kafka.model.KafkaJmxOptions) ArgumentMatchers.eq(org.mockito.ArgumentMatchers.eq) PodDisruptionBudget(io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget) AfterAll(org.junit.jupiter.api.AfterAll) PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) RouteStatusBuilder(io.fabric8.openshift.api.model.RouteStatusBuilder) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) BeforeAll(org.junit.jupiter.api.BeforeAll) Arrays.asList(java.util.Arrays.asList) Map(java.util.Map) Mockito.doAnswer(org.mockito.Mockito.doAnswer) ResourceOperatorSupplier(io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier) AbstractModel(io.strimzi.operator.cluster.model.AbstractModel) StatefulSetOperator(io.strimzi.operator.cluster.operator.resource.StatefulSetOperator) KafkaJmxOptionsBuilder(io.strimzi.api.kafka.model.KafkaJmxOptionsBuilder) KafkaVersion(io.strimzi.operator.cluster.model.KafkaVersion) Set(java.util.Set) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) EphemeralStorage(io.strimzi.api.kafka.model.storage.EphemeralStorage) PasswordGenerator(io.strimzi.operator.common.PasswordGenerator) PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim) RouteOperator(io.strimzi.operator.common.operator.resource.RouteOperator) EntityOperator(io.strimzi.operator.cluster.model.EntityOperator) ClusterCa(io.strimzi.operator.cluster.model.ClusterCa) PersistentVolumeClaimBuilder(io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder) EntityUserOperatorSpecBuilder(io.strimzi.api.kafka.model.EntityUserOperatorSpecBuilder) ClusterOperatorConfig(io.strimzi.operator.cluster.ClusterOperatorConfig) StatefulSetBuilder(io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder) VertxTestContext(io.vertx.junit5.VertxTestContext) GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) IngressOperator(io.strimzi.operator.common.operator.resource.IngressOperator) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) KafkaVersionTestUtils(io.strimzi.operator.cluster.KafkaVersionTestUtils) Matchers.hasSize(org.hamcrest.Matchers.hasSize) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) KafkaJmxAuthenticationPasswordBuilder(io.strimzi.api.kafka.model.KafkaJmxAuthenticationPasswordBuilder) KubernetesVersion(io.strimzi.operator.KubernetesVersion) Vertx(io.vertx.core.Vertx) JmxTransSpecBuilder(io.strimzi.api.kafka.model.JmxTransSpecBuilder) Mockito.times(org.mockito.Mockito.times) StatefulSet(io.fabric8.kubernetes.api.model.apps.StatefulSet) PvcOperator(io.strimzi.operator.common.operator.resource.PvcOperator) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) Reconciliation(io.strimzi.operator.common.Reconciliation) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Mockito.never(org.mockito.Mockito.never) 
KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) CoreMatchers.is(org.hamcrest.CoreMatchers.is) Storage(io.strimzi.api.kafka.model.storage.Storage) StrimziPodSetOperator(io.strimzi.operator.common.operator.resource.StrimziPodSetOperator) BiFunction(java.util.function.BiFunction) Timeout(io.vertx.junit5.Timeout) Matchers.hasKey(org.hamcrest.Matchers.hasKey) KafkaExporter(io.strimzi.operator.cluster.model.KafkaExporter) Route(io.fabric8.openshift.api.model.Route) PodOperator(io.strimzi.operator.common.operator.resource.PodOperator) ResourceUtils(io.strimzi.operator.cluster.ResourceUtils) KafkaExporterSpec(io.strimzi.api.kafka.model.KafkaExporterSpec) MethodSource(org.junit.jupiter.params.provider.MethodSource) ListenersUtils(io.strimzi.operator.cluster.model.ListenersUtils) Collections.emptyList(java.util.Collections.emptyList) DeploymentOperator(io.strimzi.operator.common.operator.resource.DeploymentOperator) SecretOperator(io.strimzi.operator.common.operator.resource.SecretOperator) ClientsCa(io.strimzi.operator.cluster.model.ClientsCa) VertxExtension(io.vertx.junit5.VertxExtension) Future(io.vertx.core.Future) Collectors(java.util.stream.Collectors) CruiseControlResources(io.strimzi.api.kafka.model.CruiseControlResources) Objects(java.util.Objects) List(java.util.List) Labels(io.strimzi.operator.common.model.Labels) StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) Secret(io.fabric8.kubernetes.api.model.Secret) Optional(java.util.Optional) Checkpoint(io.vertx.junit5.Checkpoint) PodDisruptionBudgetOperator(io.strimzi.operator.common.operator.resource.PodDisruptionBudgetOperator) PlatformFeaturesAvailability(io.strimzi.operator.PlatformFeaturesAvailability) MockCertManager(io.strimzi.operator.common.operator.MockCertManager) EntityOperatorSpecBuilder(io.strimzi.api.kafka.model.EntityOperatorSpecBuilder) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) EntityTopicOperatorSpecBuilder(io.strimzi.api.kafka.model.EntityTopicOperatorSpecBuilder) KafkaStatus(io.strimzi.api.kafka.model.status.KafkaStatus) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) NetworkPolicyOperator(io.strimzi.operator.common.operator.resource.NetworkPolicyOperator) SingleVolumeStorage(io.strimzi.api.kafka.model.storage.SingleVolumeStorage) HashMap(java.util.HashMap) ZookeeperCluster(io.strimzi.operator.cluster.model.ZookeeperCluster) VolumeUtils(io.strimzi.operator.cluster.model.VolumeUtils) AtomicReference(java.util.concurrent.atomic.AtomicReference) MetricsAndLogging(io.strimzi.operator.common.MetricsAndLogging) HashSet(java.util.HashSet) JmxPrometheusExporterMetrics(io.strimzi.api.kafka.model.JmxPrometheusExporterMetrics) ServiceOperator(io.strimzi.operator.common.operator.resource.ServiceOperator) ArgumentCaptor(org.mockito.ArgumentCaptor) KafkaCluster(io.strimzi.operator.cluster.model.KafkaCluster) ClusterOperator(io.strimzi.operator.cluster.ClusterOperator) ConfigMapOperator(io.strimzi.operator.common.operator.resource.ConfigMapOperator) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) TestUtils(io.strimzi.test.TestUtils) ReconcileResult(io.strimzi.operator.common.operator.resource.ReconcileResult) Collections.singletonMap(java.util.Collections.singletonMap) Service(io.fabric8.kubernetes.api.model.Service) RouteStatus(io.fabric8.openshift.api.model.RouteStatus) 
JmxTransResources(io.strimzi.api.kafka.model.JmxTransResources) StatefulSetDiff(io.strimzi.operator.cluster.operator.resource.StatefulSetDiff) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) EntityOperatorSpec(io.strimzi.api.kafka.model.EntityOperatorSpec) CruiseControl(io.strimzi.operator.cluster.model.CruiseControl) Collections.emptyMap(java.util.Collections.emptyMap) NodeOperator(io.strimzi.operator.common.operator.resource.NodeOperator) JmxTransOutputDefinitionTemplateBuilder(io.strimzi.api.kafka.model.template.JmxTransOutputDefinitionTemplateBuilder) TestUtils.set(io.strimzi.test.TestUtils.set) Mockito.when(org.mockito.Mockito.when) Mockito.verify(org.mockito.Mockito.verify) TimeUnit(java.util.concurrent.TimeUnit) NetworkPolicy(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicy) Kafka(io.strimzi.api.kafka.model.Kafka) Deployment(io.fabric8.kubernetes.api.model.apps.Deployment) Collections(java.util.Collections)
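The PVC stubbing in updateCluster() above (mockPvcOps.get and getAsync backed by the zkPvcs and kafkaPvcs maps) uses Mockito's thenAnswer so that the return value is computed per invocation from the argument. A minimal, self-contained sketch of the same idiom, using a hypothetical PvcStore interface instead of the real PvcOperator (the names here are illustrative, not Strimzi API):

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.startsWith;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Map;

public class DynamicAnswerSketch {
    // Hypothetical lookup interface standing in for PvcOperator in this sketch
    interface PvcStore {
        String get(String namespace, String name);
    }

    public static void main(String[] args) {
        Map<String, String> pvcs = Map.of(
                "data-my-cluster-kafka-0", "kafka PVC",
                "data-my-cluster-zookeeper-0", "zookeeper PVC");

        PvcStore store = mock(PvcStore.class);

        // The answer is evaluated on every call, so the stub can route by argument value,
        // mirroring the pvcName.contains(...) branching in updateCluster()
        when(store.get(eq("my-namespace"), startsWith("data-"))).thenAnswer(invocation -> {
            String pvcName = invocation.getArgument(1);
            return pvcs.get(pvcName);
        });

        System.out.println(store.get("my-namespace", "data-my-cluster-kafka-0"));   // kafka PVC
        System.out.println(store.get("my-namespace", "data-unknown"));              // null
    }
}

The same pattern drives the mockServiceOps.getAsync and mockRouteOps.getAsync stubs above, where the answer also mutates the looked-up object (setting a node port or a route host) before returning it.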

Example 47 with StrimziPodSet

Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.

From the class StrimziPodSetControllerIT, method testNonCascadingDeletion.

/**
 * Tests the non-cascading delete of the PodSet:
 *   - Creation of StrimziPodSet and the managed pod
 *   - Non-cascading deletion of the pod set
 *   - Check the pod is still there
 *
 * @param context   Test context
 */
@Test
public void testNonCascadingDeletion(VertxTestContext context) {
    String podSetName = "basic-test";
    String podName = podSetName + "-0";
    try {
        Pod pod = pod(podName, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).create(podSet(podSetName, KAFKA_NAME, pod));
        // Check that pod is created
        TestUtils.waitFor("Wait for Pod to be created", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(podName).get() != null, () -> context.failNow("Test timed out waiting for pod creation!"));
        // Wait until the pod is ready
        TestUtils.waitFor("Wait for Pod to be ready", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(podName).isReady(), () -> context.failNow("Test timed out waiting for pod readiness!"));
        Pod actualPod = client.pods().inNamespace(NAMESPACE).withName(podName).get();
        // Check OwnerReference was added
        checkOwnerReference(actualPod, podSetName);
        // Delete the PodSet in non-cascading way
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).withPropagationPolicy(DeletionPropagation.ORPHAN).delete();
        // Check that the PodSet is deleted
        TestUtils.waitFor("Wait for StrimziPodSet deletion", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet == null;
        }, () -> context.failNow("PodSet was not deleted"));
        // Check that the pod still exists without owner reference
        actualPod = client.pods().inNamespace(NAMESPACE).withName(podName).get();
        assertThat(actualPod, is(notNullValue()));
        assertThat(actualPod.getMetadata().getOwnerReferences().size(), is(0));
        context.completeNow();
    } finally {
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).delete();
        client.pods().inNamespace(NAMESPACE).withName(podName).delete();
    }
}
Also used : StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) Pod(io.fabric8.kubernetes.api.model.Pod) Test(org.junit.jupiter.api.Test)
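The checkOwnerReference(actualPod, podSetName) helper used here is defined elsewhere in StrimziPodSetControllerIT and is not part of this snippet. One plausible shape for it, written against the fabric8 Pod and OwnerReference model already used above (an assumption about the helper, not the actual Strimzi code):

import io.fabric8.kubernetes.api.model.OwnerReference;
import io.fabric8.kubernetes.api.model.Pod;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;

// Hypothetical helper: asserts that the pod carries exactly one owner reference
// pointing back at the StrimziPodSet with the given name
private void checkOwnerReference(Pod pod, String podSetName) {
    assertThat(pod.getMetadata().getOwnerReferences().size(), is(1));

    OwnerReference owner = pod.getMetadata().getOwnerReferences().get(0);
    assertThat(owner.getKind(), is("StrimziPodSet"));
    assertThat(owner.getName(), is(podSetName));
}

Whatever its exact form, the point of the assertion is the contrast with the non-cascading delete: after the ORPHAN deletion the same pod is expected to have an empty owner-reference list, which is what the assertThat(...getOwnerReferences().size(), is(0)) check verifies.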

Example 48 with StrimziPodSet

Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.

From the class StrimziPodSetControllerIT, method testPodCreationDeletionAndRecreation.

/**
 * Tests the basic operations:
 *   - Creation of StrimziPodSet and the managed pod
 *   - Re-creation of the managed pod when it is deleted
 *   - Deletion of the StrimziPodSet and the managed pod
 *
 * @param context   Test context
 */
@Test
public void testPodCreationDeletionAndRecreation(VertxTestContext context) {
    String podSetName = "basic-test";
    String podName = podSetName + "-0";
    try {
        Pod pod = pod(podName, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).create(podSet(podSetName, KAFKA_NAME, pod));
        // Check that pod is created
        TestUtils.waitFor("Wait for Pod to be created", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(podName).get() != null, () -> context.failNow("Test timed out waiting for pod creation!"));
        // Wait until the pod is ready
        TestUtils.waitFor("Wait for Pod to be ready", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(podName).isReady(), () -> context.failNow("Test timed out waiting for pod readiness!"));
        Pod actualPod = client.pods().inNamespace(NAMESPACE).withName(podName).get();
        // Check OwnerReference was added
        checkOwnerReference(actualPod, podSetName);
        // We keep the resource version for pod re-creation test
        String resourceVersion = actualPod.getMetadata().getResourceVersion();
        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 1 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));
        // Delete the pod and test that it is recreated
        client.pods().inNamespace(NAMESPACE).withName(podName).delete();
        // Check that pod is created
        TestUtils.waitFor("Wait for Pod to be recreated", 100, 10_000, () -> {
            Pod p = client.pods().inNamespace(NAMESPACE).withName(podName).get();
            return p != null && !resourceVersion.equals(p.getMetadata().getResourceVersion());
        }, () -> context.failNow("Test timed out waiting for pod recreation!"));
        // Delete the PodSet
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).delete();
        // Check that pod is deleted after pod set deletion
        TestUtils.waitFor("Wait for Pod to be deleted", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(podName).get() == null, () -> context.failNow("Test timed out waiting for pod deletion!"));
        context.completeNow();
    } finally {
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).delete();
    }
}
Also used : StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) Pod(io.fabric8.kubernetes.api.model.Pod) Test(org.junit.jupiter.api.Test)
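The pod(podName, KAFKA_NAME, podSetName) factory used throughout these IT tests is also outside the snippet. A rough sketch of what such a helper could look like with the fabric8 PodBuilder follows; the label keys, container name, and image are illustrative assumptions rather than the values used by StrimziPodSetControllerIT (the zero termination grace period matches the assertion in testPodUpdates below):

import io.fabric8.kubernetes.api.model.ContainerBuilder;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;

import java.util.Map;

// Hypothetical factory: builds a minimal pod whose labels match the
// StrimziPodSet selector so that the controller picks it up
private Pod pod(String podName, String kafkaName, String podSetName) {
    return new PodBuilder()
            .withNewMetadata()
                .withName(podName)
                .withNamespace(NAMESPACE) // assumed to be the test class' namespace constant
                .withLabels(Map.of(
                        "strimzi.io/cluster", kafkaName,    // assumed selector label
                        "strimzi.io/pod-set", podSetName))  // assumed selector label
            .endMetadata()
            .withNewSpec()
                .withTerminationGracePeriodSeconds(0L)
                .withContainers(new ContainerBuilder()
                        .withName("main")                        // illustrative container name
                        .withImage("registry.example.com/pause") // placeholder image
                        .build())
            .endSpec()
            .build();
}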

Example 49 with StrimziPodSet

Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.

From the class StrimziPodSetControllerIT, method testScaleUpScaleDown.

/**
 * Tests scaling up and down of the StrimziPodSet and updates of the StrimziPodSet status.
 *
 * @param context   Test context
 */
@Test
public void testScaleUpScaleDown(VertxTestContext context) {
    String podSetName = "scale-up-down";
    String pod1Name = podSetName + "-0";
    String pod2Name = podSetName + "-1";
    try {
        Pod pod1 = pod(pod1Name, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).create(podSet(podSetName, KAFKA_NAME, pod1));
        // Wait until the pod is ready
        TestUtils.waitFor("Wait for Pod to be ready", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(pod1Name).isReady(), () -> context.failNow("Test timed out waiting for pod readiness!"));
        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 1 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));
        // Scale-up the pod-set
        Pod pod2 = pod(pod2Name, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).patch(podSet(podSetName, KAFKA_NAME, pod1, pod2));
        // Wait until the new pod is ready
        TestUtils.waitFor("Wait for second Pod to be ready", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(pod2Name).isReady(), () -> context.failNow("Test timed out waiting for second pod readiness!"));
        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 2 && podSet.getStatus().getReadyPods() == 2 && podSet.getStatus().getPods() == 2;
        }, () -> context.failNow("Pod stats do not match"));
        // Scale-down the pod-set
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).patch(podSet(podSetName, KAFKA_NAME, pod1));
        // Wait until the pod is ready
        TestUtils.waitFor("Wait for second Pod to be deleted", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(pod2Name).get() == null, () -> context.failNow("Test timed out waiting for second pod to be deleted!"));
        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 1 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));
        context.completeNow();
    } finally {
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).delete();
    }
}
Also used : StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) Pod(io.fabric8.kubernetes.api.model.Pod) Test(org.junit.jupiter.api.Test)
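Every wait in these tests goes through TestUtils.waitFor(description, pollIntervalMs, timeoutMs, readyCheck, onTimeout). For readers without the Strimzi test utilities on the classpath, a minimal stand-in with the same call shape might look like this (an approximation for illustration, not the Strimzi implementation):

import java.util.function.BooleanSupplier;

public final class WaitUtils {

    private WaitUtils() { }

    // Polls the ready check every pollIntervalMs until it returns true or timeoutMs
    // elapses; on timeout the description is logged and the onTimeout callback is invoked
    public static void waitFor(String description, long pollIntervalMs, long timeoutMs,
                               BooleanSupplier ready, Runnable onTimeout) {
        long deadline = System.currentTimeMillis() + timeoutMs;

        while (System.currentTimeMillis() < deadline) {
            if (ready.getAsBoolean()) {
                return;
            }
            try {
                Thread.sleep(pollIntervalMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            }
        }
        System.err.println("Timed out: " + description);
        onTimeout.run();
    }
}

With this shape, a call such as waitFor("Wait for second Pod to be ready", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(pod2Name).isReady(), () -> context.failNow("...")) reads exactly as in testScaleUpScaleDown above.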

Example 50 with StrimziPodSet

Use of io.strimzi.api.kafka.model.StrimziPodSet in project strimzi-kafka-operator by strimzi.

From the class StrimziPodSetControllerMockTest, method testPodUpdates.

/**
 * Tests updates to the pods in the StrimziPodSet:
 *   - StrimziPodSetController should not roll the pods => the dedicated rollers do it
 *   - The pod should not be marked as current when it is updated
 *
 * @param context   Test context
 */
@Test
public void testPodUpdates(VertxTestContext context) {
    String podSetName = "pod-updates";
    String podName = podSetName + "-0";
    try {
        Pod originalPod = pod(podName, KAFKA_NAME, podSetName);
        podSetOp().inNamespace(NAMESPACE).create(podSet(podSetName, KAFKA_NAME, originalPod));
        // Wait until the pod is ready
        TestUtils.waitFor("Wait for Pod to be ready", 100, 10_000, () -> client.pods().inNamespace(NAMESPACE).withName(podName).isReady(), () -> context.failNow("Test timed out waiting for pod readiness!"));
        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 1 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));
        // Get resource version to double-check the pod was not deleted
        Pod initialPod = client.pods().inNamespace(NAMESPACE).withName(podName).get();
        String resourceVersion = initialPod.getMetadata().getResourceVersion();
        // Update the pod with a new revision and a changed termination grace period
        Pod updatedPod = pod(podName, KAFKA_NAME, podSetName);
        updatedPod.getMetadata().getAnnotations().put(PodRevision.STRIMZI_REVISION_ANNOTATION, "new-revision");
        updatedPod.getSpec().setTerminationGracePeriodSeconds(1L);
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).patch(podSet(podSetName, KAFKA_NAME, updatedPod));
        // Check status of the PodSet
        TestUtils.waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> {
            StrimziPodSet podSet = podSetOp().inNamespace(NAMESPACE).withName(podSetName).get();
            return podSet.getStatus().getCurrentPods() == 0 && podSet.getStatus().getReadyPods() == 1 && podSet.getStatus().getPods() == 1;
        }, () -> context.failNow("Pod stats do not match"));
        // Check the pod was not changed
        Pod actualPod = client.pods().inNamespace(NAMESPACE).withName(podName).get();
        assertThat(actualPod.getMetadata().getResourceVersion(), is(resourceVersion));
        assertThat(actualPod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION), is(originalPod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION)));
        assertThat(actualPod.getSpec().getTerminationGracePeriodSeconds(), is(0L));
        context.completeNow();
    } finally {
        podSetOp().inNamespace(NAMESPACE).withName(podSetName).delete();
    }
}
Also used : StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) Pod(io.fabric8.kubernetes.api.model.Pod) Test(org.junit.jupiter.api.Test)
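The status predicate repeated across these tests reads: the pod set still tracks one pod and that pod is ready, but after the patch the pod no longer matches the revision recorded in the StrimziPodSet, so currentPods drops to 0 while readyPods and pods stay at 1. If the check is needed in several places, a small helper along these lines (using only the status getters already exercised above) keeps the wait lambdas short; this is a readability sketch, not code from the Strimzi test classes:

import io.strimzi.api.kafka.model.StrimziPodSet;

// Returns true when the StrimziPodSet status reports the expected pod counts;
// a missing pod set or status is treated as "not there yet"
private static boolean hasPodCounts(StrimziPodSet podSet, int current, int ready, int total) {
    return podSet != null
            && podSet.getStatus() != null
            && podSet.getStatus().getCurrentPods() == current
            && podSet.getStatus().getReadyPods() == ready
            && podSet.getStatus().getPods() == total;
}

The wait in testPodUpdates would then become: waitFor("Wait for StrimziPodSetStatus", 100, 10_000, () -> hasPodCounts(podSetOp().inNamespace(NAMESPACE).withName(podSetName).get(), 0, 1, 1), () -> context.failNow("Pod stats do not match")).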

Aggregations

StrimziPodSet (io.strimzi.api.kafka.model.StrimziPodSet): 106
Pod (io.fabric8.kubernetes.api.model.Pod): 84
Test (org.junit.jupiter.api.Test): 62
Reconciliation (io.strimzi.operator.common.Reconciliation): 58
Kafka (io.strimzi.api.kafka.model.Kafka): 54
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder): 50
ResourceOperatorSupplier (io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier): 44
StatefulSetOperator (io.strimzi.operator.cluster.operator.resource.StatefulSetOperator): 44
Labels (io.strimzi.operator.common.model.Labels): 44
PodOperator (io.strimzi.operator.common.operator.resource.PodOperator): 44
Checkpoint (io.vertx.junit5.Checkpoint): 44
CoreMatchers.is (org.hamcrest.CoreMatchers.is): 44
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 44
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 42
Map (java.util.Map): 42
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 40
PlatformFeaturesAvailability (io.strimzi.operator.PlatformFeaturesAvailability): 38
KafkaVersionTestUtils (io.strimzi.operator.cluster.KafkaVersionTestUtils): 38
KafkaCluster (io.strimzi.operator.cluster.model.KafkaCluster): 38
ArrayList (java.util.ArrayList): 38