Example 6 with KRaftNotSupported

Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.

From the class RecoveryIsolatedST, the method testRecoveryFromKafkaAndZookeeperPodDeletion.

@IsolatedTest
@StrimziPodSetTest
@KRaftNotSupported("Zookeeper is not supported by KRaft mode and is used in this test class")
void testRecoveryFromKafkaAndZookeeperPodDeletion() {
    final String kafkaName = KafkaResources.kafkaStatefulSetName(sharedClusterName);
    final String zkName = KafkaResources.zookeeperStatefulSetName(sharedClusterName);
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(sharedClusterName, kafkaName);
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(sharedClusterName, zkName);
    LOGGER.info("Deleting most of the Kafka and ZK pods");
    List<Pod> kafkaPodList = kubeClient().listPods(kafkaSelector);
    List<Pod> zkPodList = kubeClient().listPods(zkSelector);
    kafkaPodList.subList(0, kafkaPodList.size() - 1).forEach(pod -> kubeClient().deletePod(pod));
    zkPodList.subList(0, zkPodList.size() - 1).forEach(pod -> kubeClient().deletePod(pod));
    StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady(kafkaName, KAFKA_REPLICAS);
    StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady(zkName, ZOOKEEPER_REPLICAS);
    KafkaUtils.waitForKafkaReady(sharedClusterName);
}
Also used : Pod(io.fabric8.kubernetes.api.model.Pod) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) StrimziPodSetTest(io.strimzi.systemtest.annotations.StrimziPodSetTest) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest)
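The @KRaftNotSupported marker is intended to skip annotated tests when the suite runs against a KRaft-based cluster, reporting the reason given in the annotation value. Below is a minimal sketch of how such an annotation can be wired up with a JUnit 5 ExecutionCondition; the annotation name KRaftNotSupportedExample, the condition class, and the KRAFT_MODE environment variable are illustrative assumptions, not Strimzi's actual implementation.

import org.junit.jupiter.api.extension.ConditionEvaluationResult;
import org.junit.jupiter.api.extension.ExecutionCondition;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.ExtensionContext;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Hypothetical sketch of a KRaftNotSupported-style annotation; the real Strimzi
// implementation may be wired differently.
@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@ExtendWith(KRaftNotSupportedCondition.class)
@interface KRaftNotSupportedExample {
    // Human-readable reason why the test cannot run in KRaft mode
    String value() default "";
}

class KRaftNotSupportedCondition implements ExecutionCondition {
    @Override
    public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
        // Assumption: KRaft mode is signalled through an environment variable.
        // A real implementation would also read the annotation's value and include it in the skip reason.
        boolean kraftEnabled = "enabled".equals(System.getenv("KRAFT_MODE"));
        return kraftEnabled
            ? ConditionEvaluationResult.disabled("Skipped: test is not supported in KRaft mode")
            : ConditionEvaluationResult.enabled("KRaft mode not active, test can run");
    }
}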

Example 7 with KRaftNotSupported

Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.

From the class RollingUpdateST, the method testZookeeperScaleUpScaleDown.

@ParallelNamespaceTest
@Tag(SCALABILITY)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build(),
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
    // kafka cluster already deployed
    LOGGER.info("Running zookeeperScaleUpScaleDown with cluster {}", testStorage.getClusterName());
    final int initialZkReplicas = kubeClient().getClient().pods().inNamespace(testStorage.getNamespaceName()).withLabelSelector(testStorage.getZookeeperSelector()).list().getItems().size();
    assertThat(initialZkReplicas, is(3));
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    final int scaleZkTo = initialZkReplicas + 4;
    final List<String> newZkPodNames = new ArrayList<String>() {

        {
            for (int i = initialZkReplicas; i < scaleZkTo; i++) {
                add(KafkaResources.zookeeperPodName(testStorage.getClusterName(), i));
            }
        }
    };
    LOGGER.info("Scale up Zookeeper to {}", scaleZkTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(scaleZkTo), testStorage.getNamespaceName());
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    RollingUpdateUtils.waitForComponentAndPodsReady(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), scaleZkTo);
    // Check that each node, including the newly added ones, is in either leader or follower state
    KafkaUtils.waitForZkMntr(testStorage.getNamespaceName(), testStorage.getClusterName(), ZK_SERVER_STATE, 0, 1, 2, 3, 4, 5, 6);
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Create a new topic to ensure that ZooKeeper is working properly
    String scaleUpTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), scaleUpTopicName, 1, 1).build());
    clients = new KafkaClientsBuilder(clients).withTopicName(scaleUpTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // scale down
    LOGGER.info("Scale down Zookeeper to {}", initialZkReplicas);
    // Capture the UID of one of the newly added ZooKeeper pods before the scale-down removes it
    String uid = kubeClient(testStorage.getNamespaceName()).getPodUid(newZkPodNames.get(3));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(initialZkReplicas), testStorage.getNamespaceName());
    RollingUpdateUtils.waitForComponentAndPodsReady(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), initialZkReplicas);
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    // Wait for one ZooKeeper pod to become the leader and the others to become followers
    KafkaUtils.waitForZkMntr(testStorage.getNamespaceName(), testStorage.getClusterName(), ZK_SERVER_STATE, 0, 1, 2);
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Create a new topic to ensure that ZooKeeper is working properly
    String scaleDownTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), scaleDownTopicName, 1, 1).build());
    clients = new KafkaClientsBuilder(clients).withTopicName(scaleDownTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Verify that the removed pod has a 'Killing' event
    assertThat(kubeClient(testStorage.getNamespaceName()).listEventsByResourceUid(uid), hasAllOfReasons(Killing));
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) ArrayList(java.util.ArrayList) TestStorage(io.strimzi.systemtest.storage.TestStorage) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
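KafkaUtils.waitForZkMntr polls ZooKeeper's four-letter mntr command and waits until each listed server id reports the expected zk_server_state. A hypothetical sketch of that state check on raw mntr output is shown below; the class and method names are illustrative, not the Strimzi utility itself.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper: parse the "zk_server_state" line of ZooKeeper's `mntr`
// output and check that the node reports either leader or follower state.
final class ZkMntrCheck {
    private static final Pattern SERVER_STATE = Pattern.compile("zk_server_state\\s+(leader|follower|standalone)");

    static boolean isLeaderOrFollower(String mntrOutput) {
        Matcher matcher = SERVER_STATE.matcher(mntrOutput);
        return matcher.find()
            && ("leader".equals(matcher.group(1)) || "follower".equals(matcher.group(1)));
    }

    public static void main(String[] args) {
        String sample = "zk_version\t3.6.3\nzk_server_state\tfollower\nzk_znode_count\t42\n";
        System.out.println(isLeaderOrFollower(sample)); // prints: true
    }
}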

Example 8 with KRaftNotSupported

Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.

From the class NetworkPoliciesIsolatedST, the method testNetworkPoliciesWithPlainListener.

@IsolatedTest("Specific cluster operator for test case")
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testNetworkPoliciesWithPlainListener(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
    final String topic0 = "topic-example-0";
    final String topic1 = "topic-example-1";
    final String deniedProducerName = "denied-" + testStorage.getProducerName();
    final String deniedConsumerName = "denied-" + testStorage.getConsumerName();
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(clusterOperator.getDeploymentNamespace()).createInstallation().runInstallation();
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
            .editSpec()
                .editKafka()
                    .withListeners(new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationScramSha512Auth()
                        .endKafkaListenerAuthenticationScramSha512Auth()
                        .withNetworkPolicyPeers(
                            new NetworkPolicyPeerBuilder()
                                .withNewPodSelector()
                                    .addToMatchLabels("app", testStorage.getProducerName())
                                .endPodSelector()
                                .build(),
                            new NetworkPolicyPeerBuilder()
                                .withNewPodSelector()
                                    .addToMatchLabels("app", testStorage.getConsumerName())
                                .endPodSelector()
                                .build())
                        .build())
                .endKafka()
                .withNewKafkaExporter()
                .endKafkaExporter()
            .endSpec()
            .build(),
        ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build(),
        KafkaUserTemplates.scramShaUser(testStorage.getClusterName(), testStorage.getUserName()).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), topic0).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), topic1).build());
    final String scraperPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName();
    NetworkPolicyResource.allowNetworkPolicySettingsForKafkaExporter(extensionContext, testStorage.getClusterName());
    LOGGER.info("Verifying that producer/consumer: {}/{} are able to exchange messages", testStorage.getProducerName(), testStorage.getConsumerName());
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(topic0)
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, kafkaClients.producerScramShaPlainStrimzi(), kafkaClients.consumerScramShaPlainStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withProducerName(deniedProducerName).withConsumerName(deniedConsumerName).withTopicName(topic1).build();
    LOGGER.info("Verifying that producer/consumer: {}/{} are not able to exchange messages", deniedProducerName, deniedConsumerName);
    resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
    ClientUtils.waitForClientsTimeout(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    LOGGER.info("Check metrics exported by Kafka Exporter");
    MetricsCollector metricsCollector = new MetricsCollector.Builder().withScraperPodName(scraperPodName).withComponentName(testStorage.getClusterName()).withComponentType(ComponentType.KafkaExporter).build();
    Map<String, String> kafkaExporterMetricsData = metricsCollector.collectMetricsFromPods();
    assertThat("Kafka Exporter metrics should be non-empty", kafkaExporterMetricsData.size() > 0);
    for (Map.Entry<String, String> entry : kafkaExporterMetricsData.entrySet()) {
        assertThat("Value from collected metric should be non-empty", !entry.getValue().isEmpty());
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_consumergroup_current_offset"));
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic0 + "\"} 1"));
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic1 + "\"} 1"));
    }
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) MetricsCollector(io.strimzi.systemtest.metrics.MetricsCollector) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) EnvVarBuilder(io.fabric8.kubernetes.api.model.EnvVarBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) Map(java.util.Map) HashMap(java.util.HashMap) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
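The metric assertions above only test that the exporter output contains the expected kafka_topic_partitions lines as substrings. If a numeric value needs to be compared, it can be parsed out of the Prometheus text format; the helper below is a hypothetical sketch and not part of the Strimzi MetricsCollector API.

import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper: extract the value of kafka_topic_partitions for a topic
// from Prometheus-format Kafka Exporter output.
final class ExporterMetricParser {
    static Optional<Double> topicPartitions(String metrics, String topic) {
        Pattern pattern = Pattern.compile(
            "^kafka_topic_partitions\\{topic=\"" + Pattern.quote(topic) + "\"\\}\\s+(\\S+)$",
            Pattern.MULTILINE);
        Matcher matcher = pattern.matcher(metrics);
        return matcher.find() ? Optional.of(Double.parseDouble(matcher.group(1))) : Optional.empty();
    }

    public static void main(String[] args) {
        String scrape = "kafka_topic_partitions{topic=\"topic-example-0\"} 1\n";
        System.out.println(topicPartitions(scrape, "topic-example-0").orElse(-1.0)); // prints: 1.0
    }
}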

Example 9 with KRaftNotSupported

Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.

From the class NetworkPoliciesIsolatedST, the method testNetworkPoliciesWithTlsListener.

@IsolatedTest("Specific cluster operator for test case")
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testNetworkPoliciesWithTlsListener(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
    final String topic0 = "topic-example-0";
    final String topic1 = "topic-example-1";
    final String deniedProducerName = "denied-" + testStorage.getProducerName();
    final String deniedConsumerName = "denied-" + testStorage.getConsumerName();
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(clusterOperator.getDeploymentNamespace()).createInstallation().runInstallation();
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
            .editSpec()
                .editKafka()
                    .withListeners(new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withNewKafkaListenerAuthenticationScramSha512Auth()
                        .endKafkaListenerAuthenticationScramSha512Auth()
                        .withNetworkPolicyPeers(
                            new NetworkPolicyPeerBuilder()
                                .withNewPodSelector()
                                    .addToMatchLabels("app", testStorage.getProducerName())
                                .endPodSelector()
                                .build(),
                            new NetworkPolicyPeerBuilder()
                                .withNewPodSelector()
                                    .addToMatchLabels("app", testStorage.getConsumerName())
                                .endPodSelector()
                                .build())
                        .build())
                .endKafka()
            .endSpec()
            .build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), topic0).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), topic1).build(),
        KafkaUserTemplates.scramShaUser(testStorage.getClusterName(), testStorage.getUserName()).build());
    LOGGER.info("Verifying that producer/consumer: {}/{} are able to exchange messages", testStorage.getProducerName(), testStorage.getConsumerName());
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(topic0)
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, kafkaClients.producerScramShaTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerScramShaTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withProducerName(deniedProducerName).withConsumerName(deniedConsumerName).withTopicName(topic1).build();
    LOGGER.info("Verifying that producer/consumer: {}/{} are not able to exchange messages", deniedProducerName, deniedConsumerName);
    resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
    ClientUtils.waitForClientsTimeout(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
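The allowed and denied clients above are deployed as pods by the test templates, so the Kafka client configuration is hidden behind KafkaClientsBuilder. For reference, a standalone producer talking to the same kind of TLS listener with SCRAM-SHA-512 authentication would be configured roughly as below; the bootstrap address, truststore path, and credentials are placeholders, not values taken from the test.

import java.util.Properties;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.serialization.StringSerializer;

// Sketch of a producer talking to a TLS listener with SCRAM-SHA-512 authentication.
// Bootstrap address, truststore, and credentials are placeholders.
public class ScramTlsProducerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9093");
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
            "org.apache.kafka.common.security.scram.ScramLoginModule required "
                + "username=\"my-user\" password=\"my-password\";");
        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/tmp/cluster-ca.p12");
        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "changeit");
        props.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // producer.send(new ProducerRecord<>("topic-example-0", "key", "value"));
        }
    }
}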

Example 10 with KRaftNotSupported

Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.

From the class SecurityST, the method testCaRenewalBreakInMiddle.

@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testCaRenewalBreakInMiddle(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3).editSpec().withNewClusterCa().withRenewalDays(1).withValidityDays(3).endClusterCa().endSpec().build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build(), KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    Map<String, String> zkPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    Map<String, String> eoPods = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(testStorage.getClusterName()));
    InputStream secretInputStream = getClass().getClassLoader().getResourceAsStream("security-st-certs/expired-cluster-ca.crt");
    String clusterCaCert = TestUtils.readResource(secretInputStream);
    SecretUtils.createSecret(testStorage.getNamespaceName(), clusterCaCertificateSecretName(testStorage.getClusterName()), "ca.crt", clusterCaCert);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> {
        k.getSpec().getZookeeper().setResources(new ResourceRequirementsBuilder().addToRequests("cpu", new Quantity("100000m")).build());
        k.getSpec().setClusterCa(new CertificateAuthorityBuilder().withRenewalDays(4).withValidityDays(7).build());
    }, testStorage.getNamespaceName());
    TestUtils.waitFor("Waiting for some kafka pod to be in the pending phase because of selected high cpu resource", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> {
        List<Pod> pendingPods = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName())).stream().filter(pod -> pod.getStatus().getPhase().equals("Pending")).collect(Collectors.toList());
        if (pendingPods.isEmpty()) {
            LOGGER.info("No pods of {} are in desired state", KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName()));
            return false;
        } else {
            LOGGER.info("Pod in 'Pending' state: {}", pendingPods.get(0).getMetadata().getName());
            return true;
        }
    });
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> {
        k.getSpec().getZookeeper().setResources(new ResourceRequirementsBuilder().addToRequests("cpu", new Quantity("200m")).build());
    }, testStorage.getNamespaceName());
    // Wait until the certificates have been replaced
    SecretUtils.waitForCertToChange(testStorage.getNamespaceName(), clusterCaCert, KafkaResources.clusterCaCertificateSecretName(testStorage.getClusterName()));
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), 3, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
    DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(testStorage.getClusterName()), 1, eoPods);
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Try to send and receive messages with new certificates
    String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), topicName).build());
    kafkaClients = new KafkaClientsBuilder(kafkaClients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).withTopicName(topicName).build();
    resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Pod(io.fabric8.kubernetes.api.model.Pod) InputStream(java.io.InputStream) ResourceRequirementsBuilder(io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder) Quantity(io.fabric8.kubernetes.api.model.Quantity) TestStorage(io.strimzi.systemtest.storage.TestStorage) Matchers.containsString(org.hamcrest.Matchers.containsString) CertificateAuthorityBuilder(io.strimzi.api.kafka.model.CertificateAuthorityBuilder) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
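SecretUtils.waitForCertToChange effectively polls the cluster CA Secret until the stored ca.crt no longer matches the previously captured value. A minimal sketch of that comparison with the fabric8 client is shown below; the class name, namespace, secret name, and polling structure are illustrative assumptions, not the Strimzi utility itself.

import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Hypothetical sketch: read the cluster CA certificate from its Secret and
// compare it with a previously captured value to detect renewal.
public class CaCertChangeCheck {
    static String readCaCert(KubernetesClient client, String namespace, String secretName) {
        Secret secret = client.secrets().inNamespace(namespace).withName(secretName).get();
        String encoded = secret.getData().get("ca.crt");
        return new String(Base64.getDecoder().decode(encoded), StandardCharsets.US_ASCII);
    }

    public static void main(String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            String before = readCaCert(client, "my-namespace", "my-cluster-cluster-ca-cert");
            // ... trigger renewal, then poll until the PEM text differs ...
            String after = readCaCert(client, "my-namespace", "my-cluster-cluster-ca-cert");
            System.out.println("CA certificate changed: " + !before.equals(after));
        }
    }
}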

Aggregations

KRaftNotSupported (io.strimzi.systemtest.annotations.KRaftNotSupported) 162
ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest) 138
Tag (org.junit.jupiter.api.Tag) 100
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) 94
TestStorage (io.strimzi.systemtest.storage.TestStorage) 94
KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) 86
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) 80
Matchers.containsString (org.hamcrest.Matchers.containsString) 74
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString) 44
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector) 36
ExternalKafkaClient (io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) 32
Secret (io.fabric8.kubernetes.api.model.Secret) 30
HashMap (java.util.HashMap) 30
KafkaListenerAuthenticationTls (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationTls) 28
SecretBuilder (io.fabric8.kubernetes.api.model.SecretBuilder) 24
KafkaResources (io.strimzi.api.kafka.model.KafkaResources) 24
AbstractST (io.strimzi.systemtest.AbstractST) 24
Constants (io.strimzi.systemtest.Constants) 24
REGRESSION (io.strimzi.systemtest.Constants.REGRESSION) 24
KafkaResource (io.strimzi.systemtest.resources.crd.KafkaResource) 24