Example 71 with TestStorage

Use of io.strimzi.systemtest.storage.TestStorage in the strimzi-kafka-operator project by strimzi.

The class OauthAuthorizationIsolatedST, method testKeycloakAuthorizerToDelegateToSimpleAuthorizer:

@ParallelNamespaceTest
@Order(10)
void testKeycloakAuthorizerToDelegateToSimpleAuthorizer(ExtensionContext extensionContext) {
    TestStorage testStorage = new TestStorage(extensionContext);
    // we need to copy the Keycloak, team-a-client, and team-b-client Secrets from the `infra-namespace` into the new namespace
    resourceManager.createResource(extensionContext, kubeClient().getSecret(clusterOperator.getDeploymentNamespace(), KeycloakInstance.KEYCLOAK_SECRET_NAME));
    resourceManager.createResource(extensionContext, kubeClient().getSecret(clusterOperator.getDeploymentNamespace(), TEAM_A_CLIENT_SECRET));
    resourceManager.createResource(extensionContext, kubeClient().getSecret(clusterOperator.getDeploymentNamespace(), TEAM_B_CLIENT_SECRET));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
        .editSpec()
            .editKafka()
                .withListeners(OauthAbstractST.BUILD_OAUTH_TLS_LISTENER.apply(keycloakInstance))
                .withNewKafkaAuthorizationKeycloak()
                    .withClientId(KAFKA_CLIENT_ID)
                    .withDisableTlsHostnameVerification(true)
                    .withDelegateToKafkaAcls(true)
                    .withTlsTrustedCertificates(new CertSecretSourceBuilder()
                        .withSecretName(KeycloakInstance.KEYCLOAK_SECRET_NAME)
                        .withCertificate(KeycloakInstance.KEYCLOAK_SECRET_CERT)
                        .build())
                    .withTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
                .endKafkaAuthorizationKeycloak()
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), TEAM_A_CLIENT).build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), TEAM_B_CLIENT).build());
    final String teamAProducerName = TEAM_A_PRODUCER_NAME + "-" + testStorage.getClusterName();
    final String teamAConsumerName = TEAM_A_CONSUMER_NAME + "-" + testStorage.getClusterName();
    final String topicName = TOPIC_A + "-" + testStorage.getTopicName();
    final String consumerGroup = "a-consumer_group-" + testStorage.getConsumerName();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), topicName, testStorage.getNamespaceName()).build());
    KafkaOauthClients teamAOauthClientJob = new KafkaOauthClientsBuilder()
        .withNamespaceName(testStorage.getNamespaceName())
        .withProducerName(teamAProducerName)
        .withConsumerName(teamAConsumerName)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withConsumerGroup(consumerGroup)
        .withOauthClientId(TEAM_A_CLIENT)
        .withOauthClientSecret(TEAM_A_CLIENT_SECRET)
        .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
        .build();
    resourceManager.createResource(extensionContext, teamAOauthClientJob.producerStrimziOauthTls(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(teamAProducerName, testStorage.getNamespaceName(), MESSAGE_COUNT);
    resourceManager.createResource(extensionContext, teamAOauthClientJob.consumerStrimziOauthTls(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(teamAConsumerName, testStorage.getNamespaceName(), MESSAGE_COUNT);
}
Also used : CertSecretSourceBuilder(io.strimzi.api.kafka.model.CertSecretSourceBuilder) KafkaOauthClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClientsBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) KafkaOauthClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClients) Order(org.junit.jupiter.api.Order) TestMethodOrder(org.junit.jupiter.api.TestMethodOrder) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
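
A minimal sketch of the pattern shared by every example on this page: TestStorage derives unique per-test names from the JUnit 5 ExtensionContext, and all resources are then named through its accessors. This assumes a test class extending AbstractST (which supplies resourceManager); the method name testStorageSketch is hypothetical.

@ParallelNamespaceTest
void testStorageSketch(ExtensionContext extensionContext) {
    // Imports match the "Also used" lists on this page.
    // One TestStorage per test: cluster, namespace, topic and client names are
    // generated from the ExtensionContext, so tests running in parallel
    // namespaces never collide on resource names.
    final TestStorage testStorage = new TestStorage(extensionContext);
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1).build());
    resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(testStorage.getClusterName(),
            testStorage.getTopicName(), testStorage.getNamespaceName()).build());
}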

Example 72 with TestStorage

Use of io.strimzi.systemtest.storage.TestStorage in the strimzi-kafka-operator project by strimzi.

The class UserST, method testCreatingUsersWithSecretPrefix:

@ParallelNamespaceTest
void testCreatingUsersWithSecretPrefix(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    final String secretPrefix = "top-secret-";
    final String tlsUserName = "encrypted-leopold";
    final String scramShaUserName = "scramed-leopold";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationScramSha512Auth()
                        .endKafkaListenerAuthenticationScramSha512Auth()
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withNewKafkaListenerAuthenticationTlsAuth()
                        .endKafkaListenerAuthenticationTlsAuth()
                        .build())
            .endKafka()
            .editEntityOperator()
                .editUserOperator()
                    .withSecretPrefix(secretPrefix)
                .endUserOperator()
            .endEntityOperator()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build(),
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), tlsUserName).build(),
        KafkaUserTemplates.scramShaUser(testStorage.getClusterName(), scramShaUserName).build());
    Secret tlsSecret = kubeClient().getSecret(testStorage.getNamespaceName(), secretPrefix + tlsUserName);
    Secret scramShaSecret = kubeClient().getSecret(testStorage.getNamespaceName(), secretPrefix + scramShaUserName);
    LOGGER.info("Checking if user secrets with secret prefixes exists");
    assertNotNull(tlsSecret);
    assertNotNull(scramShaSecret);
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withUserName(secretPrefix + tlsUserName)
        .build();
    LOGGER.info("Checking if TLS user is able to send messages");
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    clients = new KafkaClientsBuilder(clients).withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())).withUserName(secretPrefix + scramShaUserName).build();
    LOGGER.info("Checking if SCRAM-SHA user is able to send messages");
    resourceManager.createResource(extensionContext, clients.producerScramShaPlainStrimzi(), clients.consumerScramShaPlainStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    LOGGER.info("Checking owner reference - if the secret will be deleted when we delete KafkaUser");
    LOGGER.info("Deleting KafkaUser:{}", tlsUserName);
    KafkaUserResource.kafkaUserClient().inNamespace(testStorage.getNamespaceName()).withName(tlsUserName).delete();
    KafkaUserUtils.waitForKafkaUserDeletion(testStorage.getNamespaceName(), tlsUserName);
    LOGGER.info("Deleting KafkaUser:{}", scramShaUserName);
    KafkaUserResource.kafkaUserClient().inNamespace(testStorage.getNamespaceName()).withName(scramShaUserName).delete();
    KafkaUserUtils.waitForKafkaUserDeletion(testStorage.getNamespaceName(), scramShaUserName);
    LOGGER.info("Checking if secrets are deleted");
    SecretUtils.waitForSecretDeletion(testStorage.getNamespaceName(), tlsSecret.getMetadata().getName());
    SecretUtils.waitForSecretDeletion(testStorage.getNamespaceName(), scramShaSecret.getMetadata().getName());
    assertNull(kubeClient().getSecret(testStorage.getNamespaceName(), tlsSecret.getMetadata().getName()));
    assertNull(kubeClient().getSecret(testStorage.getNamespaceName(), scramShaSecret.getMetadata().getName()));
}
Also used : Secret(io.fabric8.kubernetes.api.model.Secret) KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
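
The point this example turns on is the naming asymmetry introduced by secretPrefix: the KafkaUser keeps its plain name, while the credentials Secret gets the prefixed one. A condensed sketch, reusing only identifiers from the example above:

    // The User Operator stores credentials under "<secretPrefix><userName>"...
    Secret tlsSecret = kubeClient().getSecret(testStorage.getNamespaceName(), secretPrefix + tlsUserName);
    assertNotNull(tlsSecret);
    // ...but the KafkaUser resource keeps the unprefixed name, so deletion targets
    // "encrypted-leopold" while the owned Secret "top-secret-encrypted-leopold"
    // is garbage-collected along with it.
    KafkaUserResource.kafkaUserClient().inNamespace(testStorage.getNamespaceName()).withName(tlsUserName).delete();
    SecretUtils.waitForSecretDeletion(testStorage.getNamespaceName(), secretPrefix + tlsUserName);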

Example 73 with TestStorage

Use of io.strimzi.systemtest.storage.TestStorage in the strimzi-kafka-operator project by strimzi.

The class AlternativeReconcileTriggersST, method testManualTriggeringRollingUpdate:

@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
@SuppressWarnings("checkstyle:MethodLength")
void testManualTriggeringRollingUpdate(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    final String continuousTopicName = "continuous-topic";
    final String continuousProducerName = "continuous-" + testStorage.getProducerName();
    final String continuousConsumerName = "continuous-" + testStorage.getConsumerName();
    // with the 1000 ms delay configured below, 500 messages take roughly 500 seconds
    final int continuousClientsMessageCount = 500;
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3).build());
    String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
    String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    Map<String, String> zkPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update
    // ##############################
    // Set up a topic with 3 partitions, 3 replicas and min.insync.replicas=2 so the producer keeps working during the rolling update
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), continuousTopicName, 3, 3, 2).build());
    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add transactional id to make producer transactional
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");
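    // Net effect of the three lines above (a summary, not extra config):
    //   delivery.timeout.ms=20000
    //   request.timeout.ms=20000
    //   transactional.id=continuous-topic.1   <- must be stable and unique per producer
    //   enable.idempotence=true               <- required for transactional producers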
    KafkaClients continuousClients = new KafkaClientsBuilder()
        .withProducerName(continuousProducerName)
        .withConsumerName(continuousConsumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(continuousTopicName)
        .withMessageCount(continuousClientsMessageCount)
        .withAdditionalConfig(producerAdditionConfiguration)
        .withDelayMs(1000)
        .withNamespaceName(testStorage.getNamespaceName())
        .build();
    resourceManager.createResource(extensionContext, continuousClients.producerStrimzi(), continuousClients.consumerStrimzi());
    // ##############################
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // rolling update for kafka
    // set annotation to trigger Kafka rolling update
    LOGGER.info("Annotate Kafka {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, kafkaName);
    StUtils.annotateStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
    // check annotation to trigger rolling update
    assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
    // wait until the annotation is removed
    TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName) == null
            || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // rolling update for zookeeper
    // set annotation to trigger Zookeeper rolling update
    LOGGER.info("Annotate Zookeeper {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, zkName);
    StUtils.annotateStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
    // check annotation to trigger rolling update
    assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), 3, zkPods);
    // wait until the annotation is removed
    TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName) == null
            || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Create a new topic to ensure that ZooKeeper is working properly
    String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), newTopicName, 1, 1).build());
    clients = new KafkaClientsBuilder(clients).withTopicName(newTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // ##############################
    // Validate that continuous clients finished successfully
    // ##############################
    ClientUtils.waitForClientsSuccess(continuousProducerName, continuousConsumerName, testStorage.getNamespaceName(), continuousClientsMessageCount);
// ##############################
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) TestStorage(io.strimzi.systemtest.storage.TestStorage) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
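
Stripped of the continuous-client scaffolding, the manual-roll mechanism in this test reduces to three calls; a sketch reusing the helpers and the kafkaName variable from the example above:

    // 1. Snapshot the current pods so the roll can be detected.
    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    // 2. Apply strimzi.io/manual-rolling-update=true to the StatefulSet/StrimziPodSet.
    StUtils.annotateStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName,
        Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
    // 3. Wait until all 3 pods have been replaced relative to the snapshot.
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);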

Example 74 with TestStorage

Use of io.strimzi.systemtest.storage.TestStorage in the strimzi-kafka-operator project by strimzi.

The class RollingUpdateST, method testKafkaAndZookeeperScaleUpScaleDown:

@ParallelNamespaceTest
@Tag(ACCEPTANCE)
@Tag(SCALABILITY)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testKafkaAndZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3)
            .editSpec()
                .editKafka()
                    .addToConfig(singletonMap("default.replication.factor", 1))
                    .addToConfig("auto.create.topics.enable", "false")
                .endKafka()
            .endSpec()
            .build(),
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    testDockerImagesForKafkaCluster(testStorage.getClusterName(), clusterOperator.getDeploymentNamespace(), testStorage.getNamespaceName(), 3, 3, false);
    LOGGER.info("Running kafkaScaleUpScaleDown {}", testStorage.getClusterName());
    final int initialReplicas = kubeClient().getClient().pods().inNamespace(testStorage.getNamespaceName()).withLabelSelector(testStorage.getKafkaSelector()).list().getItems().size();
    assertEquals(3, initialReplicas);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), 3, initialReplicas, initialReplicas).build());
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // scale up
    final int scaleTo = initialReplicas + 4;
    LOGGER.info("Scale up Kafka to {}", scaleTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
        kafka.getSpec().getKafka().setReplicas(scaleTo);
    }, testStorage.getNamespaceName());
    kafkaPods = RollingUpdateUtils.waitForComponentScaleUpOrDown(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), scaleTo, kafkaPods);
    LOGGER.info("Kafka scale up to {} finished", scaleTo);
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    assertThat((int) kubeClient().listPersistentVolumeClaims().stream().filter(pvc -> pvc.getMetadata().getName().contains(KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()))).count(), is(scaleTo));
    final int zookeeperScaleTo = initialReplicas + 2;
    Map<String, String> zooKeeperPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector());
    LOGGER.info("Scale up Zookeeper to {}", zookeeperScaleTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(zookeeperScaleTo), testStorage.getNamespaceName());
    zooKeeperPods = RollingUpdateUtils.waitForComponentScaleUpOrDown(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), zookeeperScaleTo, zooKeeperPods);
    LOGGER.info("Kafka scale up to {} finished", zookeeperScaleTo);
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // scale down
    LOGGER.info("Scale down Kafka to {}", initialReplicas);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getKafka().setReplicas(initialReplicas), testStorage.getNamespaceName());
    RollingUpdateUtils.waitForComponentScaleUpOrDown(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), initialReplicas, kafkaPods);
    LOGGER.info("Kafka scale down to {} finished", initialReplicas);
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    assertThat(kubeClient().listPersistentVolumeClaims(testStorage.getNamespaceName(), testStorage.getClusterName()).stream().filter(pvc -> pvc.getMetadata().getName().contains("data-" + KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()))).collect(Collectors.toList()).size(), is(initialReplicas));
    // Create a new topic to ensure that ZooKeeper is working properly
    String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), newTopicName).build());
    clients = new KafkaClientsBuilder(clients).withTopicName(newTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) Quantity(io.fabric8.kubernetes.api.model.Quantity) CoreMatchers.is(org.hamcrest.CoreMatchers.is) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) KafkaTopicUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils) MetricsCollector(io.strimzi.systemtest.metrics.MetricsCollector) ConfigMapKeySelector(io.fabric8.kubernetes.api.model.ConfigMapKeySelector) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) Map(java.util.Map) Killing(io.strimzi.systemtest.k8s.Events.Killing) Tag(org.junit.jupiter.api.Tag) StUtils(io.strimzi.systemtest.utils.StUtils) Matchers.hasAllOfReasons(io.strimzi.systemtest.matchers.Matchers.hasAllOfReasons) ProbeBuilder(io.strimzi.api.kafka.model.ProbeBuilder) ScraperTemplates(io.strimzi.systemtest.templates.specific.ScraperTemplates) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) ACCEPTANCE(io.strimzi.systemtest.Constants.ACCEPTANCE) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) INTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED) Collectors(java.util.stream.Collectors) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) List(java.util.List) Logger(org.apache.logging.log4j.Logger) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) Pattern(java.util.regex.Pattern) SCALABILITY(io.strimzi.systemtest.Constants.SCALABILITY) AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ParallelSuite(io.strimzi.systemtest.annotations.ParallelSuite) ResourceRequirementsBuilder(io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder) HashMap(java.util.HashMap) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) TestStorage(io.strimzi.systemtest.storage.TestStorage) ArrayList(java.util.ArrayList) JmxPrometheusExporterMetrics(io.strimzi.api.kafka.model.JmxPrometheusExporterMetrics) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) JmxPrometheusExporterMetricsBuilder(io.strimzi.api.kafka.model.JmxPrometheusExporterMetricsBuilder) YAMLFactory(com.fasterxml.jackson.dataformat.yaml.YAMLFactory) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) TestUtils(io.strimzi.test.TestUtils) Collections.singletonMap(java.util.Collections.singletonMap) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) ROLLING_UPDATE(io.strimzi.systemtest.Constants.ROLLING_UPDATE) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) KafkaUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils) ComponentType(io.strimzi.systemtest.resources.ComponentType) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Constants(io.strimzi.systemtest.Constants) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) KubeClusterResource.kubeClient(io.strimzi.test.k8s.KubeClusterResource.kubeClient) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) ResourceManager(io.strimzi.systemtest.resources.ResourceManager) KafkaUserTemplates(io.strimzi.systemtest.templates.crd.KafkaUserTemplates) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION)
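
Each scale step in this example repeats one core pattern: snapshot the pods, edit the replica count through the Kafka custom resource, and wait for convergence. A condensed sketch, with scaleTo as computed in the test:

    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(),
        kafka -> kafka.getSpec().getKafka().setReplicas(scaleTo), testStorage.getNamespaceName());
    // The returned snapshot becomes the baseline for the next scaling step.
    kafkaPods = RollingUpdateUtils.waitForComponentScaleUpOrDown(
        testStorage.getNamespaceName(), testStorage.getKafkaSelector(), scaleTo, kafkaPods);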

Example 75 with TestStorage

Use of io.strimzi.systemtest.storage.TestStorage in the strimzi-kafka-operator project by strimzi.

The class RollingUpdateST, method testZookeeperScaleUpScaleDown:

@ParallelNamespaceTest
@Tag(SCALABILITY)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build(),
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
    // kafka cluster already deployed
    LOGGER.info("Running zookeeperScaleUpScaleDown with cluster {}", testStorage.getClusterName());
    final int initialZkReplicas = kubeClient().getClient().pods().inNamespace(testStorage.getNamespaceName()).withLabelSelector(testStorage.getZookeeperSelector()).list().getItems().size();
    assertThat(initialZkReplicas, is(3));
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withUserName(testStorage.getUserName())
        .build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    final int scaleZkTo = initialZkReplicas + 4;
    final List<String> newZkPodNames = new ArrayList<String>() {

        {
            for (int i = initialZkReplicas; i < scaleZkTo; i++) {
                add(KafkaResources.zookeeperPodName(testStorage.getClusterName(), i));
            }
        }
    };
    LOGGER.info("Scale up Zookeeper to {}", scaleZkTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(scaleZkTo), testStorage.getNamespaceName());
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    RollingUpdateUtils.waitForComponentAndPodsReady(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), scaleZkTo);
    // check that the new nodes are in either leader or follower state
    KafkaUtils.waitForZkMntr(testStorage.getNamespaceName(), testStorage.getClusterName(), ZK_SERVER_STATE, 0, 1, 2, 3, 4, 5, 6);
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Create a new topic to ensure that ZooKeeper is working properly
    String scaleUpTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), scaleUpTopicName, 1, 1).build());
    clients = new KafkaClientsBuilder(clients).withTopicName(scaleUpTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // scale down
    LOGGER.info("Scale down Zookeeper to {}", initialZkReplicas);
    // Get the UID of the last newly added ZooKeeper pod (newZkPodNames.get(3)) before scale-down deletes it
    String uid = kubeClient(testStorage.getNamespaceName()).getPodUid(newZkPodNames.get(3));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(initialZkReplicas), testStorage.getNamespaceName());
    RollingUpdateUtils.waitForComponentAndPodsReady(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), initialZkReplicas);
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    // Wait for one ZooKeeper pod to become the leader and the others to become followers
    KafkaUtils.waitForZkMntr(testStorage.getNamespaceName(), testStorage.getClusterName(), ZK_SERVER_STATE, 0, 1, 2);
    resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Create a new topic to ensure that ZooKeeper is working properly
    String scaleDownTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), scaleDownTopicName, 1, 1).build());
    clients = new KafkaClientsBuilder(clients).withTopicName(scaleDownTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
    resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    // Test that the deleted (scaled-down) pod has the 'Killing' event
    assertThat(kubeClient(testStorage.getNamespaceName()).listEventsByResourceUid(uid), hasAllOfReasons(Killing));
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) ArrayList(java.util.ArrayList) TestStorage(io.strimzi.systemtest.storage.TestStorage) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
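
A stylistic note on the newZkPodNames construction above: the double-brace initializer allocates an anonymous ArrayList subclass on every use; a plain loop builds the same list of generated pod names more idiomatically. A sketch with the same identifiers:

    List<String> newZkPodNames = new ArrayList<>();
    for (int i = initialZkReplicas; i < scaleZkTo; i++) {
        // ZooKeeper pod names follow "<cluster>-zookeeper-<ordinal>"
        newZkPodNames.add(KafkaResources.zookeeperPodName(testStorage.getClusterName(), i));
    }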

Aggregations

TestStorage (io.strimzi.systemtest.storage.TestStorage): 210
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder): 152
ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest): 150
KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients): 150
Tag (org.junit.jupiter.api.Tag): 128
KRaftNotSupported (io.strimzi.systemtest.annotations.KRaftNotSupported): 94
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 72
Matchers.containsString (org.hamcrest.Matchers.containsString): 70
HashMap (java.util.HashMap): 54
Secret (io.fabric8.kubernetes.api.model.Secret): 46
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 46
Map (java.util.Map): 34
KafkaResources (io.strimzi.api.kafka.model.KafkaResources): 32
AbstractST (io.strimzi.systemtest.AbstractST): 32
Constants (io.strimzi.systemtest.Constants): 32
REGRESSION (io.strimzi.systemtest.Constants.REGRESSION): 32
Environment (io.strimzi.systemtest.Environment): 32
KafkaTemplates (io.strimzi.systemtest.templates.crd.KafkaTemplates): 32
KafkaTopicTemplates (io.strimzi.systemtest.templates.crd.KafkaTopicTemplates): 32
ClientUtils (io.strimzi.systemtest.utils.ClientUtils): 32