
Example 21 with KafkaClients

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi by strimzi.

the class QuotasST method testKafkaQuotasPluginIntegration.

/**
 * Test to check Kafka Quotas Plugin for disk space
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testKafkaQuotasPluginIntegration(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String producerName = "quotas-producer";
    final String consumerName = "quotas-consumer";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 1)
        .editSpec()
            .editKafka()
                .addToConfig("client.quota.callback.class", "io.strimzi.kafka.quotas.StaticQuotaCallback")
                .addToConfig("client.quota.callback.static.storage.hard", "55000000")
                .addToConfig("client.quota.callback.static.storage.soft", "50000000")
                .addToConfig("client.quota.callback.static.storage.check-interval", "5")
                .withNewPersistentClaimStorage()
                    .withSize("1Gi")
                .endPersistentClaimStorage()
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    // Send more messages than the disk can store to check that the quota integration works
    KafkaClients basicClients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(100000000)
        .withDelayMs(0)
        .withMessage(String.join("", Collections.nCopies(1000, "#")))
        .build();
    resourceManager.createResource(extensionContext, basicClients.producerStrimzi());
    // The Kafka Quotas Plugin should stop the producer within roughly 10-20 seconds at the configured throughput
    assertThrows(WaitException.class, () -> JobUtils.waitForJobFailure(producerName, INFRA_NAMESPACE, 120_000));
    String kafkaLog = kubeClient(namespaceName).logs(KafkaResources.kafkaPodName(clusterName, 0));
    String softLimitLog = "disk is beyond soft limit";
    String hardLimitLog = "disk is full";
    assertThat("Kafka log doesn't contain '" + softLimitLog + "' log", kafkaLog, CoreMatchers.containsString(softLimitLog));
    assertThat("Kafka log doesn't contain '" + hardLimitLog + "' log", kafkaLog, CoreMatchers.containsString(hardLimitLog));
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
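
The same KafkaClientsBuilder flow recurs throughout these examples. Below is a minimal sketch of it, assuming the same test-class context as above (resourceManager, extensionContext, clusterName, topicName, namespaceName); the Job names and message count are illustrative, not taken from QuotasST:

// Minimal sketch of the shared KafkaClients pattern; names and counts are illustrative assumptions.
KafkaClients exampleClients = new KafkaClientsBuilder()
    .withProducerName("example-producer")   // hypothetical Job name
    .withConsumerName("example-consumer")   // hypothetical Job name
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
    .withTopicName(topicName)
    .withMessageCount(100)                  // small, illustrative message count
    .build();
// Create the producer and consumer Jobs, then wait for both to finish successfully.
resourceManager.createResource(extensionContext, exampleClients.producerStrimzi());
resourceManager.createResource(extensionContext, exampleClients.consumerStrimzi());
ClientUtils.waitForClientSuccess("example-producer", namespaceName, 100);
ClientUtils.waitForClientSuccess("example-consumer", namespaceName, 100);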

Example 22 with KafkaClients

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi by strimzi.

the class OauthScopeIsolatedST method testClientScopeKafkaSetCorrectly.

@ParallelTest
void testClientScopeKafkaSetCorrectly(ExtensionContext extensionContext) throws UnexpectedException {
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = OAUTH_PRODUCER_NAME + "-" + clusterName;
    final String consumerName = OAUTH_CONSUMER_NAME + "-" + clusterName;
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    KafkaClients oauthInternalClientChecksJob = new KafkaClientsBuilder()
        .withNamespaceName(INFRA_NAMESPACE)
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.bootstrapServiceName(oauthClusterName) + ":" + scopeListenerPort)
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withAdditionalConfig(additionalOauthConfig)
        .build();
    // clientScope is set to 'test' by default
    // verification phase: check that the KafkaClient is able to authenticate
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicName, INFRA_NAMESPACE).build());
    resourceManager.createResource(extensionContext, oauthInternalClientChecksJob.producerStrimzi());
    // the client should succeed because we set `clientScope=test` and Kafka also expects `scope=test`
    ClientUtils.waitForClientSuccess(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, producerName);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)
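
For contrast, a hedged sketch of the failure path, which is not part of the method above: if additionalOauthConfig carried a scope the listener does not accept, the producer Job would be expected to fail rather than succeed, which can be asserted with the JobUtils helpers already used in Example 21 (the timeout value is illustrative):

// Hypothetical negative check: with a non-matching clientScope, the producer
// Job is expected to fail instead of completing successfully.
resourceManager.createResource(extensionContext, oauthInternalClientChecksJob.producerStrimzi());
JobUtils.waitForJobFailure(producerName, INFRA_NAMESPACE, 120_000);
JobUtils.deleteJobWithWait(INFRA_NAMESPACE, producerName);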

Example 23 with KafkaClients

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi by strimzi.

the class DrainCleanerIsolatedST method testDrainCleanerWithComponentsDuringNodeDraining.

@IsolatedTest
@MultiNodeClusterOnly
void testDrainCleanerWithComponentsDuringNodeDraining(ExtensionContext extensionContext) {
    TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
    String rackKey = "rack-key";
    final int replicas = 3;
    int size = 5;
    List<String> topicNames = IntStream.range(0, size).boxed().map(i -> testStorage.getTopicName() + "-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getProducerName() + "-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getConsumerName() + "-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed().map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
        .editMetadata()
            .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withNewRack()
                    .withTopologyKey(rackKey)
                .endRack()
                .editOrNewTemplate()
                    .editOrNewPodDisruptionBudget()
                        .withMaxUnavailable(0)
                    .endPodDisruptionBudget()
                    .withNewPod()
                        .withAffinity(new AffinityBuilder()
                            .withNewPodAntiAffinity()
                                .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                    .editOrNewLabelSelector()
                                        .addNewMatchExpression()
                                            .withKey(rackKey)
                                            .withOperator("In")
                                            .withValues("zone")
                                        .endMatchExpression()
                                    .endLabelSelector()
                                    .withTopologyKey(rackKey)
                                .endRequiredDuringSchedulingIgnoredDuringExecution()
                            .endPodAntiAffinity()
                            .build())
                    .endPod()
                .endTemplate()
            .endKafka()
            .editZookeeper()
                .editOrNewTemplate()
                    .editOrNewPodDisruptionBudget()
                        .withMaxUnavailable(0)
                    .endPodDisruptionBudget()
                    .withNewPod()
                        .withAffinity(new AffinityBuilder()
                            .withNewPodAntiAffinity()
                                .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                    .editOrNewLabelSelector()
                                        .addNewMatchExpression()
                                            .withKey(rackKey)
                                            .withOperator("In")
                                            .withValues("zone")
                                        .endMatchExpression()
                                    .endLabelSelector()
                                    .withTopologyKey(rackKey)
                                .endRequiredDuringSchedulingIgnoredDuringExecution()
                            .endPodAntiAffinity()
                            .build())
                    .endPod()
                .endTemplate()
            .endZookeeper()
        .endSpec()
        .build());
    topicNames.forEach(topic -> resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), topic, 3, 3, 2).editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata().build()));
    drainCleaner.createDrainCleaner(extensionContext);
    String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
    String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
    Map<String, List<String>> nodesWithPods = NodeUtils.getPodsForEachNodeInNamespace(Constants.DRAIN_CLEANER_NAMESPACE);
    // remove from the map all pods whose names don't contain "kafka" or "zookeeper"
    nodesWithPods.forEach((node, podlist) -> podlist.retainAll(podlist.stream().filter(podName -> (podName.contains("kafka") || podName.contains("zookeeper"))).collect(Collectors.toList())));
    String producerAdditionConfiguration = "delivery.timeout.ms=30000\nrequest.timeout.ms=30000";
    KafkaClients kafkaBasicExampleClients;
    for (int i = 0; i < size; i++) {
        kafkaBasicExampleClients = new KafkaClientsBuilder()
            .withProducerName(producerNames.get(i))
            .withConsumerName(consumerNames.get(i))
            .withTopicName(topicNames.get(i))
            .withConsumerGroup(continuousConsumerGroups.get(i))
            .withMessageCount(300)
            .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
            .withDelayMs(1000)
            .withAdditionalConfig(producerAdditionConfiguration)
            .build();
        resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
    }
    LOGGER.info("Starting Node drain");
    nodesWithPods.forEach((nodeName, podList) -> {
        String zkPodName = podList.stream().filter(podName -> podName.contains("zookeeper")).findFirst().get();
        String kafkaPodName = podList.stream().filter(podName -> podName.contains("kafka")).findFirst().get();
        Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(kafkaPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(zkPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        NodeUtils.drainNode(nodeName);
        NodeUtils.cordonNode(nodeName, true);
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
    });
    producerNames.forEach(producer -> ClientUtils.waitTillContinuousClientsFinish(producer, consumerNames.get(producerNames.indexOf(producer)), Constants.DRAIN_CLEANER_NAMESPACE, 300));
    producerNames.forEach(producer -> KubeClusterResource.kubeClient().deleteJob(producer));
    consumerNames.forEach(consumer -> KubeClusterResource.kubeClient().deleteJob(consumer));
}
Also used : AbstractST(io.strimzi.systemtest.AbstractST) IntStream(java.util.stream.IntStream) ResourceManager.kubeClient(io.strimzi.systemtest.resources.ResourceManager.kubeClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) TestStorage(io.strimzi.systemtest.storage.TestStorage) NodeUtils(io.strimzi.systemtest.utils.kubeUtils.objects.NodeUtils) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) KubeClusterResource(io.strimzi.test.k8s.KubeClusterResource) BeforeAll(org.junit.jupiter.api.BeforeAll) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) MultiNodeClusterOnly(io.strimzi.systemtest.annotations.MultiNodeClusterOnly) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) BeforeAllOnce(io.strimzi.systemtest.BeforeAllOnce) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) IsolatedSuite(io.strimzi.systemtest.annotations.IsolatedSuite) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Constants(io.strimzi.systemtest.Constants) KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) Collectors(java.util.stream.Collectors) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) AffinityBuilder(io.fabric8.kubernetes.api.model.AffinityBuilder) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) RequiredMinKubeApiVersion(io.strimzi.systemtest.annotations.RequiredMinKubeApiVersion) AfterEach(org.junit.jupiter.api.AfterEach) List(java.util.List) SetupDrainCleaner(io.strimzi.systemtest.resources.draincleaner.SetupDrainCleaner) Logger(org.apache.logging.log4j.Logger) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) LogManager(org.apache.logging.log4j.LogManager) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION)
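
The body of the nodesWithPods.forEach above bundles snapshot, drain, cordon, and roll-wait for each node. Here is a sketch of that step pulled into a hypothetical helper, reusing the same utility calls and namespace as the test; the method name and signature are illustrative and not part of DrainCleanerIsolatedST:

// Hypothetical helper: snapshot the Kafka pod on the node, drain and cordon the
// node, then wait for the Drain Cleaner to roll the evicted pod back to Ready.
void drainNodeAndWaitForKafkaRoll(String nodeName, String kafkaPodName, int replicas, TestStorage testStorage) {
    // keep only the snapshot entry for the pod that lives on the drained node
    Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
        .entrySet().stream()
        .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    NodeUtils.drainNode(nodeName);
    NodeUtils.cordonNode(nodeName, true);
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
}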

Example 24 with KafkaClients

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi by strimzi.

the class AbstractUpgradeST method setupEnvAndUpgradeClusterOperator.

protected void setupEnvAndUpgradeClusterOperator(ExtensionContext extensionContext, JsonObject testParameters, String producerName, String consumerName, String continuousTopicName, String continuousConsumerGroup, String kafkaVersion, String namespace) throws IOException {
    int continuousClientsMessageCount = testParameters.getJsonObject("client").getInteger("continuousClientsMessages");
    LOGGER.info("Test upgrade of ClusterOperator from version {} to version {}", testParameters.getString("fromVersion"), testParameters.getString("toVersion"));
    cluster.setNamespace(namespace);
    String operatorVersion = testParameters.getString("fromVersion");
    String url = null;
    File dir = null;
    if ("HEAD".equals(testParameters.getString("fromVersion"))) {
        coDir = new File(TestUtils.USER_PATH + "/../packaging/install/cluster-operator");
    } else {
        url = testParameters.getString("urlFrom");
        dir = FileUtils.downloadAndUnzip(url);
        coDir = new File(dir, testParameters.getString("fromExamples") + "/install/cluster-operator/");
    }
    // Modify + apply installation files
    copyModifyApply(coDir, namespace, extensionContext, testParameters.getString("strimziFeatureGatesFlagsBefore"));
    LOGGER.info("Waiting for {} deployment", ResourceManager.getCoDeploymentName());
    DeploymentUtils.waitForDeploymentAndPodsReady(ResourceManager.getCoDeploymentName(), 1);
    LOGGER.info("{} is ready", ResourceManager.getCoDeploymentName());
    if (!cmdKubeClient().getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL, operatorVersion)).contains(clusterName)) {
        // Deploy a Kafka cluster
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
                .editSpec()
                    .editKafka()
                        .withVersion(kafkaVersion)
                        .addToConfig("log.message.format.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).messageVersion())
                        .addToConfig("inter.broker.protocol.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).protocolVersion())
                    .endKafka()
                .endSpec()
                .build());
        } else {
            kafkaYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/kafka/kafka-persistent.yaml");
            LOGGER.info("Deploy Kafka from: {}", kafkaYaml.getPath());
            // Change the Kafka version if it's empty (null removes the version)
            cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaVersion(kafkaYaml, kafkaVersion));
            // Wait for readiness
            waitForReadinessOfKafkaCluster();
        }
    }
    if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion)).contains(userName)) {
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName).build());
        } else {
            kafkaUserYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/user/kafka-user.yaml");
            LOGGER.info("Deploy KafkaUser from: {}", kafkaUserYaml.getPath());
            cmdKubeClient().applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization"));
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion), userName);
        }
    }
    if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(topicName)) {
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
        } else {
            kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
            LOGGER.info("Deploy KafkaTopic from: {}", kafkaTopicYaml.getPath());
            cmdKubeClient().create(kafkaTopicYaml);
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), topicName);
        }
    }
    // Create a bunch of topics for the upgrade if specified in the configuration
    if (testParameters.getBoolean("generateTopics")) {
        for (int x = 0; x < upgradeTopicCount; x++) {
            if ("HEAD".equals(testParameters.getString("fromVersion"))) {
                resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, topicName + "-" + x, 1, 1, 1).editSpec().withTopicName(topicName + "-" + x).endSpec().build());
            } else {
                kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
                cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace("name: \"my-topic\"", "name: \"" + topicName + "-" + x + "\""));
            }
        }
    }
    if (continuousClientsMessageCount != 0) {
        // Set up a topic with 3 replicas and min.insync.replicas=2 to check whether the producer keeps working during the rolling update
        if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(continuousTopicName)) {
            String pathToTopicExamples = testParameters.getString("fromExamples").equals("HEAD") ? PATH_TO_KAFKA_TOPIC_CONFIG : testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml";
            kafkaTopicYaml = new File(dir, pathToTopicExamples);
            cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace("name: \"my-topic\"", "name: \"" + continuousTopicName + "\"").replace("partitions: 1", "partitions: 3").replace("replicas: 1", "replicas: 3") + "    min.insync.replicas: 2");
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), continuousTopicName);
        }
        String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
        KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
            .withProducerName(producerName)
            .withConsumerName(consumerName)
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
            .withTopicName(continuousTopicName)
            .withMessageCount(continuousClientsMessageCount)
            .withAdditionalConfig(producerAdditionConfiguration)
            .withConsumerGroup(continuousConsumerGroup)
            .withDelayMs(1000)
            .build();
        resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
        resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################
    }
    makeSnapshots();
    logPodImages(clusterName);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) TestUtils(io.strimzi.test.TestUtils) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) File(java.io.File)
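
The method above repeats the same guard before every create: use cmdKubeClient() to check whether the resource already exists for the operator's API version. A sketch of that guard as a hypothetical helper; the name resourceMissing is illustrative, and the original class keeps the check inline:

// Hypothetical helper wrapping the repeated "create only if missing" check.
private boolean resourceMissing(String resourcePlural, String operatorVersion, String resourceName) {
    // getResourceApiVersion() resolves the apiVersion matching the installed operator version
    return !cmdKubeClient().getResources(getResourceApiVersion(resourcePlural, operatorVersion)).contains(resourceName);
}

With it, the topic block above would read if (resourceMissing(KafkaTopic.RESOURCE_PLURAL, operatorVersion, topicName)) { ... }.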

Example 25 with KafkaClients

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi by strimzi.

the class KafkaUpgradeDowngradeIsolatedST method runVersionChange.

@SuppressWarnings({ "checkstyle:MethodLength" })
void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersion, String producerName, String consumerName, String initLogMsgFormat, String initInterBrokerProtocol, int kafkaReplicas, int zkReplicas, ExtensionContext testContext) {
    boolean isUpgrade = initialVersion.isUpgrade(newVersion);
    Map<String, String> kafkaPods;
    boolean sameMinorVersion = initialVersion.protocolVersion().equals(newVersion.protocolVersion());
    if (KafkaResource.kafkaClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get() == null) {
        LOGGER.info("Deploying initial Kafka version {} with logMessageFormat={} and interBrokerProtocol={}", initialVersion.version(), initLogMsgFormat, initInterBrokerProtocol);
        KafkaBuilder kafka = KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas, zkReplicas).editSpec().editKafka().withVersion(initialVersion.version()).withConfig(null).endKafka().endSpec();
        // Do not set log.message.format.version if it's not passed to the method
        if (initLogMsgFormat != null) {
            kafka.editSpec().editKafka().addToConfig("log.message.format.version", initLogMsgFormat).endKafka().endSpec();
        }
        // Do not set inter.broker.protocol.version if it's not passed to the method
        if (initInterBrokerProtocol != null) {
            kafka.editSpec().editKafka().addToConfig("inter.broker.protocol.version", initInterBrokerProtocol).endKafka().endSpec();
        }
        resourceManager.createResource(testContext, kafka.build());
        // ##############################
        // Attach clients that will continuously produce/consume messages to/from the Kafka brokers during the rolling update
        // ##############################
        // Set up a topic with 3 replicas and min.insync.replicas=2 to check whether the producer keeps working during the rolling update
        resourceManager.createResource(testContext, KafkaTopicTemplates.topic(clusterName, continuousTopicName, 3, 3, 2).build());
        String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
        KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
            .withProducerName(producerName)
            .withConsumerName(consumerName)
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
            .withTopicName(continuousTopicName)
            .withMessageCount(continuousClientsMessageCount)
            .withAdditionalConfig(producerAdditionConfiguration)
            .withDelayMs(1000)
            .build();
        resourceManager.createResource(testContext, kafkaBasicClientJob.producerStrimzi());
        resourceManager.createResource(testContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################
    } else {
        LOGGER.info("Initial Kafka version (" + initialVersion.version() + ") is already ready");
        kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
        // Wait for log.message.format.version and inter.broker.protocol.version change
        if (!sameMinorVersion && !isUpgrade && !testContext.getDisplayName().contains("DowngradeToOlderMessageFormat")) {
            // If the initial config was set, the CR was already updated and the CO won't make any changes
            KafkaResource.replaceKafkaResource(clusterName, kafka -> {
                LOGGER.info("Kafka config before updating '{}'", kafka.getSpec().getKafka().getConfig().toString());
                Map<String, Object> config = kafka.getSpec().getKafka().getConfig();
                config.put("log.message.format.version", newVersion.messageVersion());
                config.put("inter.broker.protocol.version", newVersion.protocolVersion());
                kafka.getSpec().getKafka().setConfig(config);
                LOGGER.info("Kafka config after updating '{}'", kafka.getSpec().getKafka().getConfig().toString());
            });
            RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
        }
    }
    LOGGER.info("Deployment of initial Kafka version (" + initialVersion.version() + ") complete");
    String zkVersionCommand = "ls libs | grep -Po 'zookeeper-\\K\\d+.\\d+.\\d+' | head -1";
    String zkResult = cmdKubeClient().execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "/bin/bash", "-c", zkVersionCommand).out().trim();
    LOGGER.info("Pre-change Zookeeper version query returned: " + zkResult);
    String kafkaVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(KafkaResources.kafkaPodName(clusterName, 0));
    LOGGER.info("Pre-change Kafka version query returned: " + kafkaVersionResult);
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    LOGGER.info("Updating Kafka CR version field to " + newVersion.version());
    // Change the version in Kafka CR
    KafkaResource.replaceKafkaResource(clusterName, kafka -> {
        kafka.getSpec().getKafka().setVersion(newVersion.version());
    });
    LOGGER.info("Waiting for readiness of new Kafka version (" + newVersion.version() + ") to complete");
    // Wait for the zk version change roll
    zkPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(INFRA_NAMESPACE, zkSelector, zkReplicas, zkPods);
    LOGGER.info("1st Zookeeper roll (image change) is complete");
    // Wait for the kafka broker version change roll
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaPods);
    LOGGER.info("1st Kafka roll (image change) is complete");
    Object currentLogMessageFormat = KafkaResource.kafkaClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("log.message.format.version");
    Object currentInterBrokerProtocol = KafkaResource.kafkaClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("inter.broker.protocol.version");
    if (isUpgrade && !sameMinorVersion) {
        LOGGER.info("Kafka version is increased, two RUs remaining for increasing IBPV and LMFV");
        if (currentInterBrokerProtocol == null) {
            kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaPods);
            LOGGER.info("Kafka roll (inter.broker.protocol.version) is complete");
        }
        // Only Kafka versions before 3.0.0 require the second roll
        if (currentLogMessageFormat == null && TestKafkaVersion.compareDottedVersions(newVersion.protocolVersion(), "3.0") < 0) {
            kafkaPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
            LOGGER.info("Kafka roll (log.message.format.version) is complete");
        }
    }
    LOGGER.info("Deployment of Kafka (" + newVersion.version() + ") complete");
    PodUtils.verifyThatRunningPodsAreStable(KafkaResources.kafkaStatefulSetName(clusterName));
    // Extract the zookeeper version number from the jars in the lib directory
    zkResult = cmdKubeClient().execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "/bin/bash", "-c", zkVersionCommand).out().trim();
    LOGGER.info("Post-change Zookeeper version query returned: " + zkResult);
    assertThat("Zookeeper container had version " + zkResult + " where " + newVersion.zookeeperVersion() + " was expected", zkResult, is(newVersion.zookeeperVersion()));
    // Extract the Kafka version number from the jars in the lib directory
    kafkaVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(KafkaResources.kafkaPodName(clusterName, 0));
    LOGGER.info("Post-change Kafka version query returned: " + kafkaVersionResult);
    assertThat("Kafka container had version " + kafkaVersionResult + " where " + newVersion.version() + " was expected", kafkaVersionResult, is(newVersion.version()));
    if (isUpgrade && !sameMinorVersion) {
        LOGGER.info("Updating kafka config attribute 'log.message.format.version' from '{}' to '{}' version", initialVersion.messageVersion(), newVersion.messageVersion());
        LOGGER.info("Updating kafka config attribute 'inter.broker.protocol.version' from '{}' to '{}' version", initialVersion.protocolVersion(), newVersion.protocolVersion());
        KafkaResource.replaceKafkaResource(clusterName, kafka -> {
            LOGGER.info("Kafka config before updating '{}'", kafka.getSpec().getKafka().getConfig().toString());
            Map<String, Object> config = kafka.getSpec().getKafka().getConfig();
            config.put("log.message.format.version", newVersion.messageVersion());
            config.put("inter.broker.protocol.version", newVersion.protocolVersion());
            kafka.getSpec().getKafka().setConfig(config);
            LOGGER.info("Kafka config after updating '{}'", kafka.getSpec().getKafka().getConfig().toString());
        });
        if (currentLogMessageFormat != null || currentInterBrokerProtocol != null) {
            LOGGER.info("Change of configuration is done manually - RollingUpdate");
            // Wait for the kafka broker version of log.message.format.version change roll
            RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
            LOGGER.info("Kafka roll (log.message.format.version change) is complete");
        } else {
            LOGGER.info("ClusterOperator already changed the configuration, there should be no RollingUpdate");
            PodUtils.verifyThatRunningPodsAreStable(KafkaResources.kafkaStatefulSetName(clusterName));
            assertFalse(RollingUpdateUtils.componentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaPods));
        }
    }
    if (!isUpgrade) {
        LOGGER.info("Verifying that log.message.format attribute updated correctly to version {}", initLogMsgFormat);
        assertThat(Crds.kafkaOperation(kubeClient(INFRA_NAMESPACE).getClient()).inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("log.message.format.version"), is(initLogMsgFormat));
        LOGGER.info("Verifying that inter.broker.protocol.version attribute updated correctly to version {}", initInterBrokerProtocol);
        assertThat(Crds.kafkaOperation(kubeClient(INFRA_NAMESPACE).getClient()).inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("inter.broker.protocol.version"), is(initInterBrokerProtocol));
    } else {
        if (currentLogMessageFormat != null || currentInterBrokerProtocol != null) {
            LOGGER.info("Verifying that log.message.format attribute updated correctly to version {}", newVersion.messageVersion());
            assertThat(Crds.kafkaOperation(kubeClient(INFRA_NAMESPACE).getClient()).inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("log.message.format.version"), is(newVersion.messageVersion()));
            LOGGER.info("Verifying that inter.broker.protocol.version attribute updated correctly to version {}", newVersion.protocolVersion());
            assertThat(Crds.kafkaOperation(kubeClient(INFRA_NAMESPACE).getClient()).inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("inter.broker.protocol.version"), is(newVersion.protocolVersion()));
        }
    }
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder)
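
runVersionChange applies the same replaceKafkaResource lambda in two places to update log.message.format.version and inter.broker.protocol.version. A sketch of that update factored into a hypothetical helper; the name and signature are illustrative, not part of KafkaUpgradeDowngradeIsolatedST:

// Hypothetical helper mirroring the repeated config update in runVersionChange.
private void setMessageFormatAndProtocolVersions(String clusterName, TestKafkaVersion newVersion) {
    KafkaResource.replaceKafkaResource(clusterName, kafka -> {
        Map<String, Object> config = kafka.getSpec().getKafka().getConfig();
        config.put("log.message.format.version", newVersion.messageVersion());
        config.put("inter.broker.protocol.version", newVersion.protocolVersion());
        kafka.getSpec().getKafka().setConfig(config);
    });
}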

Aggregations

KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients)40 KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder)40 Tag (org.junit.jupiter.api.Tag)22 LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector)12 KafkaResources (io.strimzi.api.kafka.model.KafkaResources)12 IsolatedTest (io.strimzi.systemtest.annotations.IsolatedTest)12 ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)12 KafkaTemplates (io.strimzi.systemtest.templates.crd.KafkaTemplates)12 KafkaTopicTemplates (io.strimzi.systemtest.templates.crd.KafkaTopicTemplates)12 ClientUtils (io.strimzi.systemtest.utils.ClientUtils)12 List (java.util.List)12 ExtensionContext (org.junit.jupiter.api.extension.ExtensionContext)12 PodBuilder (io.fabric8.kubernetes.api.model.PodBuilder)10 AbstractST (io.strimzi.systemtest.AbstractST)10 REGRESSION (io.strimzi.systemtest.Constants.REGRESSION)10 SetupClusterOperator (io.strimzi.systemtest.resources.operator.SetupClusterOperator)10 Random (java.util.Random)10 LogManager (org.apache.logging.log4j.LogManager)10 Logger (org.apache.logging.log4j.Logger)10 GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder)8