Example 6 with KafkaClientsBuilder

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi by strimzi.

From the class FeatureGatesIsolatedST, method testSwitchingStrimziPodSetFeatureGateOnAndOff.

@IsolatedTest
void testSwitchingStrimziPodSetFeatureGateOnAndOff(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    int zkReplicas = 3;
    int kafkaReplicas = 3;
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 500;
    List<EnvVar> coEnvVars = new ArrayList<>();
    coEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-UseStrimziPodSets", null));
    LOGGER.info("Deploying CO with STS - SPS is disabled");
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(coEnvVars)
        .createInstallation()
        .runInstallation();
    LOGGER.info("Deploying Kafka");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, kafkaReplicas, zkReplicas).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    Map<String, String> coPod = DeploymentUtils.depSnapshot(ResourceManager.getCoDeploymentName());
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(1000)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    LOGGER.info("Changing FG env variable to enable SPS");
    coEnvVars = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME).getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    coEnvVars.stream().filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV)).findFirst().get().setValue("+UseStrimziPodSets");
    Deployment coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);
    coPod = DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    zkPods = RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);
    KafkaUtils.waitForKafkaReady(clusterName);
    LOGGER.info("Changing FG env variable to disable again SPS");
    coEnvVars.stream().filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV)).findFirst().get().setValue("");
    coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);
    DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, INFRA_NAMESPACE, messageCount);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) Annotations(io.strimzi.operator.common.Annotations) Random(java.util.Random) KafkaTopicUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) INFRA_NAMESPACE(io.strimzi.systemtest.Constants.INFRA_NAMESPACE) ArrayList(java.util.ArrayList) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Assumptions.assumeFalse(org.junit.jupiter.api.Assumptions.assumeFalse) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) BeforeAllOnce(io.strimzi.systemtest.BeforeAllOnce) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) KafkaUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils) IsolatedSuite(io.strimzi.systemtest.annotations.IsolatedSuite) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) JobUtils(io.strimzi.systemtest.utils.kubeUtils.controllers.JobUtils) Constants(io.strimzi.systemtest.Constants) Pod(io.fabric8.kubernetes.api.model.Pod) INTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) KubeClusterResource.kubeClient(io.strimzi.test.k8s.KubeClusterResource.kubeClient) ContainerPort(io.fabric8.kubernetes.api.model.ContainerPort) List(java.util.List) KafkaTopic(io.strimzi.api.kafka.model.KafkaTopic) DeploymentUtils(io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils) Logger(org.apache.logging.log4j.Logger) ResourceManager(io.strimzi.systemtest.resources.ResourceManager) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Deployment(io.fabric8.kubernetes.api.model.apps.Deployment) LogManager(org.apache.logging.log4j.LogManager) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION)
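
The pivotal step in the example above is flipping the STRIMZI_FEATURE_GATES environment variable on the running Cluster Operator Deployment and then waiting for the operator, ZooKeeper and Kafka pods to roll. A condensed, commented sketch of that step follows; it reuses the variables and helpers from the test above (coPod, zkPods, kafkaPods, the selectors and replica counts), so it is a fragment of the same test fixture rather than a standalone program.

// Read the current env vars of the Cluster Operator container and flip the feature gate.
Deployment coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
List<EnvVar> envVars = coDep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
envVars.stream()
    .filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV))
    .findFirst()
    .ifPresent(env -> env.setValue("+UseStrimziPodSets"));   // "-UseStrimziPodSets" or "" disables the gate again
coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(envVars);

// Apply the modified Deployment; the operator pod rolls first, then ZooKeeper and Kafka follow.
kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);
coPod = DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
zkPods = RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);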

Example 7 with KafkaClientsBuilder

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi by strimzi.

From the class DrainCleanerIsolatedST, method testDrainCleanerWithComponents.

@IsolatedTest
@RequiredMinKubeApiVersion(version = 1.17)
void testDrainCleanerWithComponents(ExtensionContext extensionContext) {
    TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
    final int replicas = 3;
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
        .editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata()
        .editSpec()
            .editKafka().editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().endTemplate().endKafka()
            .editZookeeper().editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().endTemplate().endZookeeper()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata().build());
    drainCleaner.createDrainCleaner(extensionContext);
    String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
    String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
    KafkaClients kafkaBasicExampleClients = new KafkaClientsBuilder()
        .withMessageCount(300)
        .withTopicName(testStorage.getTopicName())
        .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withDelayMs(1000)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
    for (int i = 0; i < replicas; i++) {
        String zkPodName = KafkaResources.zookeeperPodName(testStorage.getClusterName(), i);
        String kafkaPodName = KafkaResources.kafkaPodName(testStorage.getClusterName(), i);
        Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
            .entrySet().stream()
            .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector())
            .entrySet().stream()
            .filter(snapshot -> snapshot.getKey().equals(zkPodName))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        LOGGER.info("Evicting pods: {} and {}", zkPodName, kafkaPodName);
        kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(zkPodName).evict();
        kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(kafkaPodName).evict();
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
    }
    ClientUtils.waitTillContinuousClientsFinish(testStorage.getProducerName(), testStorage.getConsumerName(), Constants.DRAIN_CLEANER_NAMESPACE, 300);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) AbstractST(io.strimzi.systemtest.AbstractST) IntStream(java.util.stream.IntStream) ResourceManager.kubeClient(io.strimzi.systemtest.resources.ResourceManager.kubeClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) TestStorage(io.strimzi.systemtest.storage.TestStorage) NodeUtils(io.strimzi.systemtest.utils.kubeUtils.objects.NodeUtils) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) KubeClusterResource(io.strimzi.test.k8s.KubeClusterResource) BeforeAll(org.junit.jupiter.api.BeforeAll) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) MultiNodeClusterOnly(io.strimzi.systemtest.annotations.MultiNodeClusterOnly) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) BeforeAllOnce(io.strimzi.systemtest.BeforeAllOnce) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) IsolatedSuite(io.strimzi.systemtest.annotations.IsolatedSuite) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Constants(io.strimzi.systemtest.Constants) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) Collectors(java.util.stream.Collectors) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) AffinityBuilder(io.fabric8.kubernetes.api.model.AffinityBuilder) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) RequiredMinKubeApiVersion(io.strimzi.systemtest.annotations.RequiredMinKubeApiVersion) AfterEach(org.junit.jupiter.api.AfterEach) List(java.util.List) SetupDrainCleaner(io.strimzi.systemtest.resources.draincleaner.SetupDrainCleaner) Logger(org.apache.logging.log4j.Logger) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) LogManager(org.apache.logging.log4j.LogManager) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION)
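
The core of the example above is the evict-and-wait loop. Both the Kafka and ZooKeeper PodDisruptionBudgets are set to maxUnavailable: 0, so a plain eviction cannot complete on its own; the Drain Cleaner intercepts the eviction request and marks the pod for a rolling update, which the Cluster Operator then performs. A condensed, commented sketch of one loop iteration, reusing the testStorage fixture, the replicas count and the per-pod snapshots (zkPod, kafkaPod) from the test above:

// Request eviction of one ZooKeeper pod and one Kafka pod through the Kubernetes eviction API.
kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(zkPodName).evict();
kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(kafkaPodName).evict();

// Wait until the operator has rolled the evicted pods and all replicas are Ready again.
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);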

Example 8 with KafkaClientsBuilder

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi by strimzi.

From the class SpecificIsolatedST, method testRackAware.

@IsolatedTest("UtestRackAwareConnectWrongDeploymentsing more tha one Kafka cluster in one namespace")
@Tag(REGRESSION)
@Tag(INTERNAL_CLIENTS_USED)
void testRackAware(ExtensionContext extensionContext) {
    assumeFalse(Environment.isNamespaceRbacScope());
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String producerName = "hello-world-producer";
    String consumerName = "hello-world-consumer";
    String rackKey = "rack-key";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
        .editSpec()
            .editKafka().withNewRack().withTopologyKey(rackKey).endRack().endKafka()
        .endSpec()
        .build());
    Affinity kafkaPodSpecAffinity = StUtils.getStatefulSetOrStrimziPodSetAffinity(KafkaResources.kafkaStatefulSetName(clusterName));
    NodeSelectorRequirement kafkaPodNodeSelectorRequirement = kafkaPodSpecAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(kafkaPodNodeSelectorRequirement.getKey(), is(rackKey));
    assertThat(kafkaPodNodeSelectorRequirement.getOperator(), is("Exists"));
    PodAffinityTerm kafkaPodAffinityTerm = kafkaPodSpecAffinity.getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution().get(0).getPodAffinityTerm();
    assertThat(kafkaPodAffinityTerm.getTopologyKey(), is(rackKey));
    assertThat(kafkaPodAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/cluster", clusterName));
    assertThat(kafkaPodAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/name", KafkaResources.kafkaStatefulSetName(clusterName)));
    String rackId = cmdKubeClient().execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cat /opt/kafka/init/rack.id").out();
    assertThat(rackId.trim(), is("zone"));
    String brokerRack = cmdKubeClient().execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cat /tmp/strimzi.properties | grep broker.rack").out();
    assertThat(brokerRack.contains("broker.rack=zone"), is(true));
    String uid = kubeClient().getPodUid(KafkaResources.kafkaPodName(clusterName, 0));
    List<Event> events = kubeClient().listEventsByResourceUid(uid);
    assertThat(events, hasAllOfReasons(Scheduled, Pulled, Created, Started));
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(TOPIC_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withDelayMs(0)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) PodAffinityTerm(io.fabric8.kubernetes.api.model.PodAffinityTerm) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Affinity(io.fabric8.kubernetes.api.model.Affinity) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) NodeSelectorRequirement(io.fabric8.kubernetes.api.model.NodeSelectorRequirement) Event(io.fabric8.kubernetes.api.model.Event) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
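
The assertions in the example above show what the operator derives from spec.kafka.rack: required node affinity on the rack label and preferred pod anti-affinity that spreads brokers of the same cluster across racks. A condensed, commented sketch of that verification, reusing clusterName and rackKey from the test above:

// Required node affinity: brokers may only be scheduled on nodes carrying the configured rack label.
Affinity affinity = StUtils.getStatefulSetOrStrimziPodSetAffinity(KafkaResources.kafkaStatefulSetName(clusterName));
NodeSelectorRequirement nodeRequirement = affinity.getNodeAffinity()
    .getRequiredDuringSchedulingIgnoredDuringExecution()
    .getNodeSelectorTerms().get(0)
    .getMatchExpressions().get(0);
assertThat(nodeRequirement.getKey(), is(rackKey));
assertThat(nodeRequirement.getOperator(), is("Exists"));

// Preferred pod anti-affinity: brokers of the same cluster prefer to land on different values of the rack key.
PodAffinityTerm antiAffinityTerm = affinity.getPodAntiAffinity()
    .getPreferredDuringSchedulingIgnoredDuringExecution().get(0)
    .getPodAffinityTerm();
assertThat(antiAffinityTerm.getTopologyKey(), is(rackKey));
assertThat(antiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/cluster", clusterName));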

Example 9 with KafkaClientsBuilder

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi-kafka-operator by strimzi.

From the class FeatureGatesIsolatedST, method testStrimziPodSetsFeatureGate.

/**
 * UseStrimziPodSets feature gate
 * https://github.com/strimzi/proposals/blob/main/031-statefulset-removal.md
 */
@IsolatedTest("Feature Gates test for enabled UseStrimziPodSets gate")
@Tag(INTERNAL_CLIENTS_USED)
public void testStrimziPodSetsFeatureGate(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector zooSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 600;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int zooReplicas = 3;
    int kafkaReplicas = 3;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "+UseStrimziPodSets", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(testEnvVars)
        .createInstallation()
        .runInstallation();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName).editSpec().withReplicas(kafkaReplicas).withPartitions(kafkaReplicas).endSpec().build();
    resourceManager.createResource(extensionContext, kafkaTopic);
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(500)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    JobUtils.waitForJobRunning(consumerName, INFRA_NAMESPACE);
    // Delete one Zoo Pod
    Pod zooPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(INFRA_NAMESPACE, KafkaResources.zookeeperStatefulSetName(clusterName) + "-").get(0);
    LOGGER.info("Delete first found ZooKeeper pod {}", zooPod.getMetadata().getName());
    kubeClient(INFRA_NAMESPACE).deletePod(INFRA_NAMESPACE, zooPod);
    RollingUpdateUtils.waitForComponentAndPodsReady(zooSelector, zooReplicas);
    // Delete one Kafka Pod
    Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(INFRA_NAMESPACE, KafkaResources.kafkaStatefulSetName(clusterName) + "-").get(0);
    LOGGER.info("Delete first found Kafka broker pod {}", kafkaPod.getMetadata().getName());
    kubeClient(INFRA_NAMESPACE).deletePod(INFRA_NAMESPACE, kafkaPod);
    RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
    // Roll Zoo
    LOGGER.info("Force Rolling Update of ZooKeeper via annotation.");
    Map<String, String> zooPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zooSelector);
    zooPods.keySet().forEach(podName -> {
        kubeClient(INFRA_NAMESPACE).editPod(podName).edit(pod -> new PodBuilder(pod).editMetadata().addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true").endMetadata().build());
    });
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, zooSelector, zooReplicas, zooPods);
    // Roll Kafka
    LOGGER.info("Force Rolling Update of Kafka via annotation.");
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    kafkaPods.keySet().forEach(podName -> {
        kubeClient(INFRA_NAMESPACE).editPod(podName).edit(pod -> new PodBuilder(pod).editMetadata().addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true").endMetadata().build());
    });
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientSuccess(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Pod(io.fabric8.kubernetes.api.model.Pod) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) ArrayList(java.util.ArrayList) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) KafkaTopic(io.strimzi.api.kafka.model.KafkaTopic) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
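
Two details in the example above are specific to the UseStrimziPodSets gate: pods are managed through StrimziPodSets instead of StatefulSets (see the linked proposal), so a deleted pod is recreated by the Cluster Operator, and a manual rolling update is requested by annotating the pods themselves. A condensed, commented sketch of the manual roll, reusing zooSelector, zooReplicas and INFRA_NAMESPACE from the test above:

// Snapshot the current pods so the rolling update can be detected afterwards.
Map<String, String> zooPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zooSelector);

// Annotate each pod; the operator picks the annotation up on its next reconciliation and rolls the pod.
zooPods.keySet().forEach(podName -> kubeClient(INFRA_NAMESPACE).editPod(podName)
    .edit(pod -> new PodBuilder(pod)
        .editMetadata()
            .addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")
        .endMetadata()
        .build()));

// Block until every pod behind the selector has been replaced.
RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, zooSelector, zooReplicas, zooPods);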

Example 10 with KafkaClientsBuilder

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi-kafka-operator by strimzi.

From the class FeatureGatesIsolatedST, method testControlPlaneListenerFeatureGate.

/**
 * Control Plane Listener
 * https://github.com/strimzi/proposals/blob/main/025-control-plain-listener.md
 */
@IsolatedTest("Feature Gates test for disabled ControlPlainListener")
@Tag(INTERNAL_CLIENTS_USED)
public void testControlPlaneListenerFeatureGate(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 300;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int kafkaReplicas = 3;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-ControlPlaneListener", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(testEnvVars)
        .createInstallation()
        .runInstallation();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
    LOGGER.info("Check for presence of ContainerPort 9090/tcp (tcp-ctrlplane) in first Kafka pod.");
    final Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(INFRA_NAMESPACE, clusterName + "-kafka-").get(0);
    ContainerPort expectedControlPlaneContainerPort = new ContainerPort(9090, null, null, "tcp-ctrlplane", "TCP");
    List<ContainerPort> kafkaPodPorts = kafkaPod.getSpec().getContainers().get(0).getPorts();
    assertTrue(kafkaPodPorts.contains(expectedControlPlaneContainerPort));
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName).editSpec().withReplicas(kafkaReplicas).withPartitions(kafkaReplicas).endSpec().build();
    resourceManager.createResource(extensionContext, kafkaTopic);
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(500)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    JobUtils.waitForJobRunning(consumerName, INFRA_NAMESPACE);
    LOGGER.info("Delete first found Kafka broker pod.");
    kubeClient(INFRA_NAMESPACE).deletePod(INFRA_NAMESPACE, kafkaPod);
    RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
    LOGGER.info("Force Rolling Update of Kafka via annotation.");
    kafkaPods.keySet().forEach(podName -> {
        kubeClient(INFRA_NAMESPACE).editPod(podName).edit(pod -> new PodBuilder(pod).editMetadata().addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true").endMetadata().build());
    });
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientSuccess(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) Pod(io.fabric8.kubernetes.api.model.Pod) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) ArrayList(java.util.ArrayList) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) KafkaTopic(io.strimzi.api.kafka.model.KafkaTopic) ContainerPort(io.fabric8.kubernetes.api.model.ContainerPort) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
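
The distinguishing check in the example above is the broker's control-plane port: the first Kafka pod is expected to expose container port 9090/tcp named tcp-ctrlplane, the port intended for controller-to-broker traffic that the ControlPlaneListener gate governs. A condensed, commented sketch of that check, reusing clusterName and INFRA_NAMESPACE from the test above; it relies on the value-based equals of the fabric8 ContainerPort model class, just as the test does:

// Fetch the first broker pod and compare its declared container ports against the expected control-plane port.
Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(INFRA_NAMESPACE, clusterName + "-kafka-").get(0);
ContainerPort expectedControlPlanePort = new ContainerPort(9090, null, null, "tcp-ctrlplane", "TCP");
List<ContainerPort> kafkaPodPorts = kafkaPod.getSpec().getContainers().get(0).getPorts();
assertTrue(kafkaPodPorts.contains(expectedControlPlanePort));   // contains() compares all ContainerPort fields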

Aggregations

KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients): 40
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder): 40
Tag (org.junit.jupiter.api.Tag): 22
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 12
KafkaResources (io.strimzi.api.kafka.model.KafkaResources): 12
IsolatedTest (io.strimzi.systemtest.annotations.IsolatedTest): 12
ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest): 12
KafkaTemplates (io.strimzi.systemtest.templates.crd.KafkaTemplates): 12
KafkaTopicTemplates (io.strimzi.systemtest.templates.crd.KafkaTopicTemplates): 12
ClientUtils (io.strimzi.systemtest.utils.ClientUtils): 12
List (java.util.List): 12
ExtensionContext (org.junit.jupiter.api.extension.ExtensionContext): 12
PodBuilder (io.fabric8.kubernetes.api.model.PodBuilder): 10
AbstractST (io.strimzi.systemtest.AbstractST): 10
REGRESSION (io.strimzi.systemtest.Constants.REGRESSION): 10
SetupClusterOperator (io.strimzi.systemtest.resources.operator.SetupClusterOperator): 10
Random (java.util.Random): 10
LogManager (org.apache.logging.log4j.LogManager): 10
Logger (org.apache.logging.log4j.Logger): 10
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 8