
Example 1 with ROLLING_UPDATE

Use of io.strimzi.systemtest.Constants.ROLLING_UPDATE in project strimzi by strimzi.

From the class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestartUsingExternalClients.

@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(ROLLING_UPDATE)
void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9094)
                    .withType(KafkaListenerType.NODEPORT)
                    .withTls(false)
                    .build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, namespace).build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(namespace, clusterName, userName).build());
    ExternalKafkaClient externalKafkaClientTls = new ExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(namespace)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(userName)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();
    ExternalKafkaClient externalKafkaClientPlain = new ExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(namespace)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain(), externalKafkaClientPlain.receiveMessagesPlain());
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .withNewKafkaListenerAuthenticationTlsAuth()
                .endKafkaListenerAuthenticationTlsAuth()
                .build()));
    }, namespace);
    // TODO: remove it ?
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    externalKafkaClientTls.verifyProducedAndConsumedMessages(externalKafkaClientTls.sendMessagesTls() + MESSAGE_COUNT, externalKafkaClientTls.receiveMessagesTls());
    assertThrows(Exception.class, () -> {
        externalKafkaClientPlain.sendMessagesPlain();
        externalKafkaClientPlain.receiveMessagesPlain();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to tls communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Collections.singletonList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(false)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain() + MESSAGE_COUNT, externalKafkaClientPlain.receiveMessagesPlain());
}
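
The assertions in this test rely on RollingUpdateUtils.waitTillComponentHasRolled, which compares a pod snapshot taken before the listener change with the pods that exist afterwards. The class below is not the Strimzi implementation, only a minimal sketch of that snapshot-and-compare idea using the Fabric8 client; the namespace, label map, and class name are illustrative placeholders.

import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

import java.util.Map;
import java.util.stream.Collectors;

public class RollSnapshotSketch {

    // Map each selected pod's name to its UID; a recreated pod keeps its name but gets a new UID.
    static Map<String, String> snapshot(KubernetesClient client, String namespace, Map<String, String> labels) {
        return client.pods().inNamespace(namespace).withLabels(labels).list().getItems().stream()
            .collect(Collectors.toMap(p -> p.getMetadata().getName(), p -> p.getMetadata().getUid()));
    }

    // The component has rolled once no pod from the original snapshot still carries its old UID.
    static boolean hasRolled(Map<String, String> before, Map<String, String> after) {
        return before.entrySet().stream()
            .noneMatch(e -> e.getValue().equals(after.get(e.getKey())));
    }

    public static void main(String[] args) throws InterruptedException {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            // Assumed selector and namespace; the real test derives them via KafkaResource.getLabelSelector.
            Map<String, String> labels = Map.of("strimzi.io/name", "my-cluster-kafka");
            String namespace = "my-namespace";
            Map<String, String> before = snapshot(client, namespace, labels);
            // ... change the listeners here, then poll until every broker pod was recreated ...
            while (!hasRolled(before, snapshot(client, namespace, labels))) {
                Thread.sleep(5_000);
            }
        }
    }
}

A recreated pod typically keeps its StatefulSet-assigned name but receives a new UID, which is what makes the before/after comparison cheap.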

Example 2 with ROLLING_UPDATE

Use of io.strimzi.systemtest.Constants.ROLLING_UPDATE in project strimzi by strimzi.

From the class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestart.

@Tag(NODEPORT_SUPPORTED)
@Tag(ROLLING_UPDATE)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
void testUpdateToExternalListenerCausesRollingRestart(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(false)
                        .build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    String kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Edit listeners - this should cause RU (because of new crts)
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("compression.type", "snappy");
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("compression.type=snappy"));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Remove external listeners (node port) - this should cause RU (we need to update advertised.listeners)
    // Other external listeners cases are rolling because of crts
    kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", false);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + false));
}
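
updateAndVerifyDynConf pushes the new broker config through the Kafka custom resource, and the test then reads it back with kafka-configs.sh inside the broker pod. The same read-back can also be expressed against the Kafka Admin API; the following is a minimal sketch under the assumption that a broker is reachable at the given bootstrap address (localhost:9092 here is only a placeholder), and is not part of the Strimzi test code.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class DynamicBrokerConfigSketch {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            ConfigResource broker0 = new ConfigResource(ConfigResource.Type.BROKER, "0");
            // Apply a per-broker dynamic config without restarting the broker.
            AlterConfigOp op = new AlterConfigOp(
                new ConfigEntry("unclean.leader.election.enable", "true"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(broker0, List.of(op))).all().get();
            // Read it back, as the test does with "kafka-configs.sh --describe".
            Config current = admin.describeConfigs(List.of(broker0)).all().get().get(broker0);
            System.out.println(current.get("unclean.leader.election.enable").value());
        }
    }
}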

Example 3 with ROLLING_UPDATE

Use of io.strimzi.systemtest.Constants.ROLLING_UPDATE in project strimzi by strimzi.

From the class RollingUpdateST, method testClusterOperatorFinishAllRollingUpdates.

@IsolatedTest("Deleting Pod of Shared Cluster Operator")
@Tag(ROLLING_UPDATE)
void testClusterOperatorFinishAllRollingUpdates(ExtensionContext extensionContext) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editMetadata().withNamespace(namespace).endMetadata().build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(namespace, zkSelector);
    // Changes to readiness probe should trigger a rolling update
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        kafka.getSpec().getKafka().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
        kafka.getSpec().getZookeeper().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
    }, namespace);
    TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods(namespace).stream().filter(pod -> pod.getStatus().getPhase().equals("Running")).map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).size() < kubeClient().listPods().size());
    LabelSelector coLabelSelector = kubeClient(INFRA_NAMESPACE).getDeployment(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName()).getSpec().getSelector();
    LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
    kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
    LOGGER.info("Cluster Operator pod deleted");
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, zkSelector, 3, zkPods);
    TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods().stream().map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).contains("Pending"));
    LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
    kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
    LOGGER.info("Cluster Operator pod deleted");
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, 3, kafkaPods);
}
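
TestUtils.waitFor used above is a generic poll-until-true helper driven by a poll interval and a timeout. A minimal stand-in with the same shape (a sketch, not the Strimzi implementation):

import java.time.Duration;
import java.util.function.BooleanSupplier;

public final class WaitForSketch {

    // Poll a condition until it holds or the timeout expires, roughly what TestUtils.waitFor does.
    static void waitFor(String description, Duration pollInterval, Duration timeout, BooleanSupplier ready) {
        long deadline = System.nanoTime() + timeout.toNanos();
        while (!ready.getAsBoolean()) {
            if (System.nanoTime() > deadline) {
                throw new AssertionError("Timed out waiting for " + description);
            }
            try {
                Thread.sleep(pollInterval.toMillis());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new AssertionError("Interrupted while waiting for " + description, e);
            }
        }
    }

    public static void main(String[] args) {
        long start = System.currentTimeMillis();
        waitFor("five seconds to pass", Duration.ofMillis(200), Duration.ofSeconds(10),
            () -> System.currentTimeMillis() - start > 5_000);
    }
}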

Example 4 with ROLLING_UPDATE

Use of io.strimzi.systemtest.Constants.ROLLING_UPDATE in project strimzi-kafka-operator by strimzi.

From the class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestart.

@Tag(NODEPORT_SUPPORTED)
@Tag(ROLLING_UPDATE)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
void testUpdateToExternalListenerCausesRollingRestart(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(false)
                        .build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    String kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Edit listeners - this should cause RU (because of new crts)
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("compression.type", "snappy");
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("compression.type=snappy"));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Remove external listeners (node port) - this should cause RU (we need to update advertised.listeners)
    // Other external listeners cases are rolling because of crts
    kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", false);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + false));
}
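
The assertions grep the raw text printed by kafka-configs.sh --describe, which starts with the "Dynamic configs for broker 0 are:" header followed by one key=value entry per line. A small hypothetical helper that makes that expected format explicit (the exact output layout can vary between Kafka versions):

public class DescribeOutputSketch {

    // Returns true if the --describe output contains the given dynamic config entry,
    // e.g. a line starting with "unclean.leader.election.enable=false".
    static boolean dynamicConfigContains(String describeOutput, String key, String expectedValue) {
        return describeOutput.lines()
            .map(String::trim)
            .anyMatch(line -> line.startsWith(key + "=" + expectedValue));
    }

    public static void main(String[] args) {
        // Sample output shape only; taken as an assumption, not captured from a real broker.
        String sample = "Dynamic configs for broker 0 are:\n"
            + "  unclean.leader.election.enable=false sensitive=false synonyms={DYNAMIC_BROKER_CONFIG:unclean.leader.election.enable=false}\n";
        System.out.println(dynamicConfigContains(sample, "unclean.leader.election.enable", "false")); // true
    }
}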

Example 5 with ROLLING_UPDATE

Use of io.strimzi.systemtest.Constants.ROLLING_UPDATE in project strimzi-kafka-operator by strimzi.

From the class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestartUsingExternalClients.

@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(ROLLING_UPDATE)
void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9094)
                    .withType(KafkaListenerType.NODEPORT)
                    .withTls(false)
                    .build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, namespace).build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(namespace, clusterName, userName).build());
    ExternalKafkaClient externalKafkaClientTls = new ExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(namespace)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(userName)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();
    ExternalKafkaClient externalKafkaClientPlain = new ExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(namespace)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain(), externalKafkaClientPlain.receiveMessagesPlain());
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .build(),
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .withNewKafkaListenerAuthenticationTlsAuth()
                .endKafkaListenerAuthenticationTlsAuth()
                .build()));
    }, namespace);
    // TODO: remove it ?
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    externalKafkaClientTls.verifyProducedAndConsumedMessages(externalKafkaClientTls.sendMessagesTls() + MESSAGE_COUNT, externalKafkaClientTls.receiveMessagesTls());
    assertThrows(Exception.class, () -> {
        externalKafkaClientPlain.sendMessagesPlain();
        externalKafkaClientPlain.receiveMessagesPlain();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to tls communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Collections.singletonList(
            new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(false)
                .build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain() + MESSAGE_COUNT, externalKafkaClientPlain.receiveMessagesPlain());
}
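
ExternalKafkaClient wraps an ordinary producer/consumer round trip over the external NodePort listener, and verifyProducedAndConsumedMessages simply asserts that the sent and received counts match. A minimal sketch of that round trip with the vanilla Kafka clients, for the plain-listener case; the bootstrap address, topic, and counts are placeholders, and the TLS variant the test exercises would additionally need security.protocol=SSL plus trust-store settings:

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class ExternalClientSketch {
    public static void main(String[] args) {
        String bootstrap = "node-address:31234"; // placeholder NodePort address

        Properties producerProps = new Properties();
        producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap);
        producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        producerProps.put("security.protocol", "PLAINTEXT");

        int sent = 0;
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps)) {
            for (int i = 0; i < 100; i++) {
                producer.send(new ProducerRecord<>("my-topic", "message-" + i));
                sent++;
            }
        }

        Properties consumerProps = new Properties();
        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap);
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "external-client-sketch");
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        int received = 0;
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps)) {
            consumer.subscribe(List.of("my-topic"));
            long deadline = System.currentTimeMillis() + 30_000;
            while (received < sent && System.currentTimeMillis() < deadline) {
                received += consumer.poll(Duration.ofSeconds(1)).count();
            }
        }

        // The equivalent of verifyProducedAndConsumedMessages(sent, received).
        if (received != sent) {
            throw new AssertionError("Expected " + sent + " messages, received " + received);
        }
    }
}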

Aggregations

LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector) 6
KafkaResources (io.strimzi.api.kafka.model.KafkaResources) 6
AbstractST (io.strimzi.systemtest.AbstractST) 6
Constants (io.strimzi.systemtest.Constants) 6
REGRESSION (io.strimzi.systemtest.Constants.REGRESSION) 6
ROLLING_UPDATE (io.strimzi.systemtest.Constants.ROLLING_UPDATE) 6
IsolatedTest (io.strimzi.systemtest.annotations.IsolatedTest) 6
ParallelSuite (io.strimzi.systemtest.annotations.ParallelSuite) 6
KafkaResource (io.strimzi.systemtest.resources.crd.KafkaResource) 6
KafkaTemplates (io.strimzi.systemtest.templates.crd.KafkaTemplates) 6
KafkaTopicTemplates (io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) 6
KafkaUserTemplates (io.strimzi.systemtest.templates.crd.KafkaUserTemplates) 6
RollingUpdateUtils (io.strimzi.systemtest.utils.RollingUpdateUtils) 5
PodUtils (io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) 5
Collections (java.util.Collections) 5
HashMap (java.util.HashMap) 5
Map (java.util.Map) 5
Collectors (java.util.stream.Collectors) 5
LogManager (org.apache.logging.log4j.LogManager) 5
Logger (org.apache.logging.log4j.Logger) 5