Search in sources:

Example 1 with NODEPORT_SUPPORTED

Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi by strimzi.

From class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestartUsingExternalClients.

@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(ROLLING_UPDATE)
void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, namespace).build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(namespace, clusterName, userName).build());
    ExternalKafkaClient externalKafkaClientTls = new ExternalKafkaClient.Builder().withTopicName(topicName).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withKafkaUsername(userName).withSecurityProtocol(SecurityProtocol.SSL).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
    ExternalKafkaClient externalKafkaClientPlain = new ExternalKafkaClient.Builder().withTopicName(topicName).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withSecurityProtocol(SecurityProtocol.PLAINTEXT).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain(), externalKafkaClientPlain.receiveMessagesPlain());
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build(),
            new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth().build()));
    }, namespace);
    // TODO: remove it ?
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    externalKafkaClientTls.verifyProducedAndConsumedMessages(externalKafkaClientTls.sendMessagesTls() + MESSAGE_COUNT, externalKafkaClientTls.receiveMessagesTls());
    assertThrows(Exception.class, () -> {
        externalKafkaClientPlain.sendMessagesPlain();
        externalKafkaClientPlain.receiveMessagesPlain();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to tls communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Collections.singletonList(new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain() + MESSAGE_COUNT, externalKafkaClientPlain.receiveMessagesPlain());
}
Also used : AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) CoreMatchers.is(org.hamcrest.CoreMatchers.is) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ParallelSuite(io.strimzi.systemtest.annotations.ParallelSuite) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) ResourceManager.kubeClient(io.strimzi.systemtest.resources.ResourceManager.kubeClient) HashMap(java.util.HashMap) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) DYNAMIC_CONFIGURATION(io.strimzi.systemtest.Constants.DYNAMIC_CONFIGURATION) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) NODEPORT_SUPPORTED(io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED) ROLLING_UPDATE(io.strimzi.systemtest.Constants.ROLLING_UPDATE) ResourceManager.cmdKubeClient(io.strimzi.systemtest.resources.ResourceManager.cmdKubeClient) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Constants(io.strimzi.systemtest.Constants) EXTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.EXTERNAL_CLIENTS_USED) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) Collectors(java.util.stream.Collectors) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Logger(org.apache.logging.log4j.Logger) KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) KafkaUserTemplates(io.strimzi.systemtest.templates.crd.KafkaUserTemplates) TestKafkaVersion(io.strimzi.systemtest.utils.TestKafkaVersion) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
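The heart of this test is the switch of the external listener between plain and TLS. For readers skimming the flattened builder chains, the two NodePort listener definitions the test alternates between reduce to the following minimal sketch, reusing the same constants and port as the code above (shown as a fragment; in the test these builders appear inline in the Kafka CR spec):

import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;
import io.strimzi.systemtest.Constants;

// Plain NodePort listener on 9094 - the initial state of the Kafka CR in this test
GenericKafkaListener plainNodePort = new GenericKafkaListenerBuilder()
    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .withPort(9094)
    .withType(KafkaListenerType.NODEPORT)
    .withTls(false)
    .build();

// TLS NodePort listener with TLS client authentication - the state after the first listener update
GenericKafkaListener tlsNodePort = new GenericKafkaListenerBuilder()
    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .withPort(9094)
    .withType(KafkaListenerType.NODEPORT)
    .withTls(true)
    .withNewKafkaListenerAuthenticationTlsAuth()
    .endKafkaListenerAuthenticationTlsAuth()
    .build();

Swapping one listener definition for the other changes the listener configuration and the broker certificates, which is why the operator performs a rolling update between the client checks.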

Example 2 with NODEPORT_SUPPORTED

Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi by strimzi.

From class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestart.

@Tag(NODEPORT_SUPPORTED)
@Tag(ROLLING_UPDATE)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
void testUpdateToExternalListenerCausesRollingRestart(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build(),
                    new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build(),
                    new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    String kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Edit listeners - this should cause RU (because of new crts)
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build(),
            new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build(),
            new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("compression.type", "snappy");
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("compression.type=snappy"));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Remove external listeners (node port) - this should cause RU (we need to update advertised.listeners)
    // Other external listeners cases are rolling because of crts
    kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build(),
            new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", false);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + false));
}
Also used : AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) CoreMatchers.is(org.hamcrest.CoreMatchers.is) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ParallelSuite(io.strimzi.systemtest.annotations.ParallelSuite) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) ResourceManager.kubeClient(io.strimzi.systemtest.resources.ResourceManager.kubeClient) HashMap(java.util.HashMap) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) DYNAMIC_CONFIGURATION(io.strimzi.systemtest.Constants.DYNAMIC_CONFIGURATION) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) NODEPORT_SUPPORTED(io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED) ROLLING_UPDATE(io.strimzi.systemtest.Constants.ROLLING_UPDATE) ResourceManager.cmdKubeClient(io.strimzi.systemtest.resources.ResourceManager.cmdKubeClient) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Constants(io.strimzi.systemtest.Constants) EXTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.EXTERNAL_CLIENTS_USED) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) Collectors(java.util.stream.Collectors) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Logger(org.apache.logging.log4j.Logger) KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) KafkaUserTemplates(io.strimzi.systemtest.templates.crd.KafkaUserTemplates) TestKafkaVersion(io.strimzi.systemtest.utils.TestKafkaVersion) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
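The helper updateAndVerifyDynConf called throughout this test is defined elsewhere in DynamicConfST and is not reproduced on this page. As a rough sketch of what such a helper plausibly does (apply the config map to the Kafka CR and check that the broker picked it up dynamically; the real implementation may well differ, for example by polling with a timeout):

// Hypothetical sketch only - not the actual DynamicConfST helper.
void updateAndVerifyDynConf(String namespaceName, String clusterName, Map<String, Object> newKafkaConfig) {
    // Patch spec.kafka.config; dynamically configurable keys are applied without a rolling update.
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
        kafka -> kafka.getSpec().getKafka().setConfig(newKafkaConfig), namespaceName);

    // Read the broker's dynamic configuration back, the same way the assertions above do.
    String brokerConfig = cmdKubeClient().namespace(namespaceName)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe")
        .out();
    assertThat(brokerConfig, containsString("Dynamic configs for broker 0 are:"));
}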

Example 3 with NODEPORT_SUPPORTED

Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi by strimzi.

From class ListenersST, method testAdvertisedHostNamesAppearsInBrokerCerts.

@Tag(NODEPORT_SUPPORTED)
@ParallelNamespaceTest
void testAdvertisedHostNamesAppearsInBrokerCerts(ExtensionContext extensionContext) throws CertificateException {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String advertHostInternal0 = "kafka-test.internal.0.net";
    final String advertHostInternal1 = "kafka-test.internal.1.net";
    final String advertHostInternal2 = "kafka-test.internal.2.net";
    List<String> advertHostInternalList = asList(advertHostInternal0, advertHostInternal1, advertHostInternal2);
    final int advertPortInternalListener = 9999;
    final String advertHostExternal0 = "kafka-test.external.0.net";
    final String advertHostExternal1 = "kafka-test.external.1.net";
    final String advertHostExternal2 = "kafka-test.external.2.net";
    List<String> advertHostExternalList = asList(advertHostExternal0, advertHostExternal1, advertHostExternal2);
    final int advertPortExternalListener = 9888;
    GenericKafkaListenerConfigurationBroker brokerInternal0 = new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(0).withAdvertisedHost(advertHostInternal0).withAdvertisedPort(advertPortInternalListener).build();
    GenericKafkaListenerConfigurationBroker brokerInternal1 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerInternal0).withBroker(1).withAdvertisedHost(advertHostInternal1).build();
    GenericKafkaListenerConfigurationBroker brokerInternal2 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerInternal0).withBroker(2).withAdvertisedHost(advertHostInternal2).build();
    GenericKafkaListenerConfigurationBroker brokerExternal0 = new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(0).withAdvertisedHost(advertHostExternal0).withAdvertisedPort(advertPortExternalListener).build();
    GenericKafkaListenerConfigurationBroker brokerExternal1 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerExternal0).withBroker(1).withAdvertisedHost(advertHostExternal1).build();
    GenericKafkaListenerConfigurationBroker brokerExternal2 = new GenericKafkaListenerConfigurationBrokerBuilder(brokerExternal0).withBroker(2).withAdvertisedHost(advertHostExternal2).build();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3)
        .editSpec()
            .editKafka()
                .withListeners(asList(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9098)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withNewConfiguration()
                            .withBrokers(asList(brokerInternal0, brokerInternal1, brokerInternal2))
                        .endConfiguration()
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9099)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(true)
                        .withNewConfiguration()
                            .withBrokers(asList(brokerExternal0, brokerExternal1, brokerExternal2))
                        .endConfiguration()
                        .build()))
            .endKafka()
        .endSpec()
        .build());
    Map<String, String> secretData = kubeClient().getSecret(namespaceName, KafkaResources.brokersServiceName(clusterName)).getData();
    List<String> kafkaPods = kubeClient().listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND).stream().filter(podName -> podName.contains("kafka")).collect(Collectors.toList());
    int index = 0;
    for (String kafkaBroker : kafkaPods) {
        String cert = secretData.get(kafkaBroker + ".crt");
        LOGGER.info("Encoding {}.crt", kafkaBroker);
        ByteArrayInputStream publicCert = new ByteArrayInputStream(Base64.getDecoder().decode(cert.getBytes()));
        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        Certificate certificate = certificateFactory.generateCertificate(publicCert);
        assertThat(certificate.toString(), containsString(advertHostInternalList.get(index)));
        assertThat(certificate.toString(), containsString(advertHostExternalList.get(index++)));
    }
}
Also used : ContainerEnvVarBuilder(io.strimzi.api.kafka.model.ContainerEnvVarBuilder) CoreMatchers.is(org.hamcrest.CoreMatchers.is) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) CertificateFactory(java.security.cert.CertificateFactory) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) GenericKafkaListenerConfigurationBrokerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBrokerBuilder) KafkaTopicUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils) INFRA_NAMESPACE(io.strimzi.systemtest.Constants.INFRA_NAMESPACE) KafkaUser(io.strimzi.api.kafka.model.KafkaUser) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) KafkaUtils.getKafkaSecretCertificates(io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils.getKafkaSecretCertificates) KafkaListenerAuthenticationTls(io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationTls) Matcher(java.util.regex.Matcher) SystemTestCertAndKey(io.strimzi.systemtest.security.SystemTestCertAndKey) ByteArrayInputStream(java.io.ByteArrayInputStream) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) Arrays.asList(java.util.Arrays.asList) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) ServiceUtils(io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils) StUtils(io.strimzi.systemtest.utils.StUtils) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) ACCEPTANCE(io.strimzi.systemtest.Constants.ACCEPTANCE) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) ListenerAddress(io.strimzi.api.kafka.model.status.ListenerAddress) INTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) SystemTestCertManager.generateRootCaCertAndKey(io.strimzi.systemtest.security.SystemTestCertManager.generateRootCaCertAndKey) CertAndKeyFiles(io.strimzi.systemtest.security.CertAndKeyFiles) Collectors(java.util.stream.Collectors) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) StandardCharsets(java.nio.charset.StandardCharsets) ListenerStatus(io.strimzi.api.kafka.model.status.ListenerStatus) Base64(java.util.Base64) List(java.util.List) Labels(io.strimzi.operator.common.model.Labels) Logger(org.apache.logging.log4j.Logger) Certificate(java.security.cert.Certificate) KafkaUtils.getKafkaStatusCertificates(io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils.getKafkaStatusCertificates) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) Secret(io.fabric8.kubernetes.api.model.Secret) Pattern(java.util.regex.Pattern) AbstractST(io.strimzi.systemtest.AbstractST) KafkaClientsTemplates(io.strimzi.systemtest.templates.crd.KafkaClientsTemplates) LOADBALANCER_SUPPORTED(io.strimzi.systemtest.Constants.LOADBALANCER_SUPPORTED) ParallelSuite(io.strimzi.systemtest.annotations.ParallelSuite) OpenShiftOnly(io.strimzi.systemtest.annotations.OpenShiftOnly) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) SecretUtils(io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils) GenericKafkaListenerConfigurationBroker(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBroker) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) SystemTestCertManager.generateIntermediateCaCertAndKey(io.strimzi.systemtest.security.SystemTestCertManager.generateIntermediateCaCertAndKey) 
SystemTestCertManager.generateEndEntityCertAndKey(io.strimzi.systemtest.security.SystemTestCertManager.generateEndEntityCertAndKey) Service(io.fabric8.kubernetes.api.model.Service) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) NODEPORT_SUPPORTED(io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) KafkaUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils) InternalKafkaClient(io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient) Constants(io.strimzi.systemtest.Constants) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) EXTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.EXTERNAL_CLIENTS_USED) CertificateException(java.security.cert.CertificateException) KubeClusterResource.kubeClient(io.strimzi.test.k8s.KubeClusterResource.kubeClient) JsonArray(io.vertx.core.json.JsonArray) AfterEach(org.junit.jupiter.api.AfterEach) KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) KafkaUserTemplates(io.strimzi.systemtest.templates.crd.KafkaUserTemplates) KafkaUserUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaUserUtils) SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) Kafka(io.strimzi.api.kafka.model.Kafka) Comparator(java.util.Comparator) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION) SystemTestCertManager.exportToPemFiles(io.strimzi.systemtest.security.SystemTestCertManager.exportToPemFiles) GenericKafkaListenerConfigurationBrokerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBrokerBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) ByteArrayInputStream(java.io.ByteArrayInputStream) GenericKafkaListenerConfigurationBroker(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBroker) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) CertificateFactory(java.security.cert.CertificateFactory) Certificate(java.security.cert.Certificate) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
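The loop above asserts that the advertised hosts appear somewhere in Certificate.toString(). A stricter check, offered here only as a sketch (it is not part of the test), reads the DNS Subject Alternative Names directly from the decoded X.509 certificate:

import java.io.ByteArrayInputStream;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.List;

// Decode a base64-encoded broker certificate (as stored in the brokers Secret) and list its DNS SANs.
static List<String> dnsSansOf(String base64Cert) throws CertificateException {
    X509Certificate cert = (X509Certificate) CertificateFactory.getInstance("X.509")
        .generateCertificate(new ByteArrayInputStream(Base64.getDecoder().decode(base64Cert)));
    List<String> dnsNames = new ArrayList<>();
    // Each SAN entry is a two-element list: [type, value]; type 2 means dNSName.
    Collection<List<?>> sans = cert.getSubjectAlternativeNames();
    if (sans != null) {
        for (List<?> san : sans) {
            if (Integer.valueOf(2).equals(san.get(0))) {
                dnsNames.add(san.get(1).toString());
            }
        }
    }
    return dnsNames;
}

With such a helper, the assertion in the loop could compare against dnsSansOf(secretData.get(kafkaBroker + ".crt")) instead of the string form of the whole certificate.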

Example 4 with NODEPORT_SUPPORTED

Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi-kafka-operator by strimzi.

From class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestart.

@Tag(NODEPORT_SUPPORTED)
@Tag(ROLLING_UPDATE)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
void testUpdateToExternalListenerCausesRollingRestart(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build(),
                    new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build(),
                    new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    String kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Edit listeners - this should cause RU (because of new crts)
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build(),
            new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build(),
            new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("compression.type", "snappy");
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("compression.type=snappy"));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + true));
    // Remove external listeners (node port) - this should cause RU (we need to update advertised.listeners)
    // Other external listeners cases are rolling because of crts
    kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build(),
            new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThat(RollingUpdateUtils.componentHasRolled(namespace, kafkaSelector, kafkaPods), is(true));
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n"));
    deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", false);
    updateAndVerifyDynConf(namespace, clusterName, deepCopyOfShardKafkaConfig);
    kafkaConfigurationFromPod = cmdKubeClient().namespace(namespace).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers --entity-name 0 --describe").out();
    assertThat(kafkaConfigurationFromPod, containsString("unclean.leader.election.enable=" + false));
}
Also used : AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) CoreMatchers.is(org.hamcrest.CoreMatchers.is) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ParallelSuite(io.strimzi.systemtest.annotations.ParallelSuite) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) ResourceManager.kubeClient(io.strimzi.systemtest.resources.ResourceManager.kubeClient) HashMap(java.util.HashMap) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) DYNAMIC_CONFIGURATION(io.strimzi.systemtest.Constants.DYNAMIC_CONFIGURATION) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) NODEPORT_SUPPORTED(io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED) ROLLING_UPDATE(io.strimzi.systemtest.Constants.ROLLING_UPDATE) ResourceManager.cmdKubeClient(io.strimzi.systemtest.resources.ResourceManager.cmdKubeClient) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Constants(io.strimzi.systemtest.Constants) EXTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.EXTERNAL_CLIENTS_USED) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) Collectors(java.util.stream.Collectors) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Logger(org.apache.logging.log4j.Logger) KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) KafkaUserTemplates(io.strimzi.systemtest.templates.crd.KafkaUserTemplates) TestKafkaVersion(io.strimzi.systemtest.utils.TestKafkaVersion) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Example 5 with NODEPORT_SUPPORTED

Use of io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED in project strimzi-kafka-operator by strimzi.

From class DynamicConfST, method testUpdateToExternalListenerCausesRollingRestartUsingExternalClients.

@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(ROLLING_UPDATE)
void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    Map<String, Object> deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build())
                .withConfig(deepCopyOfShardKafkaConfig)
            .endKafka()
        .endSpec()
        .build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, namespace).build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(namespace, clusterName, userName).build());
    ExternalKafkaClient externalKafkaClientTls = new ExternalKafkaClient.Builder().withTopicName(topicName).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withKafkaUsername(userName).withSecurityProtocol(SecurityProtocol.SSL).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
    ExternalKafkaClient externalKafkaClientPlain = new ExternalKafkaClient.Builder().withTopicName(topicName).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withSecurityProtocol(SecurityProtocol.PLAINTEXT).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain(), externalKafkaClientPlain.receiveMessagesPlain());
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Arrays.asList(
            new GenericKafkaListenerBuilder().withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true).build(),
            new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).withNewKafkaListenerAuthenticationTlsAuth().endKafkaListenerAuthenticationTlsAuth().build()));
    }, namespace);
    // TODO: remove it ?
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    externalKafkaClientTls.verifyProducedAndConsumedMessages(externalKafkaClientTls.sendMessagesTls() + MESSAGE_COUNT, externalKafkaClientTls.receiveMessagesTls());
    assertThrows(Exception.class, () -> {
        externalKafkaClientPlain.sendMessagesPlain();
        externalKafkaClientPlain.receiveMessagesPlain();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to tls communication");
    });
    LOGGER.info("Updating listeners of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setListeners(Collections.singletonList(new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(false).build()));
    }, namespace);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, KAFKA_REPLICAS, kafkaPods);
    assertThrows(Exception.class, () -> {
        externalKafkaClientTls.sendMessagesTls();
        externalKafkaClientTls.receiveMessagesTls();
        LOGGER.error("Producer & Consumer did not send and receive messages because external listener is set to plain communication");
    });
    externalKafkaClientPlain.verifyProducedAndConsumedMessages(externalKafkaClientPlain.sendMessagesPlain() + MESSAGE_COUNT, externalKafkaClientPlain.receiveMessagesPlain());
}
Also used : AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) CoreMatchers.is(org.hamcrest.CoreMatchers.is) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ParallelSuite(io.strimzi.systemtest.annotations.ParallelSuite) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) ResourceManager.kubeClient(io.strimzi.systemtest.resources.ResourceManager.kubeClient) HashMap(java.util.HashMap) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) DYNAMIC_CONFIGURATION(io.strimzi.systemtest.Constants.DYNAMIC_CONFIGURATION) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) NODEPORT_SUPPORTED(io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED) ROLLING_UPDATE(io.strimzi.systemtest.Constants.ROLLING_UPDATE) ResourceManager.cmdKubeClient(io.strimzi.systemtest.resources.ResourceManager.cmdKubeClient) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Constants(io.strimzi.systemtest.Constants) EXTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.EXTERNAL_CLIENTS_USED) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) Collectors(java.util.stream.Collectors) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Logger(org.apache.logging.log4j.Logger) KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) KafkaUserTemplates(io.strimzi.systemtest.templates.crd.KafkaUserTemplates) TestKafkaVersion(io.strimzi.systemtest.utils.TestKafkaVersion) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION) ExternalKafkaClient(io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Aggregations

LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 6 usages
KafkaResources (io.strimzi.api.kafka.model.KafkaResources): 6 usages
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 6 usages
KafkaListenerType (io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType): 6 usages
AbstractST (io.strimzi.systemtest.AbstractST): 6 usages
Constants (io.strimzi.systemtest.Constants): 6 usages
EXTERNAL_CLIENTS_USED (io.strimzi.systemtest.Constants.EXTERNAL_CLIENTS_USED): 6 usages
NODEPORT_SUPPORTED (io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED): 6 usages
REGRESSION (io.strimzi.systemtest.Constants.REGRESSION): 6 usages
ParallelSuite (io.strimzi.systemtest.annotations.ParallelSuite): 6 usages
ExternalKafkaClient (io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient): 6 usages
KafkaResource (io.strimzi.systemtest.resources.crd.KafkaResource): 6 usages
KafkaTemplates (io.strimzi.systemtest.templates.crd.KafkaTemplates): 6 usages
KafkaTopicTemplates (io.strimzi.systemtest.templates.crd.KafkaTopicTemplates): 6 usages
KafkaUserTemplates (io.strimzi.systemtest.templates.crd.KafkaUserTemplates): 5 usages
RollingUpdateUtils (io.strimzi.systemtest.utils.RollingUpdateUtils): 5 usages
PodUtils (io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils): 5 usages
Collections (java.util.Collections): 5 usages
Map (java.util.Map): 5 usages
Collectors (java.util.stream.Collectors): 5 usages
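As the aggregation shows, NODEPORT_SUPPORTED is simply a JUnit 5 tag constant from io.strimzi.systemtest.Constants. A test opts in by annotating itself with @Tag(NODEPORT_SUPPORTED), and test runs on environments without NodePort support can then exclude the tag through the JUnit platform's tag filtering. The class and method below are hypothetical, shown only to illustrate the pattern used by the examples above:

import static io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED;

import io.strimzi.systemtest.AbstractST;
import io.strimzi.systemtest.annotations.ParallelNamespaceTest;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtensionContext;

// Hypothetical example class; real Strimzi system tests follow the same shape (see ListenersST above).
class MyNodePortExampleST extends AbstractST {

    @Tag(NODEPORT_SUPPORTED)      // picked up (or excluded) by tag filtering at run time
    @ParallelNamespaceTest
    void testSomethingOverNodePort(ExtensionContext extensionContext) {
        // Configure a Kafka cluster with a KafkaListenerType.NODEPORT listener, as in the
        // examples above, then exercise it with ExternalKafkaClient.
    }
}

How the tag is included or excluded in practice (for example through Maven Surefire's groups and excludedGroups properties) depends on the build setup and is not shown on this page.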