
Example 86 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

From the class LoggingChangeST, method testDynamicallySetConnectLoggingLevels.

@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
void testDynamicallySetConnectLoggingLevels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    InlineLogging ilOff = new InlineLogging();
    Map<String, String> loggers = new HashMap<>();
    loggers.put("connect.root.logger.level", "OFF");
    ilOff.setLoggers(loggers);
    // create async
    resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    // sync point
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
    resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1).editSpec().withInlineLogging(ilOff).endSpec().editMetadata().addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().build());
    KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName);
    Map<String, String> connectSnapshot = DeploymentUtils.depSnapshot(namespaceName, KafkaConnectResources.deploymentName(clusterName));
    final String connectPodName = connectSnapshot.keySet().iterator().next();
    LOGGER.info("Asserting if log is without records");
    assertFalse(DEFAULT_LOG4J_PATTERN.matcher(StUtils.getLogFromPodByTime(namespaceName, connectPodName, "", "30s")).find());
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    loggers.put("connect.root.logger.level", "DEBUG");
    ilDebug.setLoggers(loggers);
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, conn -> {
        conn.getSpec().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for log4j.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/root").out().contains("DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String kcLog = StUtils.getLogFromPodByTime(namespaceName, connectPodName, "", "30s");
        return kcLog != null && !kcLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(kcLog).find();
    });
    String log4jConfig = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n"
        + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n"
        + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %X{connector.context}%m (%c) [%t]%n\n"
        + "log4j.rootLogger=OFF, CONSOLE\n"
        + "log4j.logger.org.apache.zookeeper=ERROR\n"
        + "log4j.logger.org.I0Itec.zkclient=ERROR\n"
        + "log4j.logger.org.reflections=ERROR";
    String externalCmName = "external-cm";
    ConfigMap connectLoggingMap = new ConfigMapBuilder().withNewMetadata().addToLabels("app", "strimzi").withName(externalCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", log4jConfig)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(connectLoggingMap);
    ExternalLogging connectXternalLogging = new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(externalCmName).withKey("log4j.properties").build()).endValueFrom().build();
    LOGGER.info("Setting log level of Connect to OFF");
    // change to the external logging
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, conn -> {
        conn.getSpec().setLogging(connectXternalLogging);
    }, namespaceName);
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/root").out().contains("OFF"));
    TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String kcLog = StUtils.getLogFromPodByTime(namespaceName, connectPodName, "", "30s");
        return kcLog != null && kcLog.isEmpty() && !DEFAULT_LOG4J_PATTERN.matcher(kcLog).find();
    });
    assertThat("Connect pod should not roll", DeploymentUtils.depSnapshot(namespaceName, KafkaConnectResources.deploymentName(clusterName)), equalTo(connectSnapshot));
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
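
A note on the assertions above: they hinge on a DEFAULT_LOG4J_PATTERN constant that this listing does not show. As a hedged sketch only (the project's actual regex may differ), a pattern that recognizes the ISO8601-prefixed records produced by the ConversionPattern in log4jConfig could look like this:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class Log4jPatternSketch {
    // Hypothetical stand-in for DEFAULT_LOG4J_PATTERN: matches a line that starts with an
    // ISO8601 timestamp followed by a log level, e.g. "2022-03-01 12:34:56,789 DEBUG ...".
    static final Pattern DEFAULT_LOG4J_PATTERN = Pattern.compile(
        "^\\d{4}-\\d{2}-\\d{2}[ T]\\d{2}:\\d{2}:\\d{2},\\d{3} (TRACE|DEBUG|INFO|WARN|ERROR|FATAL)",
        Pattern.MULTILINE);

    public static void main(String[] args) {
        String sampleLog = "2022-03-01 12:34:56,789 DEBUG Polling for records (org.apache.kafka.connect.runtime.WorkerSourceTask)";
        Matcher matcher = DEFAULT_LOG4J_PATTERN.matcher(sampleLog);
        // find() returning true means at least one well-formed record is present, which is
        // how the test distinguishes "log has records" from "log is effectively empty".
        System.out.println("Log contains records: " + matcher.find());
    }
}

The tests pair this with StUtils.getLogFromPodByTime so that only the last 30 seconds of output are inspected, keeping the check independent of earlier startup noise.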

Example 87 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

From the class LoggingChangeST, method testDynamicallySetUnknownKafkaLoggerValue.

@ParallelNamespaceTest
void testDynamicallySetUnknownKafkaLoggerValue(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    InlineLogging il = new InlineLogging();
    il.setLoggers(Collections.singletonMap("kafka.root.logger.level", "PAPRIKA"));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setLogging(il);
    }, namespaceName);
    RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, kafkaPods);
    assertThat("Kafka pod should not roll", RollingUpdateUtils.componentHasRolled(namespaceName, kafkaSelector, kafkaPods), is(false));
}
Also used : LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
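
The "should not roll" check in this example compares a pod snapshot taken before the change with the state observed afterwards. Below is a minimal sketch of that idea using the fabric8 client; the helper names and the UID-based comparison are illustrative assumptions, not Strimzi's actual utilities:

import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.client.KubernetesClient;

import java.util.Map;
import java.util.stream.Collectors;

public class PodSnapshotSketch {

    // Map each matching pod's name to its UID; a pod that is deleted and recreated
    // during a rolling update comes back with a new UID.
    public static Map<String, String> podSnapshot(KubernetesClient client, String namespace, LabelSelector selector) {
        return client.pods().inNamespace(namespace).withLabelSelector(selector).list().getItems().stream()
            .collect(Collectors.toMap(p -> p.getMetadata().getName(), p -> p.getMetadata().getUid()));
    }

    // A rolling update happened if any pod name or UID differs between the two snapshots.
    public static boolean componentHasRolled(Map<String, String> before, Map<String, String> after) {
        return !before.equals(after);
    }
}

In the test above, the unknown level "PAPRIKA" must not trigger a rolling update, so the before and after snapshots are expected to stay equal.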

Example 88 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

From the class LoggingChangeST, method testDynamicallySetMM2LoggingLevels.

@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
@Tag(MIRROR_MAKER2)
@Tag(CONNECT_COMPONENTS)
void testDynamicallySetMM2LoggingLevels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    InlineLogging ilOff = new InlineLogging();
    Map<String, String> loggers = new HashMap<>();
    loggers.put("connect.root.logger.level", "OFF");
    loggers.put("log4j.logger.org.apache.zookeeper", "OFF");
    loggers.put("log4j.logger.org.I0Itec.zkclient", "OFF");
    loggers.put("log4j.logger.org.reflections", "OFF");
    ilOff.setLoggers(loggers);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName + "-source", 3).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName + "-target", 3).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(clusterName, clusterName + "-target", clusterName + "-source", 1, false).editOrNewSpec().withInlineLogging(ilOff).endSpec().build());
    String kafkaMM2PodName = kubeClient().namespace(namespaceName).listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker2.RESOURCE_KIND).get(0).getMetadata().getName();
    String mm2LogCheckCmd = "http://localhost:8083/admin/loggers/root";
    Map<String, String> mm2Snapshot = DeploymentUtils.depSnapshot(namespaceName, KafkaMirrorMaker2Resources.deploymentName(clusterName));
    TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String mmLog = StUtils.getLogFromPodByTime(namespaceName, kafkaMM2PodName, "", "30s");
        return mmLog != null && mmLog.isEmpty() && !DEFAULT_LOG4J_PATTERN.matcher(mmLog).find();
    });
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    loggers.put("connect.root.logger.level", "DEBUG");
    ilDebug.setLoggers(loggers);
    KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2ResourceInSpecificNamespace(clusterName, mm2 -> {
        mm2.getSpec().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for log4j.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", mm2LogCheckCmd).out().contains("DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String mmLog = StUtils.getLogFromPodByTime(namespaceName, kafkaMM2PodName, "", "30s");
        return mmLog != null && !mmLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(mmLog).find();
    });
    String log4jConfig = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n"
        + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n"
        + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %X{connector.context}%m (%c) [%t]%n\n"
        + "log4j.rootLogger=OFF, CONSOLE\n"
        + "log4j.logger.org.apache.zookeeper=ERROR\n"
        + "log4j.logger.org.I0Itec.zkclient=ERROR\n"
        + "log4j.logger.org.reflections=ERROR";
    String externalCmName = "external-cm";
    ConfigMap mm2LoggingMap = new ConfigMapBuilder().withNewMetadata().addToLabels("app", "strimzi").withName(externalCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", log4jConfig)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(mm2LoggingMap);
    ExternalLogging mm2XternalLogging = new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(externalCmName).withKey("log4j.properties").build()).endValueFrom().build();
    LOGGER.info("Setting log level of MM2 to OFF");
    // change to the external logging
    KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2ResourceInSpecificNamespace(clusterName, mm2 -> {
        mm2.getSpec().setLogging(mm2XternalLogging);
    }, namespaceName);
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", mm2LogCheckCmd).out().contains("OFF"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String mmLog = StUtils.getLogFromPodByTime(namespaceName, kafkaMM2PodName, "", "30s");
        return mmLog != null && !mmLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(mmLog).find();
    });
    assertThat("MirrorMaker2 pod should not roll", DeploymentUtils.depSnapshot(namespaceName, KafkaMirrorMaker2Resources.deploymentName(clusterName)), equalTo(mm2Snapshot));
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
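
Both the Connect and MirrorMaker 2 tests read the effective level back through the Kafka Connect REST API (GET /admin/loggers/root), reached with curl executed inside a pod because the service is only resolvable in-cluster. Purely as an illustration of the same check with Java's built-in HTTP client (the localhost URL is an assumption; it is not how the tests reach the endpoint):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectRootLoggerCheck {
    public static void main(String[] args) throws Exception {
        // Hypothetical address; in the tests the REST API is reached through the Connect/MM2
        // service from inside the cluster, hence the exec'd curl commands above.
        URI loggersUri = URI.create("http://localhost:8083/admin/loggers/root");

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(loggersUri).GET().build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());

        // The endpoint returns JSON such as {"level":"DEBUG"}; the tests only check that the
        // expected level string appears somewhere in the curl output.
        System.out.println("Root logger response: " + response.body());
        System.out.println("Is DEBUG: " + response.body().contains("DEBUG"));
    }
}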

Example 89 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

From the class LoggingChangeST, method testDynamicallySetEOloggingLevels.

@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:CyclomaticComplexity" })
void testDynamicallySetEOloggingLevels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    InlineLogging ilOff = new InlineLogging();
    ilOff.setLoggers(Collections.singletonMap("rootLogger.level", "OFF"));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 1, 1).editSpec().editEntityOperator().editTopicOperator().withInlineLogging(ilOff).endTopicOperator().editUserOperator().withInlineLogging(ilOff).endUserOperator().endEntityOperator().endSpec().build());
    String eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName);
    Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDeploymentName);
    final String eoPodName = eoPods.keySet().iterator().next();
    LOGGER.info("Checking if EO pod contains any log (except configuration)");
    assertFalse(DEFAULT_LOG4J_PATTERN.matcher(StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s")).find());
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    ilDebug.setLoggers(Collections.singletonMap("rootLogger.level", "DEBUG"));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getEntityOperator().getTopicOperator().setLogging(ilDebug);
        k.getSpec().getEntityOperator().getUserOperator().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String uoLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s");
        String toLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "topic-operator", "30s");
        return uoLog != null && toLog != null && !(uoLog.isEmpty() && toLog.isEmpty()) && DEFAULT_LOG4J_PATTERN.matcher(uoLog).find() && DEFAULT_LOG4J_PATTERN.matcher(toLog).find();
    });
    LOGGER.info("Setting external logging OFF");
    ConfigMap configMapTo = new ConfigMapBuilder()
        .withNewMetadata().withName("external-configmap-to").withNamespace(namespaceName).endMetadata()
        .withData(Collections.singletonMap("log4j2.properties",
            "name=TOConfig\n"
                + "appender.console.type=Console\n"
                + "appender.console.name=STDOUT\n"
                + "appender.console.layout.type=PatternLayout\n"
                + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
                + "rootLogger.level=OFF\n"
                + "rootLogger.appenderRefs=stdout\n"
                + "rootLogger.appenderRef.console.ref=STDOUT\n"
                + "rootLogger.additivity=false"))
        .build();
    ConfigMap configMapUo = new ConfigMapBuilder()
        .withNewMetadata().withName("external-configmap-uo").withNamespace(namespaceName).endMetadata()
        .addToData(Collections.singletonMap("log4j2.properties",
            "name=UOConfig\n"
                + "appender.console.type=Console\n"
                + "appender.console.name=STDOUT\n"
                + "appender.console.layout.type=PatternLayout\n"
                + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
                + "rootLogger.level=OFF\n"
                + "rootLogger.appenderRefs=stdout\n"
                + "rootLogger.appenderRef.console.ref=STDOUT\n"
                + "rootLogger.additivity=false"))
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapTo);
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapUo);
    ExternalLogging elTo = new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName("external-configmap-to").withKey("log4j2.properties").build()).endValueFrom().build();
    ExternalLogging elUo = new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName("external-configmap-uo").withKey("log4j2.properties").build()).endValueFrom().build();
    LOGGER.info("Setting log level of TO and UO to OFF - records should not appear in log");
    // change to external logging
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getEntityOperator().getTopicOperator().setLogging(elTo);
        k.getSpec().getEntityOperator().getUserOperator().setLogging(elUo);
    }, namespaceName);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=OFF") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=OFF") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("monitorInterval=30") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("monitorInterval=30"));
    TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String uoLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s");
        String toLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "topic-operator", "30s");
        return uoLog != null && toLog != null && uoLog.isEmpty() && toLog.isEmpty() && !(DEFAULT_LOG4J_PATTERN.matcher(uoLog).find() && DEFAULT_LOG4J_PATTERN.matcher(toLog).find());
    });
    LOGGER.info("Setting external logging OFF");
    configMapTo = new ConfigMapBuilder()
        .withNewMetadata().withName("external-configmap-to").withNamespace(namespaceName).endMetadata()
        .withData(Collections.singletonMap("log4j2.properties",
            "name=TOConfig\n"
                + "appender.console.type=Console\n"
                + "appender.console.name=STDOUT\n"
                + "appender.console.layout.type=PatternLayout\n"
                + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
                + "rootLogger.level=DEBUG\n"
                + "rootLogger.appenderRefs=stdout\n"
                + "rootLogger.appenderRef.console.ref=STDOUT\n"
                + "rootLogger.additivity=false"))
        .build();
    configMapUo = new ConfigMapBuilder()
        .withNewMetadata().withName("external-configmap-uo").withNamespace(namespaceName).endMetadata()
        .addToData(Collections.singletonMap("log4j2.properties",
            "name=UOConfig\n"
                + "appender.console.type=Console\n"
                + "appender.console.name=STDOUT\n"
                + "appender.console.layout.type=PatternLayout\n"
                + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
                + "rootLogger.level=DEBUG\n"
                + "rootLogger.appenderRefs=stdout\n"
                + "rootLogger.appenderRef.console.ref=STDOUT\n"
                + "rootLogger.additivity=false"))
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapTo);
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapUo);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String uoLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s");
        String toLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "topic-operator", "30s");
        return uoLog != null && toLog != null && !(uoLog.isEmpty() && toLog.isEmpty()) && DEFAULT_LOG4J_PATTERN.matcher(uoLog).find() && DEFAULT_LOG4J_PATTERN.matcher(toLog).find();
    });
    assertThat("EO pod should not roll", DeploymentUtils.depSnapshot(namespaceName, eoDeploymentName), equalTo(eoPods));
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
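
Every step in these tests is gated on TestUtils.waitFor, a poll-until-true helper driven by a poll interval and a timeout. The sketch below shows the general polling pattern such a helper implements; it is an illustration of the idea, not Strimzi's actual implementation:

import java.util.function.BooleanSupplier;

public class WaitForSketch {

    // Polls the condition every pollIntervalMs until it returns true,
    // or throws once timeoutMs has elapsed.
    public static void waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (ready.getAsBoolean()) {
                return;
            }
            try {
                Thread.sleep(pollIntervalMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException("Interrupted while waiting for: " + description, e);
            }
        }
        throw new RuntimeException("Timed out waiting for: " + description);
    }

    public static void main(String[] args) {
        // Example: wait (briefly) for a trivially true condition.
        waitFor("example condition", 100, 1_000, () -> true);
        System.out.println("Condition met");
    }
}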

Example 90 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

From the class SecurityST, method testCertificates.

@ParallelNamespaceTest
void testCertificates(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    LOGGER.info("Running testCertificates {}", clusterName);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 2).editSpec().editZookeeper().withReplicas(2).endZookeeper().endSpec().build());
    LOGGER.info("Check Kafka bootstrap certificate");
    String outputCertificate = SystemTestCertManager.generateOpenSslCommandByComponent(namespaceName, KafkaResources.tlsBootstrapAddress(clusterName), KafkaResources.bootstrapServiceName(clusterName), KafkaResources.kafkaPodName(clusterName, 0), "kafka", false);
    LOGGER.info("OPENSSL OUTPUT: \n\n{}\n\n", outputCertificate);
    verifyCerts(clusterName, outputCertificate, "kafka");
    LOGGER.info("Check zookeeper client certificate");
    outputCertificate = SystemTestCertManager.generateOpenSslCommandByComponent(namespaceName, KafkaResources.zookeeperServiceName(clusterName) + ":2181", KafkaResources.zookeeperServiceName(clusterName), KafkaResources.kafkaPodName(clusterName, 0), "kafka");
    verifyCerts(clusterName, outputCertificate, "zookeeper");
    List<String> kafkaPorts = new ArrayList<>(Arrays.asList("9091", "9093"));
    List<String> zkPorts = new ArrayList<>(Arrays.asList("2181", "3888"));
    IntStream.rangeClosed(0, 1).forEach(podId -> {
        String output;
        LOGGER.info("Checking certificates for podId {}", podId);
        for (String kafkaPort : kafkaPorts) {
            LOGGER.info("Check kafka certificate for port {}", kafkaPort);
            output = SystemTestCertManager.generateOpenSslCommandByComponentUsingSvcHostname(namespaceName, KafkaResources.kafkaPodName(clusterName, podId), KafkaResources.brokersServiceName(clusterName), kafkaPort, "kafka");
            verifyCerts(clusterName, output, "kafka");
        }
        for (String zkPort : zkPorts) {
            LOGGER.info("Check zookeeper certificate for port {}", zkPort);
            output = SystemTestCertManager.generateOpenSslCommandByComponentUsingSvcHostname(namespaceName, KafkaResources.zookeeperPodName(clusterName, podId), KafkaResources.zookeeperHeadlessServiceName(clusterName), zkPort, "zookeeper");
            verifyCerts(clusterName, output, "zookeeper");
        }
    });
}
Also used : ArrayList(java.util.ArrayList) Matchers.containsString(org.hamcrest.Matchers.containsString) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
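
SystemTestCertManager.generateOpenSslCommandByComponent wraps an openssl s_client invocation run inside a pod, and verifyCerts then inspects the printed certificate chain. As a rough sketch of the command shape only (an assumption for illustration, not the helper's exact implementation):

import java.util.List;

public class OpenSslCheckSketch {

    // Illustrative only: the real helper execs a command like this inside a broker pod so
    // that the cluster-internal DNS names resolve.
    public static List<String> opensslCommand(String hostAndPort, String sniHostname) {
        return List.of(
            "openssl", "s_client",
            "-connect", hostAndPort,      // e.g. <cluster>-kafka-bootstrap:9093
            "-servername", sniHostname,   // SNI, so the broker presents the matching certificate
            "-showcerts");
    }

    public static void main(String[] args) {
        System.out.println(String.join(" ", opensslCommand("my-cluster-kafka-bootstrap:9093", "my-cluster-kafka-bootstrap")));
    }
}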

Aggregations

ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest) 302
Tag (org.junit.jupiter.api.Tag) 166
Matchers.containsString (org.hamcrest.Matchers.containsString) 148
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) 114
InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient) 112
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector) 94
KafkaUser (io.strimzi.api.kafka.model.KafkaUser) 86
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString) 76
SecretBuilder (io.fabric8.kubernetes.api.model.SecretBuilder) 66
HashMap (java.util.HashMap) 52
ResourceRequirementsBuilder (io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder) 50
TestUtils.fromYamlString (io.strimzi.test.TestUtils.fromYamlString) 48
Matchers.emptyOrNullString (org.hamcrest.Matchers.emptyOrNullString) 48
ExternalKafkaClient (io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient) 42
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap) 40
ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder) 40
Secret (io.fabric8.kubernetes.api.model.Secret) 40
ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) 38
KafkaListenerAuthenticationTls (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationTls) 36
Pod (io.fabric8.kubernetes.api.model.Pod) 32