Search in sources :

Example 11 with ExternalLoggingBuilder

Use of io.strimzi.api.kafka.model.ExternalLoggingBuilder in project strimzi by strimzi.

From class LoggingChangeST, method testDynamicallySetConnectLoggingLevels.

/**
 * Verifies that the Kafka Connect root logger level can be changed dynamically —
 * first from OFF to DEBUG via {@code InlineLogging}, then back to OFF via an
 * {@code ExternalLogging} ConfigMap — and that the change takes effect without
 * rolling the Connect deployment.
 */
@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
void testDynamicallySetConnectLoggingLevels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    // Start Connect with the root logger switched OFF, so the pod log is expected to stay empty.
    InlineLogging ilOff = new InlineLogging();
    Map<String, String> loggers = new HashMap<>();
    loggers.put("connect.root.logger.level", "OFF");
    ilOff.setLoggers(loggers);
    // create async
    resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    // sync point
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
    resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1).editSpec().withInlineLogging(ilOff).endSpec().editMetadata().addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().build());
    KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName);
    // Snapshot the Connect deployment so we can assert at the end that no rolling update happened.
    Map<String, String> connectSnapshot = DeploymentUtils.depSnapshot(namespaceName, KafkaConnectResources.deploymentName(clusterName));
    final String connectPodName = connectSnapshot.keySet().iterator().next();
    LOGGER.info("Asserting if log is without records");
    // Root logger is OFF: the last 30s of the pod log must contain no log4j-formatted records.
    assertFalse(DEFAULT_LOG4J_PATTERN.matcher(StUtils.getLogFromPodByTime(namespaceName, connectPodName, "", "30s")).find());
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    // NOTE(review): this mutates the same 'loggers' map instance that backs ilOff — harmless here
    // because ilOff has already been applied, but a fresh map would be safer; TODO confirm.
    loggers.put("connect.root.logger.level", "DEBUG");
    ilDebug.setLoggers(loggers);
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, conn -> {
        conn.getSpec().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for log4j.properties will contain desired settings");
    // Poll the Connect REST admin endpoint until the root logger reports DEBUG.
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/root").out().contains("DEBUG"));
    // With DEBUG active, the recent pod log must contain records matching the default log4j pattern.
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String kcLog = StUtils.getLogFromPodByTime(namespaceName, connectPodName, "", "30s");
        return kcLog != null && !kcLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(kcLog).find();
    });
    // External logging config that sets the root logger back to OFF.
    String log4jConfig = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n" + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %X{connector.context}%m (%c) [%t]%n\n" + "log4j.rootLogger=OFF, CONSOLE\n" + "log4j.logger.org.apache.zookeeper=ERROR\n" + "log4j.logger.org.I0Itec.zkclient=ERROR\n" + "log4j.logger.org.reflections=ERROR";
    String externalCmName = "external-cm";
    ConfigMap connectLoggingMap = new ConfigMapBuilder().withNewMetadata().addToLabels("app", "strimzi").withName(externalCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", log4jConfig)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(connectLoggingMap);
    ExternalLogging connectXternalLogging = new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(externalCmName).withKey("log4j.properties").build()).endValueFrom().build();
    LOGGER.info("Setting log level of Connect to OFF");
    // change to the external logging
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, conn -> {
        conn.getSpec().setLogging(connectXternalLogging);
    }, namespaceName);
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/root").out().contains("OFF"));
    // Root logger OFF again: the recent pod log must be empty (the matcher check on an
    // empty string is redundant but harmless).
    TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String kcLog = StUtils.getLogFromPodByTime(namespaceName, connectPodName, "", "30s");
        return kcLog != null && kcLog.isEmpty() && !DEFAULT_LOG4J_PATTERN.matcher(kcLog).find();
    });
    // Dynamic logging changes must not trigger a rolling update of the Connect deployment.
    assertThat("Connect pod should not roll", DeploymentUtils.depSnapshot(namespaceName, KafkaConnectResources.deploymentName(clusterName)), equalTo(connectSnapshot));
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)

Example 12 with ExternalLoggingBuilder

Use of io.strimzi.api.kafka.model.ExternalLoggingBuilder in project strimzi by strimzi.

From class LoggingChangeST, method testMM2LoggingLevelsHierarchy.

/**
 * Verifies the log4j logger-level hierarchy in a MirrorMaker 2 deployment configured
 * via external logging: after the ConfigMap is updated, a logger whose explicit level
 * was removed inherits from its nearest configured parent, and a logger with no
 * configured parent inherits from the root logger. The change must apply dynamically,
 * i.e. without rolling the MM2 pod.
 */
@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
void testMM2LoggingLevelsHierarchy(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    // MM2 mirrors between a source and a target Kafka cluster.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName + "-source", 3).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName + "-target", 3).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    // Initial config: root OFF, with several loggers explicitly set (WorkerTask and
    // EatWhatYouKill will later lose their explicit levels to test inheritance).
    String log4jConfig = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n" + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n\n" + "log4j.rootLogger=OFF, CONSOLE\n" + "log4j.logger.org.apache.zookeeper=ERROR\n" + "log4j.logger.org.I0Itec.zkclient=ERROR\n" + "log4j.logger.org.eclipse.jetty.util.thread=FATAL\n" + "log4j.logger.org.apache.kafka.connect.runtime.WorkerTask=OFF\n" + "log4j.logger.org.eclipse.jetty.util.thread.strategy.EatWhatYouKill=OFF\n" + "log4j.logger.org.reflections=ERROR";
    String externalCmName = "external-cm-hierarchy";
    ConfigMap mm2LoggingMap = new ConfigMapBuilder().withNewMetadata().addToLabels("app", "strimzi").withName(externalCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", log4jConfig)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(mm2LoggingMap);
    ExternalLogging mm2XternalLogging = new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(externalCmName).withKey("log4j.properties").build()).endValueFrom().build();
    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(clusterName, clusterName + "-target", clusterName + "-source", 1, false).editOrNewSpec().withLogging(mm2XternalLogging).endSpec().build());
    String kafkaMM2PodName = kubeClient().namespace(namespaceName).listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker2.RESOURCE_KIND).get(0).getMetadata().getName();
    // Snapshot the MM2 deployment so we can assert at the end that no rolling update happened.
    Map<String, String> mm2Snapshot = DeploymentUtils.depSnapshot(namespaceName, KafkaMirrorMaker2Resources.deploymentName(clusterName));
    LOGGER.info("Waiting for log4j.properties will contain desired settings");
    // Confirm the initial root level (OFF) is live via the Connect REST admin endpoint.
    TestUtils.waitFor("Logger init levels", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", "http://localhost:8083/admin/loggers/root").out().contains("OFF"));
    LOGGER.info("Changing log levels");
    // Updated config: root INFO, jetty.util.thread WARN; WorkerTask and EatWhatYouKill
    // no longer have explicit levels, so they must inherit.
    String updatedLog4jConfig = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n" + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n\n" + "log4j.rootLogger=INFO, CONSOLE\n" + "log4j.logger.org.apache.zookeeper=ERROR\n" + "log4j.logger.org.I0Itec.zkclient=ERROR\n" + "log4j.logger.org.eclipse.jetty.util.thread=WARN\n" + "log4j.logger.org.reflections=ERROR";
    mm2LoggingMap = new ConfigMapBuilder().withNewMetadata().addToLabels("app", "strimzi").withName(externalCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", updatedLog4jConfig)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(mm2LoggingMap);
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", "http://localhost:8083/admin/loggers/root").out().contains("INFO") && // not set logger should inherit parent level (in this case 'org.eclipse.jetty.util.thread')
    cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", "http://localhost:8083/admin/loggers/org.eclipse.jetty.util.thread.strategy.EatWhatYouKill").out().contains("WARN") && // logger with not set parent should inherit root
    cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", "http://localhost:8083/admin/loggers/org.apache.kafka.connect.runtime.WorkerTask").out().contains("INFO"));
    // Dynamic logging changes must not trigger a rolling update of the MM2 deployment.
    assertThat("MirrorMaker2 pod should not roll", DeploymentUtils.depSnapshot(namespaceName, KafkaMirrorMaker2Resources.deploymentName(clusterName)), equalTo(mm2Snapshot));
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)

Example 13 with ExternalLoggingBuilder

Use of io.strimzi.api.kafka.model.ExternalLoggingBuilder in project strimzi by strimzi.

From class RollingUpdateST, method testExternalLoggingChangeTriggerRollingUpdate.

/**
 * Verifies that editing the contents of an externally referenced logging ConfigMap
 * triggers a rolling update of both the ZooKeeper and the Kafka pods.
 */
@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
void testExternalLoggingChangeTriggerRollingUpdate(ExtensionContext extensionContext) {
    final String testNamespace = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaPodSelector = KafkaResource.getLabelSelector(cluster, KafkaResources.kafkaStatefulSetName(cluster));
    final LabelSelector zookeeperPodSelector = KafkaResource.getLabelSelector(cluster, KafkaResources.zookeeperStatefulSetName(cluster));
    // EO dynamic logging is tested in io.strimzi.systemtest.log.LoggingChangeST.testDynamicallySetEOloggingLevels
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(cluster, 3, 3).build());
    Map<String, String> kafkaPodSnapshot = PodUtils.podSnapshot(testNamespace, kafkaPodSelector);
    Map<String, String> zkPodSnapshot = PodUtils.podSnapshot(testNamespace, zookeeperPodSelector);
    String log4jProperties = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n"
        + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n"
        + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]\n"
        + "kafka.root.logger.level=INFO\n"
        + "log4j.rootLogger=${kafka.root.logger.level}, CONSOLE\n"
        + "log4j.logger.org.I0Itec.zkclient.ZkClient=INFO\n"
        + "log4j.logger.org.apache.zookeeper=INFO\n"
        + "log4j.logger.kafka=INFO\n"
        + "log4j.logger.org.apache.kafka=INFO\n"
        + "log4j.logger.kafka.request.logger=WARN, CONSOLE\n"
        + "log4j.logger.kafka.network.Processor=INFO\n"
        + "log4j.logger.kafka.server.KafkaApis=INFO\n"
        + "log4j.logger.kafka.network.RequestChannel$=INFO\n"
        + "log4j.logger.kafka.controller=INFO\n"
        + "log4j.logger.kafka.log.LogCleaner=INFO\n"
        + "log4j.logger.state.change.logger=TRACE\n"
        + "log4j.logger.kafka.authorizer.logger=INFO";
    String loggersCmName = "loggers-config-map";
    ConfigMap loggersCm = new ConfigMapBuilder()
        .withNewMetadata()
            .withNamespace(testNamespace)
            .withName(loggersCmName)
        .endMetadata()
        .addToData("log4j-custom.properties", log4jProperties)
        .build();
    ConfigMapKeySelector log4jCmKeySelector = new ConfigMapKeySelectorBuilder()
        .withName(loggersCmName)
        .withKey("log4j-custom.properties")
        .build();
    kubeClient(testNamespace).getClient().configMaps().inNamespace(testNamespace).createOrReplace(loggersCm);
    // Point both Kafka and ZooKeeper logging at the external ConfigMap.
    KafkaResource.replaceKafkaResourceInSpecificNamespace(cluster, kafka -> {
        kafka.getSpec().getKafka().setLogging(
            new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(log4jCmKeySelector).endValueFrom().build());
        kafka.getSpec().getZookeeper().setLogging(
            new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(log4jCmKeySelector).endValueFrom().build());
    }, testNamespace);
    // Switching to external logging rolls the pods once; capture the post-roll snapshots.
    zkPodSnapshot = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testNamespace, zookeeperPodSelector, 3, zkPodSnapshot);
    kafkaPodSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(testNamespace, kafkaPodSelector, 3, kafkaPodSnapshot);
    // Change the appender pattern inside the ConfigMap; this content change must roll the pods again.
    loggersCm.getData().put("log4j-custom.properties", log4jProperties.replace("%p %m (%c) [%t]", "%p %m (%c) [%t]%n"));
    kubeClient().getClient().configMaps().inNamespace(testNamespace).createOrReplace(loggersCm);
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testNamespace, zookeeperPodSelector, 3, zkPodSnapshot);
    RollingUpdateUtils.waitTillComponentHasRolled(testNamespace, kafkaPodSelector, 3, kafkaPodSnapshot);
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ConfigMapKeySelector(io.fabric8.kubernetes.api.model.ConfigMapKeySelector) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)

Example 14 with ExternalLoggingBuilder

Use of io.strimzi.api.kafka.model.ExternalLoggingBuilder in project strimzi-kafka-operator by strimzi.

From class LoggingChangeST, method testJSONFormatLogging.

/**
 * Verifies that Kafka, ZooKeeper, the Topic Operator, the User Operator and the
 * Cluster Operator all emit JSON-formatted logs when pointed at external logging
 * ConfigMaps configuring a JSON layout. Restores the original Cluster Operator
 * logging configuration at the end.
 *
 * Fix: the ConfigMap {@code configMapOperators} was previously created twice in a
 * row (a redundant duplicate {@code createOrReplace} call); the duplicate is removed.
 */
@ParallelNamespaceTest
@SuppressWarnings({ "checkstyle:MethodLength" })
void testJSONFormatLogging(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    // In this test scenario we change configuration for CO and we have to be sure, that CO is installed via YAML bundle instead of helm or OLM
    assumeTrue(!Environment.isHelmInstall() && !Environment.isOlmInstall());
    // log4j (Kafka/ZooKeeper) and log4j2 (operators, CO) configurations with JSON layouts.
    String loggersConfigKafka = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.CONSOLE.layout=net.logstash.log4j.JSONEventLayoutV1\n" + "kafka.root.logger.level=INFO\n" + "log4j.rootLogger=${kafka.root.logger.level}, CONSOLE\n" + "log4j.logger.org.I0Itec.zkclient.ZkClient=INFO\n" + "log4j.logger.org.apache.zookeeper=INFO\n" + "log4j.logger.kafka=INFO\n" + "log4j.logger.org.apache.kafka=INFO\n" + "log4j.logger.kafka.request.logger=WARN, CONSOLE\n" + "log4j.logger.kafka.network.Processor=OFF\n" + "log4j.logger.kafka.server.KafkaApis=OFF\n" + "log4j.logger.kafka.network.RequestChannel$=WARN\n" + "log4j.logger.kafka.controller=TRACE\n" + "log4j.logger.kafka.log.LogCleaner=INFO\n" + "log4j.logger.state.change.logger=TRACE\n" + "log4j.logger.kafka.authorizer.logger=INFO";
    String loggersConfigOperators = "appender.console.type=Console\n" + "appender.console.name=STDOUT\n" + "appender.console.layout.type=JsonLayout\n" + "rootLogger.level=INFO\n" + "rootLogger.appenderRefs=stdout\n" + "rootLogger.appenderRef.console.ref=STDOUT\n" + "rootLogger.additivity=false";
    String loggersConfigZookeeper = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.CONSOLE.layout=net.logstash.log4j.JSONEventLayoutV1\n" + "zookeeper.root.logger=INFO\n" + "log4j.rootLogger=${zookeeper.root.logger}, CONSOLE";
    String loggersConfigCO = "name = COConfig\n" + "appender.console.type = Console\n" + "appender.console.name = STDOUT\n" + "appender.console.layout.type = JsonLayout\n" + "rootLogger.level = ${env:STRIMZI_LOG_LEVEL:-INFO}\n" + "rootLogger.appenderRefs = stdout\n" + "rootLogger.appenderRef.console.ref = STDOUT\n" + "rootLogger.additivity = false\n" + "logger.kafka.name = org.apache.kafka\n" + "logger.kafka.level = ${env:STRIMZI_AC_LOG_LEVEL:-WARN}\n" + "logger.kafka.additivity = false";
    String configMapOpName = "json-layout-operators";
    String configMapZookeeperName = "json-layout-zookeeper";
    String configMapKafkaName = "json-layout-kafka";
    String configMapCOName = Constants.STRIMZI_DEPLOYMENT_NAME;
    // Keep the original CO logging configuration so it can be restored at the end of the test.
    String originalCoLoggers = kubeClient().getClient().configMaps().inNamespace(clusterOperator.getDeploymentNamespace()).withName(configMapCOName).get().getData().get("log4j2.properties");
    ConfigMap configMapKafka = new ConfigMapBuilder().withNewMetadata().withName(configMapKafkaName).withNamespace(namespaceName).endMetadata().addToData("log4j.properties", loggersConfigKafka).build();
    ConfigMapKeySelector kafkaLoggingCMselector = new ConfigMapKeySelectorBuilder().withName(configMapKafkaName).withKey("log4j.properties").build();
    ConfigMap configMapOperators = new ConfigMapBuilder().withNewMetadata().withName(configMapOpName).withNamespace(namespaceName).endMetadata().addToData("log4j2.properties", loggersConfigOperators).build();
    ConfigMapKeySelector operatorsLoggimgCMselector = new ConfigMapKeySelectorBuilder().withName(configMapOpName).withKey("log4j2.properties").build();
    ConfigMap configMapZookeeper = new ConfigMapBuilder().withNewMetadata().withName(configMapZookeeperName).withNamespace(namespaceName).endMetadata().addToData("log4j-custom.properties", loggersConfigZookeeper).build();
    ConfigMapKeySelector zkLoggingCMselector = new ConfigMapKeySelectorBuilder().withName(configMapZookeeperName).withKey("log4j-custom.properties").build();
    // NOTE(review): configMapCO metadata uses INFRA_NAMESPACE while it is created in
    // clusterOperator.getDeploymentNamespace() below — confirm these refer to the same namespace.
    ConfigMap configMapCO = new ConfigMapBuilder().withNewMetadata().withName(configMapCOName).withNamespace(INFRA_NAMESPACE).endMetadata().addToData("log4j2.properties", loggersConfigCO).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapKafka);
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapOperators);
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapZookeeper);
    kubeClient().getClient().configMaps().inNamespace(clusterOperator.getDeploymentNamespace()).createOrReplace(configMapCO);
    // Deploy Kafka with external (JSON-layout) logging wired into every component.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editOrNewSpec().editKafka().withLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(kafkaLoggingCMselector).endValueFrom().build()).endKafka().editZookeeper().withLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(zkLoggingCMselector).endValueFrom().build()).endZookeeper().editEntityOperator().editTopicOperator().withLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(operatorsLoggimgCMselector).endValueFrom().build()).endTopicOperator().editUserOperator().withLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(operatorsLoggimgCMselector).endValueFrom().build()).endUserOperator().endEntityOperator().endSpec().build());
    Map<String, String> zkPods = PodUtils.podSnapshot(namespaceName, zkSelector);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
    Map<String, String> operatorSnapshot = DeploymentUtils.depSnapshot(clusterOperator.getDeploymentNamespace(), ResourceManager.getCoDeploymentName());
    // Every component's log must now be JSON-formatted.
    StUtils.checkLogForJSONFormat(clusterOperator.getDeploymentNamespace(), operatorSnapshot, ResourceManager.getCoDeploymentName());
    StUtils.checkLogForJSONFormat(namespaceName, kafkaPods, "kafka");
    StUtils.checkLogForJSONFormat(namespaceName, zkPods, "zookeeper");
    StUtils.checkLogForJSONFormat(namespaceName, eoPods, "topic-operator");
    StUtils.checkLogForJSONFormat(namespaceName, eoPods, "user-operator");
    // set loggers of CO back to original
    configMapCO.getData().put("log4j2.properties", originalCoLoggers);
    kubeClient().getClient().configMaps().inNamespace(clusterOperator.getDeploymentNamespace()).createOrReplace(configMapCO);
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ConfigMapKeySelector(io.fabric8.kubernetes.api.model.ConfigMapKeySelector) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)

Example 15 with ExternalLoggingBuilder

Use of io.strimzi.api.kafka.model.ExternalLoggingBuilder in project strimzi-kafka-operator by strimzi.

From class LoggingChangeST, method testNotExistingCMSetsDefaultLogging.

/**
 * Verifies the operator's behavior when Kafka's external logging is repointed at a
 * non-existing ConfigMap: no rolling update occurs, the previously applied custom
 * logging configuration is retained in the pod, and the Kafka CR reports a NotReady
 * condition mentioning the missing ConfigMap.
 */
@ParallelNamespaceTest
void testNotExistingCMSetsDefaultLogging(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    // Default Kafka logging shipped with the cluster operator, used as a negative check below.
    final String defaultProps = TestUtils.getFileAsString(TestUtils.USER_PATH + "/../cluster-operator/src/main/resources/kafkaDefaultLoggingProperties");
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    String cmData = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n" + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n\n" + "log4j.rootLogger=INFO, CONSOLE\n" + "log4j.logger.org.I0Itec.zkclient.ZkClient=INFO\n" + "log4j.logger.org.apache.zookeeper=INFO\n" + "log4j.logger.kafka=INFO\n" + "log4j.logger.org.apache.kafka=INFO";
    String existingCmName = "external-cm";
    String nonExistingCmName = "non-existing-cm-name";
    ConfigMap configMap = new ConfigMapBuilder().withNewMetadata().withName(existingCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", cmData)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMap);
    LOGGER.info("Deploying Kafka with custom logging");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).editOrNewSpec().editKafka().withExternalLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withKey("log4j.properties").withName(existingCmName).withOptional(false).build()).endValueFrom().build()).endKafka().endSpec().build());
    // NOTE(review): kafkaSsName appears unused in this method — candidate for removal.
    String kafkaSsName = KafkaResources.kafkaStatefulSetName(clusterName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    // Sanity check: the custom logging config made it into the broker's custom-config dir.
    String log4jFile = cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.DEBUG, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "cat custom-config/log4j.properties").out();
    assertTrue(log4jFile.contains(cmData));
    LOGGER.info("Changing external logging's CM to not existing one");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().getKafka().setLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withKey("log4j.properties").withName(nonExistingCmName).withOptional(false).build()).endValueFrom().build()), namespaceName);
    // A broken logging reference must not cause a rolling update.
    RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, kafkaPods);
    // NOTE(review): the log message says "configuration is default", but the assertions
    // below verify the PREVIOUS custom config is retained (contains cmData, not defaultProps) —
    // the message text looks misleading; confirm intent before changing it.
    LOGGER.info("Checking that log4j.properties in custom-config isn't empty and configuration is default");
    log4jFile = cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.DEBUG, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "cat custom-config/log4j.properties").out();
    assertFalse(log4jFile.isEmpty());
    assertTrue(log4jFile.contains(cmData));
    assertFalse(log4jFile.contains(defaultProps));
    LOGGER.info("Checking if Kafka:{} contains error about non-existing CM", clusterName);
    // The CR status must surface the missing ConfigMap as a NotReady condition.
    Condition condition = KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getConditions().get(0);
    assertThat(condition.getType(), is(CustomResourceStatus.NotReady.toString()));
    assertTrue(condition.getMessage().matches("ConfigMap " + nonExistingCmName + " with external logging configuration does not exist .*"));
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) Condition(io.strimzi.api.kafka.model.status.Condition) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)

Aggregations

ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap)20 ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder)20 ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder)20 ExternalLoggingBuilder (io.strimzi.api.kafka.model.ExternalLoggingBuilder)20 ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)20 ExternalLogging (io.strimzi.api.kafka.model.ExternalLogging)14 Tag (org.junit.jupiter.api.Tag)12 LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector)10 InlineLogging (io.strimzi.api.kafka.model.InlineLogging)10 HashMap (java.util.HashMap)8 ConfigMapKeySelector (io.fabric8.kubernetes.api.model.ConfigMapKeySelector)4 Condition (io.strimzi.api.kafka.model.status.Condition)2