Example 1 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

From class OauthPlainIsolatedST, method testProducerConsumerBridge.

@Description("As a oauth bridge, I should be able to send messages to bridge endpoint.")
@ParallelTest
@Tag(BRIDGE)
void testProducerConsumerBridge(ExtensionContext extensionContext) {
    String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String producerName = OAUTH_PRODUCER_NAME + "-" + clusterName;
    String consumerName = OAUTH_CONSUMER_NAME + "-" + clusterName;
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    KafkaOauthClients oauthExampleClients = new KafkaOauthClientsBuilder()
        .withNamespaceName(INFRA_NAMESPACE)
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withOauthClientId(OAUTH_CLIENT_NAME)
        .withOauthClientSecret(OAUTH_CLIENT_SECRET)
        .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
        .build();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicName, INFRA_NAMESPACE).build());
    resourceManager.createResource(extensionContext, oauthExampleClients.producerStrimziOauthPlain());
    ClientUtils.waitForClientSuccess(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, producerName);
    resourceManager.createResource(extensionContext, oauthExampleClients.consumerStrimziOauthPlain());
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, consumerName);
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(INFRA_NAMESPACE, false, kafkaClientsName).build());
    // needed for verification of the OAuth configuration
    InlineLogging ilDebug = new InlineLogging();
    ilDebug.setLoggers(Map.of("rootLogger.level", "DEBUG"));
    resourceManager.createResource(extensionContext, KafkaBridgeTemplates.kafkaBridge(oauthClusterName, KafkaResources.plainBootstrapAddress(oauthClusterName), 1)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .withNewKafkaClientAuthenticationOAuth()
                .withTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
                .withClientId("kafka-bridge")
                .withNewClientSecret()
                    .withSecretName(BRIDGE_OAUTH_SECRET)
                    .withKey(OAUTH_KEY)
                .endClientSecret()
                .withConnectTimeoutSeconds(CONNECT_TIMEOUT_S)
                .withReadTimeoutSeconds(READ_TIMEOUT_S)
            .endKafkaClientAuthenticationOAuth()
            .withLogging(ilDebug)
        .endSpec()
        .build());
    final String kafkaBridgePodName = kubeClient(INFRA_NAMESPACE).listPods(INFRA_NAMESPACE, oauthClusterName, Labels.STRIMZI_KIND_LABEL, KafkaBridge.RESOURCE_KIND).get(0).getMetadata().getName();
    final String kafkaBridgeLogs = KubeClusterResource.cmdKubeClient(INFRA_NAMESPACE).execInCurrentNamespace(Level.DEBUG, "logs", kafkaBridgePodName).out();
    verifyOauthConfiguration(kafkaBridgeLogs);
    String bridgeProducerName = "bridge-producer-" + clusterName;
    BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder()
        .withNamespaceName(INFRA_NAMESPACE)
        .withProducerName(bridgeProducerName)
        .withBootstrapAddress(KafkaBridgeResources.serviceName(oauthClusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withPort(HTTP_BRIDGE_DEFAULT_PORT)
        .withDelayMs(1000)
        .withPollInterval(1000)
        .build();
    resourceManager.createResource(extensionContext, kafkaBridgeClientJob.producerStrimziBridge());
    ClientUtils.waitForClientSuccess(bridgeProducerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, bridgeProducerName);
}
Also used: KafkaOauthClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClientsBuilder) BridgeClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder) BridgeClients(io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) KafkaOauthClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClients) Description(io.vertx.core.cli.annotations.Description) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest) Tag(org.junit.jupiter.api.Tag)
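
A minimal sketch of the InlineLogging pattern used above, reduced to its core. It assumes only the Strimzi api module on the classpath and a KafkaBridge resource that already has a spec; BridgeDebugLogging and withDebugRootLogger are hypothetical names:

import java.util.Map;

import io.strimzi.api.kafka.model.InlineLogging;
import io.strimzi.api.kafka.model.KafkaBridge;
import io.strimzi.api.kafka.model.KafkaBridgeBuilder;

public class BridgeDebugLogging {

    // Hypothetical helper: returns a copy of the given KafkaBridge with its
    // root logger raised to DEBUG via inline logging (no external ConfigMap).
    public static KafkaBridge withDebugRootLogger(KafkaBridge bridge) {
        InlineLogging ilDebug = new InlineLogging();
        ilDebug.setLoggers(Map.of("rootLogger.level", "DEBUG"));
        return new KafkaBridgeBuilder(bridge)
            .editSpec()
                .withLogging(ilDebug)
            .endSpec()
            .build();
    }
}

Inline logging keeps the logger overrides inside the custom resource itself, which is why the test can verify them straight from the Bridge pod's log output.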

Example 2 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

From class LoggingChangeST, method testDynamicallySetMM2LoggingLevels.

@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
@Tag(MIRROR_MAKER2)
@Tag(CONNECT_COMPONENTS)
void testDynamicallySetMM2LoggingLevels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    InlineLogging ilOff = new InlineLogging();
    Map<String, String> loggers = new HashMap<>();
    loggers.put("connect.root.logger.level", "OFF");
    loggers.put("log4j.logger.org.apache.zookeeper", "OFF");
    loggers.put("log4j.logger.org.I0Itec.zkclient", "OFF");
    loggers.put("log4j.logger.org.reflections", "OFF");
    ilOff.setLoggers(loggers);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName + "-source", 3).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName + "-target", 3).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(clusterName, clusterName + "-target", clusterName + "-source", 1, false)
        .editOrNewSpec()
            .withInlineLogging(ilOff)
        .endSpec()
        .build());
    String kafkaMM2PodName = kubeClient().namespace(namespaceName).listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker2.RESOURCE_KIND).get(0).getMetadata().getName();
    String mm2LogCheckCmd = "http://localhost:8083/admin/loggers/root";
    Map<String, String> mm2Snapshot = DeploymentUtils.depSnapshot(namespaceName, KafkaMirrorMaker2Resources.deploymentName(clusterName));
    TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String mmLog = StUtils.getLogFromPodByTime(namespaceName, kafkaMM2PodName, "", "30s");
        return mmLog != null && mmLog.isEmpty() && !DEFAULT_LOG4J_PATTERN.matcher(mmLog).find();
    });
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    loggers.put("connect.root.logger.level", "DEBUG");
    ilDebug.setLoggers(loggers);
    KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2ResourceInSpecificNamespace(clusterName, mm2 -> {
        mm2.getSpec().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for log4j.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", mm2LogCheckCmd).out().contains("DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String mmLog = StUtils.getLogFromPodByTime(namespaceName, kafkaMM2PodName, "", "30s");
        return mmLog != null && !mmLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(mmLog).find();
    });
    String log4jConfig = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n"
        + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n"
        + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %X{connector.context}%m (%c) [%t]%n\n"
        + "log4j.rootLogger=OFF, CONSOLE\n"
        + "log4j.logger.org.apache.zookeeper=ERROR\n"
        + "log4j.logger.org.I0Itec.zkclient=ERROR\n"
        + "log4j.logger.org.reflections=ERROR";
    String externalCmName = "external-cm";
    ConfigMap mm2LoggingMap = new ConfigMapBuilder()
        .withNewMetadata()
            .addToLabels("app", "strimzi")
            .withName(externalCmName)
            .withNamespace(namespaceName)
        .endMetadata()
        .withData(Collections.singletonMap("log4j.properties", log4jConfig))
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(mm2LoggingMap);
    ExternalLogging mm2XternalLogging = new ExternalLoggingBuilder()
        .withNewValueFrom()
            .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                .withName(externalCmName)
                .withKey("log4j.properties")
                .build())
        .endValueFrom()
        .build();
    LOGGER.info("Setting log level of MM2 to OFF");
    // change to the external logging
    KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2ResourceInSpecificNamespace(clusterName, mm2 -> {
        mm2.getSpec().setLogging(mm2XternalLogging);
    }, namespaceName);
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaMM2PodName, "curl", mm2LogCheckCmd).out().contains("OFF"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String mmLog = StUtils.getLogFromPodByTime(namespaceName, kafkaMM2PodName, "", "30s");
        return mmLog != null && !mmLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(mmLog).find();
    });
    assertThat("MirrorMaker2 pod should not roll", DeploymentUtils.depSnapshot(namespaceName, KafkaMirrorMaker2Resources.deploymentName(clusterName)), equalTo(mm2Snapshot));
}
Also used: ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
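
The switch from InlineLogging to ExternalLogging above boils down to two steps: publish a log4j ConfigMap, then point the logging spec at it. A minimal sketch under that assumption, using only the fabric8 client and Strimzi model classes already shown; ExternalLoggingHelper and externalLog4j are hypothetical names:

import java.util.Collections;

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.strimzi.api.kafka.model.ExternalLogging;
import io.strimzi.api.kafka.model.ExternalLoggingBuilder;

public class ExternalLoggingHelper {

    // Hypothetical helper: publishes the given log4j.properties as a ConfigMap
    // and returns an ExternalLogging reference ready for spec.setLogging(...).
    public static ExternalLogging externalLog4j(KubernetesClient client, String namespace,
                                                String cmName, String log4jProperties) {
        ConfigMap cm = new ConfigMapBuilder()
            .withNewMetadata()
                .withName(cmName)
                .withNamespace(namespace)
            .endMetadata()
            .withData(Collections.singletonMap("log4j.properties", log4jProperties))
            .build();
        client.configMaps().inNamespace(namespace).createOrReplace(cm);
        return new ExternalLoggingBuilder()
            .withNewValueFrom()
                .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                    .withName(cmName)
                    .withKey("log4j.properties")
                    .build())
            .endValueFrom()
            .build();
    }
}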

Example 3 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

From class LoggingChangeST, method testDynamicallySetEOloggingLevels.

@ParallelNamespaceTest
@Tag(ROLLING_UPDATE)
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:CyclomaticComplexity" })
void testDynamicallySetEOloggingLevels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    InlineLogging ilOff = new InlineLogging();
    ilOff.setLoggers(Collections.singletonMap("rootLogger.level", "OFF"));
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 1, 1)
        .editSpec()
            .editEntityOperator()
                .editTopicOperator()
                    .withInlineLogging(ilOff)
                .endTopicOperator()
                .editUserOperator()
                    .withInlineLogging(ilOff)
                .endUserOperator()
            .endEntityOperator()
        .endSpec()
        .build());
    String eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName);
    Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDeploymentName);
    final String eoPodName = eoPods.keySet().iterator().next();
    LOGGER.info("Checking if EO pod contains any log (except configuration)");
    assertFalse(DEFAULT_LOG4J_PATTERN.matcher(StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s")).find());
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    ilDebug.setLoggers(Collections.singletonMap("rootLogger.level", "DEBUG"));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getEntityOperator().getTopicOperator().setLogging(ilDebug);
        k.getSpec().getEntityOperator().getUserOperator().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String uoLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s");
        String toLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "topic-operator", "30s");
        return uoLog != null && toLog != null && !(uoLog.isEmpty() && toLog.isEmpty()) && DEFAULT_LOG4J_PATTERN.matcher(uoLog).find() && DEFAULT_LOG4J_PATTERN.matcher(toLog).find();
    });
    LOGGER.info("Setting external logging OFF");
    ConfigMap configMapTo = new ConfigMapBuilder()
        .withNewMetadata()
            .withName("external-configmap-to")
            .withNamespace(namespaceName)
        .endMetadata()
        .withData(Collections.singletonMap("log4j2.properties",
            "name=TOConfig\n"
            + "appender.console.type=Console\n"
            + "appender.console.name=STDOUT\n"
            + "appender.console.layout.type=PatternLayout\n"
            + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
            + "rootLogger.level=OFF\n"
            + "rootLogger.appenderRefs=stdout\n"
            + "rootLogger.appenderRef.console.ref=STDOUT\n"
            + "rootLogger.additivity=false"))
        .build();
    ConfigMap configMapUo = new ConfigMapBuilder()
        .withNewMetadata()
            .withName("external-configmap-uo")
            .withNamespace(namespaceName)
        .endMetadata()
        .addToData(Collections.singletonMap("log4j2.properties",
            "name=UOConfig\n"
            + "appender.console.type=Console\n"
            + "appender.console.name=STDOUT\n"
            + "appender.console.layout.type=PatternLayout\n"
            + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
            + "rootLogger.level=OFF\n"
            + "rootLogger.appenderRefs=stdout\n"
            + "rootLogger.appenderRef.console.ref=STDOUT\n"
            + "rootLogger.additivity=false"))
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapTo);
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapUo);
    ExternalLogging elTo = new ExternalLoggingBuilder()
        .withNewValueFrom()
            .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                .withName("external-configmap-to")
                .withKey("log4j2.properties")
                .build())
        .endValueFrom()
        .build();
    ExternalLogging elUo = new ExternalLoggingBuilder()
        .withNewValueFrom()
            .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                .withName("external-configmap-uo")
                .withKey("log4j2.properties")
                .build())
        .endValueFrom()
        .build();
    LOGGER.info("Setting log level of TO and UO to OFF - records should not appear in log");
    // change to external logging
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getEntityOperator().getTopicOperator().setLogging(elTo);
        k.getSpec().getEntityOperator().getUserOperator().setLogging(elUo);
    }, namespaceName);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=OFF") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=OFF") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("monitorInterval=30") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("monitorInterval=30"));
    TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String uoLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s");
        String toLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "topic-operator", "30s");
        return uoLog != null && toLog != null && uoLog.isEmpty() && toLog.isEmpty() && !(DEFAULT_LOG4J_PATTERN.matcher(uoLog).find() && DEFAULT_LOG4J_PATTERN.matcher(toLog).find());
    });
    LOGGER.info("Setting external logging OFF");
    configMapTo = new ConfigMapBuilder()
        .withNewMetadata()
            .withName("external-configmap-to")
            .withNamespace(namespaceName)
        .endMetadata()
        .withData(Collections.singletonMap("log4j2.properties",
            "name=TOConfig\n"
            + "appender.console.type=Console\n"
            + "appender.console.name=STDOUT\n"
            + "appender.console.layout.type=PatternLayout\n"
            + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
            + "rootLogger.level=DEBUG\n"
            + "rootLogger.appenderRefs=stdout\n"
            + "rootLogger.appenderRef.console.ref=STDOUT\n"
            + "rootLogger.additivity=false"))
        .build();
    configMapUo = new ConfigMapBuilder()
        .withNewMetadata()
            .withName("external-configmap-uo")
            .withNamespace(namespaceName)
        .endMetadata()
        .addToData(Collections.singletonMap("log4j2.properties",
            "name=UOConfig\n"
            + "appender.console.type=Console\n"
            + "appender.console.name=STDOUT\n"
            + "appender.console.layout.type=PatternLayout\n"
            + "appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"
            + "rootLogger.level=DEBUG\n"
            + "rootLogger.appenderRefs=stdout\n"
            + "rootLogger.appenderRef.console.ref=STDOUT\n"
            + "rootLogger.additivity=false"))
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapTo);
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapUo);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "topic-operator", "cat", "/opt/topic-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, eoPodName, "user-operator", "cat", "/opt/user-operator/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String uoLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "user-operator", "30s");
        String toLog = StUtils.getLogFromPodByTime(namespaceName, eoPodName, "topic-operator", "30s");
        return uoLog != null && toLog != null && !(uoLog.isEmpty() && toLog.isEmpty()) && DEFAULT_LOG4J_PATTERN.matcher(uoLog).find() && DEFAULT_LOG4J_PATTERN.matcher(toLog).find();
    });
    assertThat("EO pod should not roll", DeploymentUtils.depSnapshot(namespaceName, eoDeploymentName), equalTo(eoPods));
}
Also used: ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
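
The entity operator wiring above follows the same builder pattern and also works on an existing Kafka resource. A minimal sketch, assuming the resource already has both operators configured under .spec.entityOperator; EntityOperatorLogging and withOperatorRootLevel are hypothetical names:

import java.util.Collections;

import io.strimzi.api.kafka.model.InlineLogging;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaBuilder;

public class EntityOperatorLogging {

    // Hypothetical helper: sets the root logger level of both the Topic Operator
    // and the User Operator through inline logging on a copy of the Kafka resource.
    public static Kafka withOperatorRootLevel(Kafka kafka, String level) {
        InlineLogging il = new InlineLogging();
        il.setLoggers(Collections.singletonMap("rootLogger.level", level));
        return new KafkaBuilder(kafka)
            .editSpec()
                .editEntityOperator()
                    .editTopicOperator()
                        .withInlineLogging(il)
                    .endTopicOperator()
                    .editUserOperator()
                        .withInlineLogging(il)
                    .endUserOperator()
                .endEntityOperator()
            .endSpec()
            .build();
    }
}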

Example 4 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

From class LoggingChangeST, method testDynamicallySetKafkaLoggingLevels.

@ParallelNamespaceTest
void testDynamicallySetKafkaLoggingLevels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaName = KafkaResources.kafkaStatefulSetName(clusterName);
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaName);
    InlineLogging ilOff = new InlineLogging();
    Map<String, String> log4jConfig = new HashMap<>();
    log4jConfig.put("kafka.root.logger.level", "OFF");
    log4jConfig.put("log4j.logger.org.I0Itec.zkclient.ZkClient", "OFF");
    log4jConfig.put("log4j.logger.org.apache.zookeeper", "OFF");
    log4jConfig.put("log4j.logger.kafka", "OFF");
    log4jConfig.put("log4j.logger.org.apache.kafka", "OFF");
    log4jConfig.put("log4j.logger.kafka.request.logger", "OFF, CONSOLE");
    log4jConfig.put("log4j.logger.kafka.network.Processor", "OFF");
    log4jConfig.put("log4j.logger.kafka.server.KafkaApis", "OFF");
    log4jConfig.put("log4j.logger.kafka.network.RequestChannel$", "OFF");
    log4jConfig.put("log4j.logger.kafka.controller", "OFF");
    log4jConfig.put("log4j.logger.kafka.log.LogCleaner", "OFF");
    log4jConfig.put("log4j.logger.state.change.logger", "OFF");
    log4jConfig.put("log4j.logger.kafka.authorizer.logger", "OFF");
    ilOff.setLoggers(log4jConfig);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .withInlineLogging(ilOff)
            .endKafka()
        .endSpec()
        .build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        boolean[] correctLogging = { true };
        kafkaPods.keySet().forEach(podName -> {
            String kafkaLog = StUtils.getLogFromPodByTime(namespaceName, podName, "kafka", "30s");
            correctLogging[0] = kafkaLog != null && kafkaLog.isEmpty() && !DEFAULT_LOG4J_PATTERN.matcher(kafkaLog).find();
        });
        return correctLogging[0];
    });
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    ilDebug.setLoggers(Collections.singletonMap("kafka.root.logger.level", "DEBUG"));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for dynamic change in the kafka pod");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type broker-loggers --entity-name 0").out().contains("root=DEBUG"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        boolean[] correctLogging = { true };
        kafkaPods.keySet().forEach(podName -> {
            String kafkaLog = StUtils.getLogFromPodByTime(namespaceName, podName, "kafka", "30s");
            correctLogging[0] = kafkaLog != null && !kafkaLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(kafkaLog).find();
        });
        return correctLogging[0];
    });
    LOGGER.info("Setting external logging INFO");
    ConfigMap configMap = new ConfigMapBuilder().withNewMetadata().withName("external-configmap").withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n" + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n\n" + "log4j.rootLogger=INFO, CONSOLE\n" + "log4j.logger.org.I0Itec.zkclient.ZkClient=INFO\n" + "log4j.logger.org.apache.zookeeper=INFO\n" + "log4j.logger.kafka=INFO\n" + "log4j.logger.org.apache.kafka=INFO\n" + "log4j.logger.kafka.request.logger=WARN\n" + "log4j.logger.kafka.network.Processor=ERROR\n" + "log4j.logger.kafka.server.KafkaApis=ERROR\n" + "log4j.logger.kafka.network.RequestChannel$=WARN\n" + "log4j.logger.kafka.controller=TRACE\n" + "log4j.logger.kafka.log.LogCleaner=INFO\n" + "log4j.logger.state.change.logger=TRACE\n" + "log4j.logger.kafka.authorizer.logger=INFO")).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMap);
    ExternalLogging elKafka = new ExternalLoggingBuilder()
        .withNewValueFrom()
            .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                .withKey("log4j.properties")
                .withName("external-configmap")
                .withOptional(false)
                .build())
        .endValueFrom()
        .build();
    LOGGER.info("Setting log level of Kafka to INFO");
    // change to external logging
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getKafka().setLogging(elKafka);
    }, namespaceName);
    LOGGER.info("Waiting for dynamic change in the kafka pod");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type broker-loggers --entity-name 0").out().contains("root=INFO"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        boolean[] correctLogging = { true };
        kafkaPods.keySet().forEach(podName -> {
            String kafkaLog = StUtils.getLogFromPodByTime(namespaceName, podName, "kafka", "30s");
            correctLogging[0] = kafkaLog != null && !kafkaLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(kafkaLog).find();
        });
        return correctLogging[0];
    });
    assertThat("Kafka pod should not roll", RollingUpdateUtils.componentHasRolled(namespaceName, kafkaSelector, kafkaPods), is(false));
}
Also used: ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ExternalLogging(io.strimzi.api.kafka.model.ExternalLogging) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
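
The dynamic verification via kafka-configs.sh appears twice above and is a natural candidate for a shared helper. A minimal sketch, assuming the system-test framework's TestUtils, Constants, and cmdKubeClient helpers under the package names used in this repository; BrokerLoggerAssertions and waitForBrokerRootLevel are hypothetical names:

import org.apache.logging.log4j.Level;

import io.strimzi.systemtest.Constants;
import io.strimzi.test.TestUtils;

import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient;

public class BrokerLoggerAssertions {

    // Hypothetical helper: polls broker 0's dynamic logger configuration until
    // the root logger reports the expected level.
    public static void waitForBrokerRootLevel(String namespaceName, String kafkaPodName, String expectedLevel) {
        TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
            () -> cmdKubeClient().namespace(namespaceName)
                .execInPodContainer(Level.TRACE, kafkaPodName, "kafka", "/bin/bash", "-c",
                    "bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type broker-loggers --entity-name 0")
                .out()
                .contains("root=" + expectedLevel));
    }
}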

Example 5 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

From class KafkaCluster, method fromCrd.

@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());
    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());
    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);
    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));
    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }
    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }
    result.setRack(kafkaClusterSpec.getRack());
    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.setInitImage(initImage);
    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (kafkaClusterSpec.getJvmOptions() != null) {
        result.setJavaSystemProperties(kafkaClusterSpec.getJvmOptions().getJavaSystemProperties());
    }
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());
    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }
    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);
    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);
    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);
        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " + "changing the deleteClaim flag, " + "adding volumes to Jbod storage or removing volumes from Jbod storage, " + "changing overrides to nodes which do not exist yet" + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage", "The desired Kafka storage configuration contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.");
            result.addWarningCondition(warning);
            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());
    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.setListeners(listeners);
    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.setAuthorization(kafkaClusterSpec.getAuthorization());
    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();
        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }
        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());
        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }
        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }
        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }
        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }
        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }
        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }
        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }
        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }
        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }
        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }
    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);
    return result;
}
Also used: KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec) MetricsAndLogging(io.strimzi.operator.common.MetricsAndLogging) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) Logging(io.strimzi.api.kafka.model.Logging) Condition(io.strimzi.api.kafka.model.status.Condition) KafkaAuthorizationKeycloak(io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak) KafkaSpec(io.strimzi.api.kafka.model.KafkaSpec) Storage(io.strimzi.api.kafka.model.storage.Storage) GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) KafkaClusterTemplate(io.strimzi.api.kafka.model.template.KafkaClusterTemplate)
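
The logging == null ? new InlineLogging() : logging defaulting above is how the operator treats an absent .spec.kafka.logging: fall back to an empty InlineLogging, meaning no user-supplied logger overrides. A standalone restatement of that one step; LoggingDefaults and orDefault are hypothetical names:

import io.strimzi.api.kafka.model.InlineLogging;
import io.strimzi.api.kafka.model.Logging;

public class LoggingDefaults {

    // Mirrors the defaulting in KafkaCluster.fromCrd: a missing logging section
    // falls back to an empty InlineLogging rather than a null Logging reference.
    public static Logging orDefault(Logging configured) {
        return configured == null ? new InlineLogging() : configured;
    }
}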

Aggregations

InlineLogging (io.strimzi.api.kafka.model.InlineLogging): 16 usages
ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest): 8 usages
ExternalLogging (io.strimzi.api.kafka.model.ExternalLogging): 6 usages
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap): 5 usages
ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder): 5 usages
ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder): 5 usages
ExternalLoggingBuilder (io.strimzi.api.kafka.model.ExternalLoggingBuilder): 5 usages
Tag (org.junit.jupiter.api.Tag): 5 usages
HashMap (java.util.HashMap): 4 usages
IntOrString (io.fabric8.kubernetes.api.model.IntOrString): 3 usages
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 3 usages
KafkaClusterSpec (io.strimzi.api.kafka.model.KafkaClusterSpec): 3 usages
Logging (io.strimzi.api.kafka.model.Logging): 3 usages
Kafka (io.strimzi.api.kafka.model.Kafka): 2 usages
Condition (io.strimzi.api.kafka.model.status.Condition): 2 usages
Storage (io.strimzi.api.kafka.model.storage.Storage): 2 usages
MetricsAndLogging (io.strimzi.operator.common.MetricsAndLogging): 2 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 2 usages
MethodSource (org.junit.jupiter.params.provider.MethodSource): 2 usages
CruiseControlSpec (io.strimzi.api.kafka.model.CruiseControlSpec): 1 usage