Example 11 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi-kafka-operator by strimzi.

From class AbstractModel, method parseLogging:

/**
 * @param logging The Logging to parse.
 * @param externalCm The external ConfigMap, used if Logging is an instance of ExternalLogging
 * @return The logging properties as a String in log4j/2 properties file format.
 */
public String parseLogging(Logging logging, ConfigMap externalCm) {
    if (logging instanceof InlineLogging) {
        InlineLogging inlineLogging = (InlineLogging) logging;
        OrderedProperties newSettings = getDefaultLogConfig();
        if (inlineLogging.getLoggers() != null) {
            // Inline logging was specified and some loggers are configured
            if (shouldPatchLoggerAppender()) {
                String rootAppenderName = getRootAppenderNamesFromDefaultLoggingConfig(newSettings);
                String newRootLogger = inlineLogging.getLoggers().get("log4j.rootLogger");
                newSettings.addMapPairs(inlineLogging.getLoggers());
                if (newRootLogger != null && !rootAppenderName.isEmpty() && !newRootLogger.contains(",")) {
                    // this should never happen as appender name is added in default configuration
                    LOGGER.debugCr(reconciliation, "Newly set rootLogger does not contain appender. Setting appender to {}.", rootAppenderName);
                    String level = newSettings.asMap().get("log4j.rootLogger");
                    newSettings.addPair("log4j.rootLogger", level + ", " + rootAppenderName);
                }
            } else {
                newSettings.addMapPairs(inlineLogging.getLoggers());
            }
        }
        return createLog4jProperties(newSettings);
    } else if (logging instanceof ExternalLogging) {
        ExternalLogging externalLogging = (ExternalLogging) logging;
        if (externalLogging.getValueFrom() != null && externalLogging.getValueFrom().getConfigMapKeyRef() != null && externalLogging.getValueFrom().getConfigMapKeyRef().getKey() != null) {
            if (externalCm != null && externalCm.getData() != null && externalCm.getData().containsKey(externalLogging.getValueFrom().getConfigMapKeyRef().getKey())) {
                return maybeAddMonitorIntervalToExternalLogging(externalCm.getData().get(externalLogging.getValueFrom().getConfigMapKeyRef().getKey()));
            } else {
                throw new InvalidResourceException(String.format("ConfigMap %s with external logging configuration does not exist or doesn't contain the configuration under the %s key.", externalLogging.getValueFrom().getConfigMapKeyRef().getName(), externalLogging.getValueFrom().getConfigMapKeyRef().getKey()));
            }
        } else {
            throw new InvalidResourceException("Property logging.valueFrom has to be specified when using external logging.");
        }
    } else {
        LOGGER.debugCr(reconciliation, "logging is not set, using default loggers");
        return createLog4jProperties(getDefaultLogConfig());
    }
}
Also used: ExternalLogging (io.strimzi.api.kafka.model.ExternalLogging), OrderedProperties (io.strimzi.operator.common.model.OrderedProperties), IntOrString (io.fabric8.kubernetes.api.model.IntOrString), InlineLogging (io.strimzi.api.kafka.model.InlineLogging)
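A usage sketch (hypothetical caller, not from the project source): the inline branch above merges the user-supplied loggers over the defaults and renders the result back to a properties string. Here model stands in for any concrete AbstractModel subclass, and the logger keys are illustrative:

InlineLogging inline = new InlineLogging();
Map<String, String> loggers = new HashMap<>();
// Raise one logger's level; all other defaults from getDefaultLogConfig() are kept.
loggers.put("log4j.logger.kafka.request.logger", "DEBUG");
// A root logger set without an appender ("INFO" rather than "INFO, CONSOLE") is
// patched with the default appender name by the shouldPatchLoggerAppender() branch.
loggers.put("log4j.rootLogger", "INFO");
inline.setLoggers(loggers);
// The ConfigMap argument is only consulted for ExternalLogging, so null is safe here.
String log4jProperties = model.parseLogging(inline, null);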

Example 12 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi-kafka-operator by strimzi.

From class KafkaCluster, method fromCrd:

@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());
    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());
    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);
    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));
    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }
    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }
    result.setRack(kafkaClusterSpec.getRack());
    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.setInitImage(initImage);
    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (kafkaClusterSpec.getJvmOptions() != null) {
        result.setJavaSystemProperties(kafkaClusterSpec.getJvmOptions().getJavaSystemProperties());
    }
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());
    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }
    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);
    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);
    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);
        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " + "changing the deleteClaim flag, " + "adding volumes to Jbod storage or removing volumes from Jbod storage, " + "changing overrides to nodes which do not exist yet" + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage", "The desired Kafka storage configuration contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.");
            result.addWarningCondition(warning);
            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());
    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.setListeners(listeners);
    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.setAuthorization(kafkaClusterSpec.getAuthorization());
    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();
        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }
        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());
        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }
        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }
        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }
        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }
        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }
        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }
        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }
        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }
        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }
        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }
    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);
    return result;
}
Also used: KafkaClusterSpec (io.strimzi.api.kafka.model.KafkaClusterSpec), MetricsAndLogging (io.strimzi.operator.common.MetricsAndLogging), InlineLogging (io.strimzi.api.kafka.model.InlineLogging), Logging (io.strimzi.api.kafka.model.Logging), Condition (io.strimzi.api.kafka.model.status.Condition), KafkaAuthorizationKeycloak (io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak), KafkaSpec (io.strimzi.api.kafka.model.KafkaSpec), Storage (io.strimzi.api.kafka.model.storage.Storage), GenericKafkaListener (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener), KafkaClusterTemplate (io.strimzi.api.kafka.model.template.KafkaClusterTemplate)
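Note that fromCrd falls back to an empty InlineLogging when .spec.kafka.logging is unset (see result.setLogging above). A minimal sketch, assuming the standard strimzi fluent builders, of a custom resource that would carry inline logging into this method; the names and the logger key are illustrative, and a real Kafka resource needs more spec fields (replicas, listeners, storage) than shown:

InlineLogging kafkaLogging = new InlineLogging();
kafkaLogging.setLoggers(Collections.singletonMap("kafka.root.logger.level", "DEBUG"));
Kafka kafkaAssembly = new KafkaBuilder()
        .withNewMetadata()
            .withName("my-cluster")
            .withNamespace("my-namespace")
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                // Read back in fromCrd via kafkaClusterSpec.getLogging()
                .withLogging(kafkaLogging)
            .endKafka()
        .endSpec()
        .build();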

Example 13 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi-kafka-operator by strimzi.

From class KafkaAssemblyOperatorTest, method testUpdateZkClusterLogConfig:

@ParameterizedTest
@MethodSource("data")
public void testUpdateZkClusterLogConfig(Params params, VertxTestContext context) {
    setFields(params);
    Kafka kafkaAssembly = getKafkaAssembly("bar");
    InlineLogging logger = new InlineLogging();
    logger.setLoggers(singletonMap("zookeeper.root.logger", "DEBUG"));
    kafkaAssembly.getSpec().getZookeeper().setLogging(logger);
    updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly);
}
Also used: Kafka (io.strimzi.api.kafka.model.Kafka), InlineLogging (io.strimzi.api.kafka.model.InlineLogging), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), MethodSource (org.junit.jupiter.params.provider.MethodSource)

Example 14 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

From class LoggingChangeST, method testDynamicallySetBridgeLoggingLevels:

@ParallelNamespaceTest
@Tag(BRIDGE)
@Tag(ROLLING_UPDATE)
void testDynamicallySetBridgeLoggingLevels(ExtensionContext extensionContext) throws InterruptedException {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    InlineLogging ilOff = new InlineLogging();
    Map<String, String> loggers = new HashMap<>();
    loggers.put("rootLogger.level", "OFF");
    loggers.put("logger.bridge.level", "OFF");
    loggers.put("logger.healthy.level", "OFF");
    loggers.put("logger.ready.level", "OFF");
    ilOff.setLoggers(loggers);
    // create resources async
    resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaPersistent(clusterName, 1, 1).build(), KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build(), KafkaBridgeTemplates.kafkaBridge(clusterName, KafkaResources.tlsBootstrapAddress(clusterName), 1).editSpec().withInlineLogging(ilOff).endSpec().build());
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
    DeploymentUtils.waitForDeploymentReady(namespaceName, kafkaClientsName);
    KafkaBridgeUtils.waitForKafkaBridgeReady(namespaceName, clusterName);
    Map<String, String> bridgeSnapshot = DeploymentUtils.depSnapshot(namespaceName, KafkaBridgeResources.deploymentName(clusterName));
    final String bridgePodName = bridgeSnapshot.keySet().iterator().next();
    LOGGER.info("Asserting if log is without records");
    assertFalse(DEFAULT_LOG4J_PATTERN.matcher(StUtils.getLogFromPodByTime(namespaceName, bridgePodName, "", "30s")).find());
    LOGGER.info("Changing rootLogger level to DEBUG with inline logging");
    InlineLogging ilDebug = new InlineLogging();
    loggers.put("rootLogger.level", "DEBUG");
    loggers.put("logger.bridge.level", "OFF");
    loggers.put("logger.healthy.level", "OFF");
    loggers.put("logger.ready.level", "OFF");
    ilDebug.setLoggers(loggers);
    KafkaBridgeResource.replaceBridgeResourceInSpecificNamespace(clusterName, bridz -> {
        bridz.getSpec().setLogging(ilDebug);
    }, namespaceName);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(clusterName), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(clusterName), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("monitorInterval=30"));
    TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String bridgeLog = StUtils.getLogFromPodByTime(namespaceName, bridgePodName, KafkaBridgeResources.deploymentName(clusterName), "30s");
        return bridgeLog != null && !bridgeLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(bridgeLog).find();
    });
    ConfigMap configMapBridge = new ConfigMapBuilder().withNewMetadata().withName("external-configmap-bridge").withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j2.properties", "name = BridgeConfig\n" + "\n" + "appender.console.type = Console\n" + "appender.console.name = STDOUT\n" + "appender.console.layout.type = PatternLayout\n" + "appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n" + "\n" + "rootLogger.level = OFF\n" + "rootLogger.appenderRefs = console\n" + "rootLogger.appenderRef.console.ref = STDOUT\n" + "rootLogger.additivity = false\n" + "\n" + "logger.bridge.name = io.strimzi.kafka.bridge\n" + "logger.bridge.level = OFF\n" + "logger.bridge.appenderRefs = console\n" + "logger.bridge.appenderRef.console.ref = STDOUT\n" + "logger.bridge.additivity = false\n" + "\n" + "# HTTP OpenAPI specific logging levels (default is INFO)\n" + "# Logging healthy and ready endpoints is very verbose because of Kubernetes health checking.\n" + "logger.healthy.name = http.openapi.operation.healthy\n" + "logger.healthy.level = OFF\n" + "logger.ready.name = http.openapi.operation.ready\n" + "logger.ready.level = OFF")).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMapBridge);
    ExternalLogging bridgeXternalLogging = new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName("external-configmap-bridge").withKey("log4j2.properties").build()).endValueFrom().build();
    LOGGER.info("Setting log level of Bridge to OFF - records should not appear in the log");
    // change to the external logging
    KafkaBridgeResource.replaceBridgeResourceInSpecificNamespace(clusterName, bridz -> {
        bridz.getSpec().setLogging(bridgeXternalLogging);
    }, namespaceName);
    LOGGER.info("Waiting for log4j2.properties will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(clusterName), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("rootLogger.level = OFF") && cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(clusterName), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("monitorInterval=30"));
    TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), Constants.SAFETY_RECONCILIATION_INTERVAL, () -> {
        String bridgeLog = StUtils.getLogFromPodByTime(namespaceName, bridgePodName, KafkaBridgeResources.deploymentName(clusterName), "30s");
        return bridgeLog != null && bridgeLog.isEmpty() && !DEFAULT_LOG4J_PATTERN.matcher(bridgeLog).find();
    });
    assertThat("Bridge pod should not roll", DeploymentUtils.depSnapshot(namespaceName, KafkaBridgeResources.deploymentName(clusterName)), equalTo(bridgeSnapshot));
}
Also used: ExternalLoggingBuilder (io.strimzi.api.kafka.model.ExternalLoggingBuilder), ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap), HashMap (java.util.HashMap), ExternalLogging (io.strimzi.api.kafka.model.ExternalLogging), ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder), ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder), InlineLogging (io.strimzi.api.kafka.model.InlineLogging), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest), Tag (org.junit.jupiter.api.Tag)

Example 15 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

From class LoggingChangeST, method testLoggingHierarchy:

@ParallelNamespaceTest
void testLoggingHierarchy(ExtensionContext extensionContext) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String namespaceName = extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get(Constants.NAMESPACE_KEY).toString();
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build(), KafkaClientsTemplates.kafkaClients(kafkaClientsName).build());
    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1, true).editMetadata().addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().editOrNewSpec().addToConfig("key.converter.schemas.enable", false).addToConfig("value.converter.schemas.enable", false).addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter").addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter").endSpec().build(), KafkaConnectorTemplates.defaultKafkaConnector(clusterName, clusterName, 1).build());
    String connectorClassName = "org.apache.kafka.connect.file.FileStreamSourceConnector";
    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
    LOGGER.info("Changing rootLogger level in KafkaConnector to ERROR with inline logging");
    InlineLogging inlineError = new InlineLogging();
    inlineError.setLoggers(Collections.singletonMap("log4j.logger." + connectorClassName, "ERROR"));
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, connect -> connect.getSpec().setLogging(inlineError), namespaceName);
    LOGGER.info("Waiting for Connect API loggers will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/" + connectorClassName).out().contains("ERROR"));
    LOGGER.info("Restarting Kafka connector {} with class name {}", clusterName, connectorClassName);
    cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "-X", "POST", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/connectors/" + clusterName + "/restart");
    KafkaConnectorUtils.waitForConnectorWorkerStatus(namespaceName, kafkaClientsPodName, clusterName, clusterName, "RUNNING");
    LOGGER.info("Checking that logger is same for connector with class name {}", connectorClassName);
    String connectorLogger = cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/" + connectorClassName).out();
    assertTrue(connectorLogger.contains("ERROR"));
    LOGGER.info("Changing KafkaConnect's root logger to WARN, KafkaConnector: {} shouldn't inherit it", clusterName);
    InlineLogging inlineWarn = new InlineLogging();
    // Keep the connector logger pinned to ERROR while lowering the Connect root logger to WARN;
    // a single setLoggers call replaces the whole map, so both entries must be set together.
    inlineWarn.setLoggers(Map.of("connect.root.logger.level", "WARN", "log4j.logger." + connectorClassName, "ERROR"));
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, connect -> connect.getSpec().setLogging(inlineWarn), namespaceName);
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/root").out().contains("WARN"));
    LOGGER.info("Checking if KafkaConnector {} doesn't inherit logger from KafkaConnect", connectorClassName);
    KafkaConnectorUtils.loggerStabilityWait(namespaceName, clusterName, kafkaClientsPodName, "ERROR", connectorClassName);
}
Also used: InlineLogging (io.strimzi.api.kafka.model.InlineLogging), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)
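The hierarchy behaviour this test pins down is standard log4j precedence: a level set explicitly on a logger wins over the level inherited from the root logger. A minimal standalone sketch, assuming log4j 1.x on the classpath (outside Kubernetes and the operator), that mirrors the test's expectation:

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class LoggerHierarchySketch {
    public static void main(String[] args) {
        // Root logger lowered to WARN, as the test does via connect.root.logger.level.
        Logger.getRootLogger().setLevel(Level.WARN);
        // The connector's logger is pinned to ERROR and does not inherit WARN.
        Logger connectorLogger = Logger.getLogger("org.apache.kafka.connect.file.FileStreamSourceConnector");
        connectorLogger.setLevel(Level.ERROR);
        // Prints ERROR: the explicit level shadows the root logger's level.
        System.out.println(connectorLogger.getEffectiveLevel());
    }
}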

Aggregations (type: number of uses)

InlineLogging (io.strimzi.api.kafka.model.InlineLogging): 32
ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest): 16
ExternalLogging (io.strimzi.api.kafka.model.ExternalLogging): 12
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap): 10
ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder): 10
ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder): 10
ExternalLoggingBuilder (io.strimzi.api.kafka.model.ExternalLoggingBuilder): 10
Tag (org.junit.jupiter.api.Tag): 10
HashMap (java.util.HashMap): 8
IntOrString (io.fabric8.kubernetes.api.model.IntOrString): 6
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 6
KafkaClusterSpec (io.strimzi.api.kafka.model.KafkaClusterSpec): 6
Logging (io.strimzi.api.kafka.model.Logging): 6
Kafka (io.strimzi.api.kafka.model.Kafka): 4
Condition (io.strimzi.api.kafka.model.status.Condition): 4
Storage (io.strimzi.api.kafka.model.storage.Storage): 4
MetricsAndLogging (io.strimzi.operator.common.MetricsAndLogging): 4
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 4
MethodSource (org.junit.jupiter.params.provider.MethodSource): 4
CruiseControlSpec (io.strimzi.api.kafka.model.CruiseControlSpec): 2