Example 26 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

In the class KafkaAssemblyOperatorTest, the method testUpdateZkClusterLogConfig:

@ParameterizedTest
@MethodSource("data")
public void testUpdateZkClusterLogConfig(Params params, VertxTestContext context) {
    setFields(params);
    Kafka kafkaAssembly = getKafkaAssembly("bar");
    InlineLogging logger = new InlineLogging();
    logger.setLoggers(singletonMap("zookeeper.root.logger", "DEBUG"));
    kafkaAssembly.getSpec().getZookeeper().setLogging(logger);
    updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly);
}
Also used: Kafka(io.strimzi.api.kafka.model.Kafka), InlineLogging(io.strimzi.api.kafka.model.InlineLogging), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest), MethodSource(org.junit.jupiter.params.provider.MethodSource)
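The test above wires the logging override with plain setters; the same ZooKeeper logger override can also be written with the fluent builders that strimzi-api generates for the model classes. A minimal sketch, assuming InlineLoggingBuilder and KafkaBuilder are available and reusing the test's getKafkaAssembly helper:

// Assumed imports: io.strimzi.api.kafka.model.InlineLoggingBuilder,
// io.strimzi.api.kafka.model.KafkaBuilder, static java.util.Collections.singletonMap
InlineLogging logging = new InlineLoggingBuilder()
        // override only the ZooKeeper root logger; other loggers keep their defaults
        .withLoggers(singletonMap("zookeeper.root.logger", "DEBUG"))
        .build();
Kafka kafkaAssembly = new KafkaBuilder(getKafkaAssembly("bar"))
        .editSpec()
            .editZookeeper()
                .withLogging(logging)
            .endZookeeper()
        .endSpec()
        .build();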

Example 27 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi by strimzi.

In the class KafkaAssemblyOperatorTest, the method testUpdateClusterLogConfig:

@ParameterizedTest
@MethodSource("data")
public void testUpdateClusterLogConfig(Params params, VertxTestContext context) {
    setFields(params);
    Kafka kafkaAssembly = getKafkaAssembly("bar");
    InlineLogging logger = new InlineLogging();
    logger.setLoggers(singletonMap("kafka.root.logger.level", "DEBUG"));
    kafkaAssembly.getSpec().getKafka().setLogging(logger);
    updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly);
}
Also used: Kafka(io.strimzi.api.kafka.model.Kafka), InlineLogging(io.strimzi.api.kafka.model.InlineLogging), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest), MethodSource(org.junit.jupiter.params.provider.MethodSource)

Example 28 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi-kafka-operator by strimzi.

In the class KafkaAssemblyOperatorTest, the method testUpdateClusterLogConfig:

@ParameterizedTest
@MethodSource("data")
public void testUpdateClusterLogConfig(Params params, VertxTestContext context) {
    setFields(params);
    Kafka kafkaAssembly = getKafkaAssembly("bar");
    InlineLogging logger = new InlineLogging();
    logger.setLoggers(singletonMap("kafka.root.logger.level", "DEBUG"));
    kafkaAssembly.getSpec().getKafka().setLogging(logger);
    updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly);
}
Also used: Kafka(io.strimzi.api.kafka.model.Kafka), InlineLogging(io.strimzi.api.kafka.model.InlineLogging), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest), MethodSource(org.junit.jupiter.params.provider.MethodSource)

Example 29 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi-kafka-operator by strimzi.

In the class ZookeeperCluster, the method fromCrd:

@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity" })
public static ZookeeperCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    ZookeeperCluster zk = new ZookeeperCluster(reconciliation, kafkaAssembly);
    zk.setOwnerReference(kafkaAssembly);
    ZookeeperClusterSpec zookeeperClusterSpec = kafkaAssembly.getSpec().getZookeeper();
    int replicas = zookeeperClusterSpec.getReplicas();
    if (replicas <= 0) {
        replicas = ZookeeperClusterSpec.DEFAULT_REPLICAS;
    }
    if (replicas == 1 && zookeeperClusterSpec.getStorage() != null && "ephemeral".equals(zookeeperClusterSpec.getStorage().getType())) {
        LOGGER.warnCr(reconciliation, "A ZooKeeper cluster with a single replica and ephemeral storage will be in a defective state after any restart or rolling update. It is recommended that a minimum of three replicas are used.");
    }
    zk.setReplicas(replicas);
    String image = zookeeperClusterSpec.getImage();
    if (image == null) {
        KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka();
        image = versions.kafkaImage(kafkaClusterSpec != null ? kafkaClusterSpec.getImage() : null, kafkaClusterSpec != null ? kafkaClusterSpec.getVersion() : null);
    }
    zk.setImage(image);
    if (zookeeperClusterSpec.getReadinessProbe() != null) {
        zk.setReadinessProbe(zookeeperClusterSpec.getReadinessProbe());
    }
    if (zookeeperClusterSpec.getLivenessProbe() != null) {
        zk.setLivenessProbe(zookeeperClusterSpec.getLivenessProbe());
    }
    Logging logging = zookeeperClusterSpec.getLogging();
    zk.setLogging(logging == null ? new InlineLogging() : logging);
    zk.setGcLoggingEnabled(zookeeperClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : zookeeperClusterSpec.getJvmOptions().isGcLoggingEnabled());
    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(zk, zookeeperClusterSpec);
    if (oldStorage != null) {
        Storage newStorage = zookeeperClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);
        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, zookeeperClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Zookeeper storage are allowed: " + "changing the deleteClaim flag, " + "changing overrides to nodes which do not exist yet " + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired ZooKeeper storage configuration in the custom resource {}/{} contains changes which are not allowed. As " + "a result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
            Condition warning = StatusUtils.buildWarningCondition("ZooKeeperStorage", "The desired ZooKeeper storage configuration contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.");
            zk.addWarningCondition(warning);
            zk.setStorage(oldStorage);
        } else {
            zk.setStorage(newStorage);
        }
    } else {
        zk.setStorage(zookeeperClusterSpec.getStorage());
    }
    zk.setConfiguration(new ZookeeperConfiguration(reconciliation, zookeeperClusterSpec.getConfig().entrySet()));
    zk.setResources(zookeeperClusterSpec.getResources());
    zk.setJvmOptions(zookeeperClusterSpec.getJvmOptions());
    if (zookeeperClusterSpec.getJmxOptions() != null) {
        zk.isJmxEnabled = true;
        if (zookeeperClusterSpec.getJmxOptions().getAuthentication() != null) {
            zk.isJmxAuthenticated = zookeeperClusterSpec.getJmxOptions().getAuthentication() instanceof KafkaJmxAuthenticationPassword;
        }
    }
    if (zookeeperClusterSpec.getTemplate() != null) {
        ZookeeperClusterTemplate template = zookeeperClusterSpec.getTemplate();
        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                zk.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                zk.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                zk.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }
        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            zk.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            zk.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodTemplate(zk, template.getPod());
        ModelUtils.parseInternalServiceTemplate(zk, template.getClientService());
        ModelUtils.parseInternalHeadlessServiceTemplate(zk, template.getNodesService());
        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            zk.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), zk.templateStatefulSetLabels);
            zk.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }
        if (template.getZookeeperContainer() != null && template.getZookeeperContainer().getEnv() != null) {
            zk.templateZookeeperContainerEnvVars = template.getZookeeperContainer().getEnv();
        }
        if (template.getZookeeperContainer() != null && template.getZookeeperContainer().getSecurityContext() != null) {
            zk.templateZookeeperContainerSecurityContext = template.getZookeeperContainer().getSecurityContext();
        }
        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            zk.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            zk.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }
        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            zk.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            zk.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodDisruptionBudgetTemplate(zk, template.getPodDisruptionBudget());
    }
    zk.templatePodLabels = Util.mergeLabelsOrAnnotations(zk.templatePodLabels, DEFAULT_POD_LABELS);
    // Should run at the end when everything is set
    ZooKeeperSpecChecker specChecker = new ZooKeeperSpecChecker(zk);
    zk.warningConditions.addAll(specChecker.run());
    return zk;
}
Also used: KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec), MetricsAndLogging(io.strimzi.operator.common.MetricsAndLogging), InlineLogging(io.strimzi.api.kafka.model.InlineLogging), Logging(io.strimzi.api.kafka.model.Logging), Condition(io.strimzi.api.kafka.model.status.Condition), ZookeeperClusterTemplate(io.strimzi.api.kafka.model.template.ZookeeperClusterTemplate), KafkaJmxAuthenticationPassword(io.strimzi.api.kafka.model.KafkaJmxAuthenticationPassword), IntOrString(io.fabric8.kubernetes.api.model.IntOrString), ZookeeperClusterSpec(io.strimzi.api.kafka.model.ZookeeperClusterSpec), Storage(io.strimzi.api.kafka.model.storage.Storage)
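When the ZooKeeper section of the CR carries no logging block at all, fromCrd above falls back to an empty InlineLogging. A hedged sketch of how that fallback could be exercised; VERSIONS (a KafkaVersion.Lookup), the fixture details, and the getLogging accessor are assumptions borrowed from the operator's test utilities and may differ in name:

// Assumed imports: io.strimzi.api.kafka.model.KafkaBuilder,
// io.strimzi.operator.common.Reconciliation
Kafka kafka = new KafkaBuilder()
        .withNewMetadata().withName("bar").withNamespace("ns").endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withNewEphemeralStorage().endEphemeralStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewEphemeralStorage().endEphemeralStorage()
                // no logging section on purpose
            .endZookeeper()
        .endSpec()
        .build();
ZookeeperCluster zk = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, null, 0);
// with logging omitted in the CR, the model defaults to an empty InlineLogging
assert zk.getLogging() instanceof InlineLogging;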

Example 30 with InlineLogging

Use of io.strimzi.api.kafka.model.InlineLogging in project strimzi-kafka-operator by strimzi.

In the class CruiseControl, the method fromCrd:

/**
 * Creates an instance of the Cruise Control model from the custom resource. When Cruise Control is not enabled,
 * this will return null.
 *
 * @param reconciliation    Reconciliation marker used for logging
 * @param kafkaCr           The Kafka custom resource
 * @param versions          Supported Kafka versions
 * @param storage           The actual storage configuration used by the cluster. This might differ from the storage
 *                          configuration configured by the user in the Kafka CR due to unallowed changes.
 *
 * @return                  Instance of the Cruise Control model
 */
@SuppressWarnings({ "checkstyle:NPathComplexity", "checkstyle:CyclomaticComplexity" })
public static CruiseControl fromCrd(Reconciliation reconciliation, Kafka kafkaCr, KafkaVersion.Lookup versions, Storage storage) {
    CruiseControlSpec ccSpec = kafkaCr.getSpec().getCruiseControl();
    KafkaClusterSpec kafkaClusterSpec = kafkaCr.getSpec().getKafka();
    if (ccSpec != null) {
        CruiseControl cruiseControl = new CruiseControl(reconciliation, kafkaCr);
        cruiseControl.setReplicas(DEFAULT_REPLICAS);
        String image = ccSpec.getImage();
        if (image == null) {
            image = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE, versions.kafkaImage(kafkaClusterSpec.getImage(), versions.defaultVersion().version()));
        }
        cruiseControl.setImage(image);
        cruiseControl.updateConfiguration(ccSpec);
        CruiseControlConfiguration ccConfiguration = (CruiseControlConfiguration) cruiseControl.getConfiguration();
        cruiseControl.sslEnabled = ccConfiguration.isApiSslEnabled();
        cruiseControl.authEnabled = ccConfiguration.isApiAuthEnabled();
        KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
        if (configuration.getConfigOption(MIN_INSYNC_REPLICAS) != null) {
            cruiseControl.minInsyncReplicas = configuration.getConfigOption(MIN_INSYNC_REPLICAS);
        }
        // To avoid illegal storage configurations provided by the user,
        // we rely on the storage configuration provided by the KafkaAssemblyOperator
        cruiseControl.capacity = new Capacity(kafkaCr.getSpec(), storage);
        // Parse different types of metrics configurations
        ModelUtils.parseMetrics(cruiseControl, ccSpec);
        if (ccSpec.getReadinessProbe() != null) {
            cruiseControl.setReadinessProbe(ccSpec.getReadinessProbe());
        }
        if (ccSpec.getLivenessProbe() != null) {
            cruiseControl.setLivenessProbe(ccSpec.getLivenessProbe());
        }
        Logging logging = ccSpec.getLogging();
        cruiseControl.setLogging(logging == null ? new InlineLogging() : logging);
        cruiseControl.setGcLoggingEnabled(ccSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : ccSpec.getJvmOptions().isGcLoggingEnabled());
        cruiseControl.setJvmOptions(ccSpec.getJvmOptions());
        cruiseControl.setResources(ccSpec.getResources());
        cruiseControl.setOwnerReference(kafkaCr);
        if (ccSpec.getTemplate() != null) {
            CruiseControlTemplate template = ccSpec.getTemplate();
            ModelUtils.parsePodTemplate(cruiseControl, template.getPod());
            ModelUtils.parseInternalServiceTemplate(cruiseControl, template.getApiService());
            if (template.getDeployment() != null && template.getDeployment().getMetadata() != null) {
                cruiseControl.templateDeploymentLabels = template.getDeployment().getMetadata().getLabels();
                cruiseControl.templateDeploymentAnnotations = template.getDeployment().getMetadata().getAnnotations();
            }
            if (template.getCruiseControlContainer() != null && template.getCruiseControlContainer().getEnv() != null) {
                cruiseControl.templateCruiseControlContainerEnvVars = template.getCruiseControlContainer().getEnv();
            }
            if (template.getCruiseControlContainer() != null && template.getCruiseControlContainer().getSecurityContext() != null) {
                cruiseControl.templateCruiseControlContainerSecurityContext = template.getCruiseControlContainer().getSecurityContext();
            }
            if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
                cruiseControl.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
                cruiseControl.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
            }
            ModelUtils.parsePodDisruptionBudgetTemplate(cruiseControl, template.getPodDisruptionBudget());
        }
        cruiseControl.templatePodLabels = Util.mergeLabelsOrAnnotations(cruiseControl.templatePodLabels, DEFAULT_POD_LABELS);
        return cruiseControl;
    } else {
        return null;
    }
}
Also used: KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec), InlineLogging(io.strimzi.api.kafka.model.InlineLogging), Logging(io.strimzi.api.kafka.model.Logging), CruiseControlTemplate(io.strimzi.api.kafka.model.template.CruiseControlTemplate), CruiseControlSpec(io.strimzi.api.kafka.model.CruiseControlSpec), Capacity(io.strimzi.operator.cluster.model.cruisecontrol.Capacity), IntOrString(io.fabric8.kubernetes.api.model.IntOrString)
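CruiseControl.fromCrd applies the same fallback: with no logging block in spec.cruiseControl it ends up with an empty InlineLogging. A sketch of the override path from the CR side, assuming the generated builders; existingKafka and the log4j2 logger key are illustrative:

// Assumed imports: io.strimzi.api.kafka.model.InlineLoggingBuilder,
// io.strimzi.api.kafka.model.KafkaBuilder, static java.util.Collections.singletonMap
Kafka kafka = new KafkaBuilder(existingKafka)
        .editSpec()
            .withNewCruiseControl()
                .withLogging(new InlineLoggingBuilder()
                        // logger key shown for illustration; Cruise Control uses log4j2-style names
                        .withLoggers(singletonMap("rootLogger.level", "DEBUG"))
                        .build())
            .endCruiseControl()
        .endSpec()
        .build();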

Aggregations

Class (fully qualified name): number of usages across the indexed examples

InlineLogging (io.strimzi.api.kafka.model.InlineLogging): 36
ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest): 16
ExternalLogging (io.strimzi.api.kafka.model.ExternalLogging): 14
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap): 10
ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder): 10
ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder): 10
ExternalLoggingBuilder (io.strimzi.api.kafka.model.ExternalLoggingBuilder): 10
Tag (org.junit.jupiter.api.Tag): 10
IntOrString (io.fabric8.kubernetes.api.model.IntOrString): 8
KafkaClusterSpec (io.strimzi.api.kafka.model.KafkaClusterSpec): 8
Logging (io.strimzi.api.kafka.model.Logging): 8
HashMap (java.util.HashMap): 8
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 6
Condition (io.strimzi.api.kafka.model.status.Condition): 6
Storage (io.strimzi.api.kafka.model.storage.Storage): 6
MetricsAndLogging (io.strimzi.operator.common.MetricsAndLogging): 6
TestStorage (io.strimzi.systemtest.storage.TestStorage): 6
Kafka (io.strimzi.api.kafka.model.Kafka): 4
KafkaAuthorizationKeycloak (io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak): 4
KafkaConnect (io.strimzi.api.kafka.model.KafkaConnect): 4