
Example 6 with KafkaAuthorizationKeycloak

Use of io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak in project strimzi-kafka-operator by strimzi.

From the class KafkaCluster, method fromCrd.

@SuppressWarnings({ "checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity", "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas, boolean useKRaft) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());
    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());
    // Configures KRaft and KRaft cluster ID
    if (useKRaft) {
        result.useKRaft = true;
        result.clusterId = getOrGenerateKRaftClusterId(kafkaAssembly);
    }
    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);
    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));
    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }
    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }
    result.rack = kafkaClusterSpec.getRack();
    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.initImage = initImage;
    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());
    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.isJmxEnabled = true;
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }
    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);
    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);
    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);
        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " + "changing the deleteClaim flag, " + "adding volumes to Jbod storage or removing volumes from Jbod storage, " + "changing overrides to nodes which do not exist yet " + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage", "The desired Kafka storage configuration contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.");
            result.addWarningCondition(warning);
            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());
    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.listeners = listeners;
    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.authorization = kafkaClusterSpec.getAuthorization();
    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();
        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }
        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());
        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }
        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }
        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }
        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }
        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }
        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }
        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }
        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }
        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }
        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }
    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);
    // Should run at the end when everything is set
    KafkaSpecChecker specChecker = new KafkaSpecChecker(kafkaSpec, versions, result);
    result.warningConditions.addAll(specChecker.run());
    return result;
}
Also used: KafkaClusterSpec (io.strimzi.api.kafka.model.KafkaClusterSpec), MetricsAndLogging (io.strimzi.operator.common.MetricsAndLogging), InlineLogging (io.strimzi.api.kafka.model.InlineLogging), Logging (io.strimzi.api.kafka.model.Logging), Condition (io.strimzi.api.kafka.model.status.Condition), KafkaAuthorizationKeycloak (io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak), KafkaSpec (io.strimzi.api.kafka.model.KafkaSpec), Storage (io.strimzi.api.kafka.model.storage.Storage), GenericKafkaListener (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener), KafkaSpecChecker (io.strimzi.operator.cluster.operator.resource.KafkaSpecChecker), KafkaClusterTemplate (io.strimzi.api.kafka.model.template.KafkaClusterTemplate)
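
For context, a minimal sketch of a Kafka custom resource that satisfies the Keycloak authorization checks in fromCrd above: it declares at least one OAuth-authenticated listener and sets both clientId and tokenEndpointUri. It assumes the generated Strimzi builder classes (KafkaBuilder, GenericKafkaListenerBuilder, KafkaListenerAuthenticationOAuthBuilder, KafkaAuthorizationKeycloakBuilder); the resource names and Keycloak URIs are illustrative.

Kafka kafka = new KafkaBuilder()
    .withNewMetadata()
        .withName("my-cluster")
        .withNamespace("myproject")
    .endMetadata()
    .withNewSpec()
        .withNewKafka()
            .withReplicas(3)
            // At least one OAuth listener is mandatory when Keycloak authorization is used
            .withListeners(new GenericKafkaListenerBuilder()
                .withName("tls")
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationOAuthBuilder()
                    .withValidIssuerUri("https://keycloak.example.com/realms/demo")
                    .withJwksEndpointUri("https://keycloak.example.com/realms/demo/protocol/openid-connect/certs")
                    .build())
                .build())
            // Both clientId and tokenEndpointUri are required, otherwise fromCrd throws InvalidResourceException
            .withAuthorization(new KafkaAuthorizationKeycloakBuilder()
                .withClientId("kafka")
                .withTokenEndpointUri("https://keycloak.example.com/realms/demo/protocol/openid-connect/token")
                .build())
            .withNewEphemeralStorage().endEphemeralStorage()
        .endKafka()
        .withNewZookeeper()
            .withReplicas(3)
            .withNewEphemeralStorage().endEphemeralStorage()
        .endZookeeper()
    .endSpec()
    .build();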

Example 7 with KafkaAuthorizationKeycloak

Use of io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak in project strimzi-kafka-operator by strimzi.

From the class KafkaCluster, method getNonDataVolumes.

/**
 * Generates list of non-data volumes used by Kafka Pods. This includes tmp volumes, mounted secrets and config
 * maps.
 *
 * @param isOpenShift               Indicates whether we are on OpenShift or not
 * @param perBrokerConfiguration    Indicates whether the shared configuration ConfigMap or the per-broker ConfigMap
 *                                  should be mounted.
 * @param podName                   The name of the Pod for which these volumes are generated. The Pod name
 *                                  identifies which ConfigMap should be used when perBrokerConfiguration is set to
 *                                  true. When perBrokerConfiguration is set to false, the Pod name is not used and
 *                                  may be null.
 *
 * @return                          List of non-data volumes used by the Kafka pods
 */
private List<Volume> getNonDataVolumes(boolean isOpenShift, boolean perBrokerConfiguration, String podName) {
    List<Volume> volumeList = new ArrayList<>();
    if (rack != null || isExposedWithNodePort()) {
        volumeList.add(VolumeUtils.createEmptyDirVolume(INIT_VOLUME_NAME, "1Mi", "Memory"));
    }
    volumeList.add(createTempDirVolume());
    volumeList.add(VolumeUtils.createSecretVolume(CLUSTER_CA_CERTS_VOLUME, AbstractModel.clusterCaCertSecretName(cluster), isOpenShift));
    volumeList.add(VolumeUtils.createSecretVolume(BROKER_CERTS_VOLUME, KafkaResources.kafkaSecretName(cluster), isOpenShift));
    volumeList.add(VolumeUtils.createSecretVolume(CLIENT_CA_CERTS_VOLUME, KafkaResources.clientsCaCertificateSecretName(cluster), isOpenShift));
    if (perBrokerConfiguration) {
        volumeList.add(VolumeUtils.createConfigMapVolume(logAndMetricsConfigVolumeName, podName));
    } else {
        volumeList.add(VolumeUtils.createConfigMapVolume(logAndMetricsConfigVolumeName, ancillaryConfigMapName));
    }
    volumeList.add(VolumeUtils.createEmptyDirVolume("ready-files", "1Ki", "Memory"));
    for (GenericKafkaListener listener : listeners) {
        if (listener.isTls() && listener.getConfiguration() != null && listener.getConfiguration().getBrokerCertChainAndKey() != null) {
            CertAndKeySecretSource secretSource = listener.getConfiguration().getBrokerCertChainAndKey();
            Map<String, String> items = new HashMap<>(2);
            items.put(secretSource.getKey(), "tls.key");
            items.put(secretSource.getCertificate(), "tls.crt");
            volumeList.add(VolumeUtils.createSecretVolume("custom-" + ListenersUtils.identifier(listener) + "-certs", secretSource.getSecretName(), items, isOpenShift));
        }
        if (isListenerWithOAuth(listener)) {
            KafkaListenerAuthenticationOAuth oauth = (KafkaListenerAuthenticationOAuth) listener.getAuth();
            volumeList.addAll(AuthenticationUtils.configureOauthCertificateVolumes("oauth-" + ListenersUtils.identifier(listener), oauth.getTlsTrustedCertificates(), isOpenShift));
        }
        if (isListenerWithCustomAuth(listener)) {
            KafkaListenerAuthenticationCustom custom = (KafkaListenerAuthenticationCustom) listener.getAuth();
            volumeList.addAll(AuthenticationUtils.configureGenericSecretVolumes("custom-listener-" + ListenersUtils.identifier(listener), custom.getSecrets(), isOpenShift));
        }
    }
    if (authorization instanceof KafkaAuthorizationKeycloak) {
        KafkaAuthorizationKeycloak keycloakAuthz = (KafkaAuthorizationKeycloak) authorization;
        volumeList.addAll(AuthenticationUtils.configureOauthCertificateVolumes("authz-keycloak", keycloakAuthz.getTlsTrustedCertificates(), isOpenShift));
    }
    return volumeList;
}
Also used: GenericKafkaListener (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener), Volume (io.fabric8.kubernetes.api.model.Volume), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), KafkaListenerAuthenticationOAuth (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuth), KafkaAuthorizationKeycloak (io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak), KafkaListenerAuthenticationCustom (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationCustom), CertAndKeySecretSource (io.strimzi.api.kafka.model.CertAndKeySecretSource)
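
As a usage note for the custom-certificate branch above: a TLS listener configured with brokerCertChainAndKey causes getNonDataVolumes to mount the referenced Secret with its key and certificate items remapped to tls.key and tls.crt. A minimal sketch of such a listener, assuming the generated builder classes and an illustrative Secret name:

GenericKafkaListener externalTls = new GenericKafkaListenerBuilder()
    .withName("external")
    .withPort(9094)
    .withType(KafkaListenerType.ROUTE)
    .withTls(true)
    .withNewConfiguration()
        // The volume "custom-<listener-identifier>-certs" will map these items
        // to tls.key and tls.crt inside the broker Pod
        .withNewBrokerCertChainAndKey()
            .withSecretName("my-listener-certificate")
            .withKey("listener.key")
            .withCertificate("listener.crt")
        .endBrokerCertChainAndKey()
    .endConfiguration()
    .build();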

Example 8 with KafkaAuthorizationKeycloak

Use of io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak in project strimzi by strimzi.

From the class KafkaCluster, method fromCrd.

@SuppressWarnings({ "checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity", "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas, boolean useKRaft) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());
    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());
    // Configures KRaft and KRaft cluster ID
    if (useKRaft) {
        result.useKRaft = true;
        result.clusterId = getOrGenerateKRaftClusterId(kafkaAssembly);
    }
    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);
    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));
    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }
    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }
    result.rack = kafkaClusterSpec.getRack();
    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.initImage = initImage;
    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());
    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.isJmxEnabled = true;
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }
    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);
    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);
    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);
        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " + "changing the deleteClaim flag, " + "adding volumes to Jbod storage or removing volumes from Jbod storage, " + "changing overrides to nodes which do not exist yet " + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage", "The desired Kafka storage configuration contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.");
            result.addWarningCondition(warning);
            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());
    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.listeners = listeners;
    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.authorization = kafkaClusterSpec.getAuthorization();
    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();
        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }
        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());
        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }
        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }
        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }
        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }
        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }
        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }
        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }
        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }
        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }
        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }
    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);
    // Should run at the end when everything is set
    KafkaSpecChecker specChecker = new KafkaSpecChecker(kafkaSpec, versions, result);
    result.warningConditions.addAll(specChecker.run());
    return result;
}
Also used : KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec) MetricsAndLogging(io.strimzi.operator.common.MetricsAndLogging) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) Logging(io.strimzi.api.kafka.model.Logging) Condition(io.strimzi.api.kafka.model.status.Condition) KafkaAuthorizationKeycloak(io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) KafkaSpec(io.strimzi.api.kafka.model.KafkaSpec) Storage(io.strimzi.api.kafka.model.storage.Storage) GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) KafkaSpecChecker(io.strimzi.operator.cluster.operator.resource.KafkaSpecChecker) KafkaClusterTemplate(io.strimzi.api.kafka.model.template.KafkaClusterTemplate)

Example 9 with KafkaAuthorizationKeycloak

Use of io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak in project strimzi by strimzi.

From the class OauthAuthorizationIsolatedST, method testSuperUserWithOauthAuthorization.

@Description("As a superuser of team A and team B, i am able to break defined authorization rules")
@ParallelTest
@Order(6)
void testSuperUserWithOauthAuthorization(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    String teamAProducerName = TEAM_A_PRODUCER_NAME + "-" + clusterName;
    String teamAConsumerName = TEAM_A_CONSUMER_NAME + "-" + clusterName;
    String teamBProducerName = TEAM_B_PRODUCER_NAME + "-" + clusterName;
    String teamBConsumerName = TEAM_B_CONSUMER_NAME + "-" + clusterName;
    // write-only access means that Team A cannot create a new topic matching 'x-.*'
    String topicXName = TOPIC_X + mapWithTestTopics.get(extensionContext.getDisplayName());
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(oauthClusterName, KafkaResources.kafkaStatefulSetName(oauthClusterName));
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicXName, clusterOperator.getDeploymentNamespace()).build());
    LOGGER.info("Verifying that team B is not able write to topic starting with 'x-' because in kafka cluster" + "does not have super-users to break authorization rules");
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterOperator.getDeploymentNamespace(), oauthClusterName, userName).build());
    KafkaOauthClients teamBOauthClientJob = new KafkaOauthClientsBuilder().withNamespaceName(clusterOperator.getDeploymentNamespace()).withProducerName(teamBProducerName).withConsumerName(teamBConsumerName).withBootstrapAddress(KafkaResources.tlsBootstrapAddress(oauthClusterName)).withTopicName(topicXName).withMessageCount(MESSAGE_COUNT).withConsumerGroup("x-consumer_group_b-" + clusterName).withOauthClientId(TEAM_B_CLIENT).withOauthClientSecret(TEAM_B_CLIENT_SECRET).withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()).withClientUserName(userName).build();
    resourceManager.createResource(extensionContext, teamBOauthClientJob.producerStrimziOauthTls(oauthClusterName));
    JobUtils.waitForJobFailure(teamBProducerName, clusterOperator.getDeploymentNamespace(), 30_000);
    JobUtils.deleteJobWithWait(clusterOperator.getDeploymentNamespace(), teamBProducerName);
    LOGGER.info("Verifying that team A is not able read to topic starting with 'x-' because in kafka cluster" + "does not have super-users to break authorization rules");
    KafkaOauthClients teamAOauthClientJob = new KafkaOauthClientsBuilder().withNamespaceName(clusterOperator.getDeploymentNamespace()).withProducerName(teamAProducerName).withConsumerName(teamAConsumerName).withBootstrapAddress(KafkaResources.tlsBootstrapAddress(oauthClusterName)).withTopicName(topicXName).withMessageCount(MESSAGE_COUNT).withConsumerGroup("x-consumer_group_b1-" + clusterName).withOauthClientId(TEAM_A_CLIENT).withOauthClientSecret(TEAM_A_CLIENT_SECRET).withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()).withClientUserName(userName).build();
    resourceManager.createResource(extensionContext, teamAOauthClientJob.consumerStrimziOauthTls(oauthClusterName));
    JobUtils.waitForJobFailure(teamAConsumerName, clusterOperator.getDeploymentNamespace(), 30_000);
    JobUtils.deleteJobWithWait(clusterOperator.getDeploymentNamespace(), teamAConsumerName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), kafkaSelector);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(oauthClusterName, kafka -> {
        List<String> superUsers = new ArrayList<>(2);
        superUsers.add("service-account-" + TEAM_A_CLIENT);
        superUsers.add("service-account-" + TEAM_B_CLIENT);
        ((KafkaAuthorizationKeycloak) kafka.getSpec().getKafka().getAuthorization()).setSuperUsers(superUsers);
    }, clusterOperator.getDeploymentNamespace());
    RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), kafkaSelector, 1, kafkaPods);
    LOGGER.info("Verifying that team B is able to write to topic starting with 'x-' and break authorization rule");
    resourceManager.createResource(extensionContext, teamBOauthClientJob.producerStrimziOauthTls(oauthClusterName));
    ClientUtils.waitForClientSuccess(teamBProducerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
    LOGGER.info("Verifying that team A is able to write to topic starting with 'x-' and break authorization rule");
    teamAOauthClientJob = new KafkaOauthClientsBuilder(teamAOauthClientJob).withConsumerGroup("x-consumer_group_b2-" + clusterName).withTopicName(topicXName).build();
    resourceManager.createResource(extensionContext, teamAOauthClientJob.consumerStrimziOauthTls(oauthClusterName));
    ClientUtils.waitForClientSuccess(teamAConsumerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
}
Also used: KafkaOauthClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClientsBuilder), ArrayList (java.util.ArrayList), LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector), KafkaAuthorizationKeycloak (io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak), KafkaOauthClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClients), Order (org.junit.jupiter.api.Order), TestMethodOrder (org.junit.jupiter.api.TestMethodOrder), Description (io.vertx.core.cli.annotations.Description), ParallelTest (io.strimzi.systemtest.annotations.ParallelTest)

Example 10 with KafkaAuthorizationKeycloak

Use of io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak in project strimzi-kafka-operator by strimzi.

From the class KafkaBrokerConfigurationBuilder, method configureAuthorization.

/**
 * Configures authorization for the Kafka brokers. This method is used only internally.
 *
 * @param clusterName   Name of the cluster
 * @param superUsers    List of super-users who have full rights on the cluster
 * @param authorization The authorization configuration from the Kafka CR
 * @param useKRaft      Whether KRaft mode is used in the configuration
 */
private void configureAuthorization(String clusterName, List<String> superUsers, KafkaAuthorization authorization, boolean useKRaft) {
    if (KafkaAuthorizationSimple.TYPE_SIMPLE.equals(authorization.getType())) {
        KafkaAuthorizationSimple simpleAuthz = (KafkaAuthorizationSimple) authorization;
        if (useKRaft) {
            writer.println("authorizer.class.name=" + KafkaAuthorizationSimple.KRAFT_AUTHORIZER_CLASS_NAME);
            writer.println("early.start.listeners=" + String.join(",", List.of(CONTROL_PLANE_LISTENER_NAME, REPLICATION_LISTENER_NAME)));
        } else {
            writer.println("authorizer.class.name=" + KafkaAuthorizationSimple.AUTHORIZER_CLASS_NAME);
        }
        // User configured super users
        if (simpleAuthz.getSuperUsers() != null && simpleAuthz.getSuperUsers().size() > 0) {
            superUsers.addAll(simpleAuthz.getSuperUsers().stream().map(e -> String.format("User:%s", e)).collect(Collectors.toList()));
        }
    } else if (KafkaAuthorizationOpa.TYPE_OPA.equals(authorization.getType())) {
        KafkaAuthorizationOpa opaAuthz = (KafkaAuthorizationOpa) authorization;
        writer.println("authorizer.class.name=" + KafkaAuthorizationOpa.AUTHORIZER_CLASS_NAME);
        writer.println(String.format("%s=%s", "opa.authorizer.url", opaAuthz.getUrl()));
        writer.println(String.format("%s=%b", "opa.authorizer.allow.on.error", opaAuthz.isAllowOnError()));
        writer.println(String.format("%s=%b", "opa.authorizer.metrics.enabled", opaAuthz.isEnableMetrics()));
        writer.println(String.format("%s=%d", "opa.authorizer.cache.initial.capacity", opaAuthz.getInitialCacheCapacity()));
        writer.println(String.format("%s=%d", "opa.authorizer.cache.maximum.size", opaAuthz.getMaximumCacheSize()));
        writer.println(String.format("%s=%d", "opa.authorizer.cache.expire.after.seconds", Duration.ofMillis(opaAuthz.getExpireAfterMs()).getSeconds()));
        // User configured super users
        if (opaAuthz.getSuperUsers() != null && opaAuthz.getSuperUsers().size() > 0) {
            superUsers.addAll(opaAuthz.getSuperUsers().stream().map(e -> String.format("User:%s", e)).collect(Collectors.toList()));
        }
    } else if (KafkaAuthorizationKeycloak.TYPE_KEYCLOAK.equals(authorization.getType())) {
        KafkaAuthorizationKeycloak keycloakAuthz = (KafkaAuthorizationKeycloak) authorization;
        writer.println("authorizer.class.name=" + KafkaAuthorizationKeycloak.AUTHORIZER_CLASS_NAME);
        writer.println("strimzi.authorization.token.endpoint.uri=" + keycloakAuthz.getTokenEndpointUri());
        writer.println("strimzi.authorization.client.id=" + keycloakAuthz.getClientId());
        writer.println("strimzi.authorization.delegate.to.kafka.acl=" + keycloakAuthz.isDelegateToKafkaAcls());
        addOption(writer, "strimzi.authorization.grants.refresh.period.seconds", keycloakAuthz.getGrantsRefreshPeriodSeconds());
        addOption(writer, "strimzi.authorization.grants.refresh.pool.size", keycloakAuthz.getGrantsRefreshPoolSize());
        addOption(writer, "strimzi.authorization.connect.timeout.seconds", keycloakAuthz.getConnectTimeoutSeconds());
        addOption(writer, "strimzi.authorization.read.timeout.seconds", keycloakAuthz.getReadTimeoutSeconds());
        writer.println("strimzi.authorization.kafka.cluster.name=" + clusterName);
        if (keycloakAuthz.getTlsTrustedCertificates() != null && keycloakAuthz.getTlsTrustedCertificates().size() > 0) {
            writer.println("strimzi.authorization.ssl.truststore.location=/tmp/kafka/authz-keycloak.truststore.p12");
            writer.println("strimzi.authorization.ssl.truststore.password=" + PLACEHOLDER_CERT_STORE_PASSWORD);
            writer.println("strimzi.authorization.ssl.truststore.type=PKCS12");
            writer.println("strimzi.authorization.ssl.secure.random.implementation=SHA1PRNG");
            String endpointIdentificationAlgorithm = keycloakAuthz.isDisableTlsHostnameVerification() ? "" : "HTTPS";
            writer.println("strimzi.authorization.ssl.endpoint.identification.algorithm=" + endpointIdentificationAlgorithm);
        }
        // User configured super users
        if (keycloakAuthz.getSuperUsers() != null && keycloakAuthz.getSuperUsers().size() > 0) {
            superUsers.addAll(keycloakAuthz.getSuperUsers().stream().map(e -> String.format("User:%s", e)).collect(Collectors.toList()));
        }
    } else if (KafkaAuthorizationCustom.TYPE_CUSTOM.equals(authorization.getType())) {
        KafkaAuthorizationCustom customAuthz = (KafkaAuthorizationCustom) authorization;
        writer.println("authorizer.class.name=" + customAuthz.getAuthorizerClass());
        // User configured super users
        if (customAuthz.getSuperUsers() != null && customAuthz.getSuperUsers().size() > 0) {
            superUsers.addAll(customAuthz.getSuperUsers().stream().map(e -> String.format("User:%s", e)).collect(Collectors.toList()));
        }
    }
}
Also used: VolumeMount (io.fabric8.kubernetes.api.model.VolumeMount), GenericKafkaListener (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener), Rack (io.strimzi.api.kafka.model.Rack), KafkaListenerAuthentication (io.strimzi.api.kafka.model.listener.KafkaListenerAuthentication), Function (java.util.function.Function), Supplier (java.util.function.Supplier), ArrayList (java.util.ArrayList), KafkaAuthorizationCustom (io.strimzi.api.kafka.model.KafkaAuthorizationCustom), KafkaListenerAuthenticationTls (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationTls), KafkaAuthorization (io.strimzi.api.kafka.model.KafkaAuthorization), KafkaResources (io.strimzi.api.kafka.model.KafkaResources), GenericKafkaListenerConfiguration (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfiguration), Locale (java.util.Locale), Duration (java.time.Duration), KafkaAuthorizationOpa (io.strimzi.api.kafka.model.KafkaAuthorizationOpa), KafkaListenerAuthenticationScramSha512 (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationScramSha512), PrintWriter (java.io.PrintWriter), CertAndKeySecretSource (io.strimzi.api.kafka.model.CertAndKeySecretSource), CruiseControlConfigurationParameters (io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters), KafkaAuthorizationKeycloak (io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak), KafkaListenerAuthenticationCustom (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationCustom), StringWriter (java.io.StringWriter), ServerPlainConfig (io.strimzi.kafka.oauth.server.plain.ServerPlainConfig), ServerConfig (io.strimzi.kafka.oauth.server.ServerConfig), Collectors (java.util.stream.Collectors), KafkaListenerAuthenticationOAuth (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuth), KafkaAuthorizationSimple (io.strimzi.api.kafka.model.KafkaAuthorizationSimple), Reconciliation (io.strimzi.operator.common.Reconciliation), List (java.util.List), Optional (java.util.Optional), CruiseControlSpec (io.strimzi.api.kafka.model.CruiseControlSpec)
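
To illustrate the Keycloak branch, the following sketch shows an authorization object and, as comments, the broker properties the method above would emit for it. It assumes the generated KafkaAuthorizationKeycloakBuilder; the authorizer class name is taken from the KafkaAuthorizationKeycloak constant rather than spelled out, and all other values are illustrative.

KafkaAuthorizationKeycloak keycloakAuthz = new KafkaAuthorizationKeycloakBuilder()
    .withClientId("kafka")
    .withTokenEndpointUri("https://keycloak.example.com/realms/demo/protocol/openid-connect/token")
    .withDelegateToKafkaAcls(false)
    .withSuperUsers("service-account-kafka")
    .build();
// For a cluster named "my-cluster", configureAuthorization would print:
//   authorizer.class.name=<KafkaAuthorizationKeycloak.AUTHORIZER_CLASS_NAME>
//   strimzi.authorization.token.endpoint.uri=https://keycloak.example.com/realms/demo/protocol/openid-connect/token
//   strimzi.authorization.client.id=kafka
//   strimzi.authorization.delegate.to.kafka.acl=false
//   strimzi.authorization.kafka.cluster.name=my-cluster
// The optional grants-refresh and timeout options are skipped because they are unset,
// the truststore properties are skipped because no TLS trusted certificates are given,
// and "User:service-account-kafka" is appended to the super.users list.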

Aggregations

KafkaAuthorizationKeycloak (io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak): 16 usages
GenericKafkaListener (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener): 14 usages
ArrayList (java.util.ArrayList): 12 usages
KafkaListenerAuthenticationCustom (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationCustom): 10 usages
KafkaListenerAuthenticationOAuth (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuth): 10 usages
CertAndKeySecretSource (io.strimzi.api.kafka.model.CertAndKeySecretSource): 8 usages
VolumeMount (io.fabric8.kubernetes.api.model.VolumeMount): 6 usages
Volume (io.fabric8.kubernetes.api.model.Volume): 4 usages
CruiseControlSpec (io.strimzi.api.kafka.model.CruiseControlSpec): 4 usages
InlineLogging (io.strimzi.api.kafka.model.InlineLogging): 4 usages
KafkaAuthorization (io.strimzi.api.kafka.model.KafkaAuthorization): 4 usages
KafkaAuthorizationCustom (io.strimzi.api.kafka.model.KafkaAuthorizationCustom): 4 usages
KafkaAuthorizationOpa (io.strimzi.api.kafka.model.KafkaAuthorizationOpa): 4 usages
KafkaAuthorizationSimple (io.strimzi.api.kafka.model.KafkaAuthorizationSimple): 4 usages
KafkaClusterSpec (io.strimzi.api.kafka.model.KafkaClusterSpec): 4 usages
KafkaResources (io.strimzi.api.kafka.model.KafkaResources): 4 usages
KafkaSpec (io.strimzi.api.kafka.model.KafkaSpec): 4 usages
Rack (io.strimzi.api.kafka.model.Rack): 4 usages
KafkaListenerAuthentication (io.strimzi.api.kafka.model.listener.KafkaListenerAuthentication): 4 usages
KafkaListenerAuthenticationScramSha512 (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationScramSha512): 4 usages