Example 26 with GenericKafkaListener

Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener in project strimzi by strimzi.

From the class KafkaCluster, method generateExternalBootstrapRoutes.

/**
 * Generates a list of bootstrap Routes which can be used to bootstrap clients outside of OpenShift.
 *
 * @return The list of generated Routes
 */
public List<Route> generateExternalBootstrapRoutes() {
    List<GenericKafkaListener> routeListeners = ListenersUtils.routeListeners(listeners);
    List<Route> routes = new ArrayList<>(routeListeners.size());
    for (GenericKafkaListener listener : routeListeners) {
        String routeName = ListenersUtils.backwardsCompatibleBootstrapRouteOrIngressName(cluster, listener);
        String serviceName = ListenersUtils.backwardsCompatibleBootstrapServiceName(cluster, listener);
        Route route = new RouteBuilder()
                .withNewMetadata()
                    .withName(routeName)
                    .withLabels(Util.mergeLabelsOrAnnotations(getLabelsWithStrimziName(name, templateExternalBootstrapRouteLabels).toMap(), ListenersUtils.bootstrapLabels(listener)))
                    .withAnnotations(Util.mergeLabelsOrAnnotations(templateExternalBootstrapRouteAnnotations, ListenersUtils.bootstrapAnnotations(listener)))
                    .withNamespace(namespace)
                    .withOwnerReferences(createOwnerReference())
                .endMetadata()
                .withNewSpec()
                    .withNewTo()
                        .withKind("Service")
                        .withName(serviceName)
                    .endTo()
                    .withNewPort()
                        .withNewTargetPort(listener.getPort())
                    .endPort()
                    .withNewTls()
                        .withTermination("passthrough")
                    .endTls()
                .endSpec()
                .build();
        String host = ListenersUtils.bootstrapHost(listener);
        if (host != null) {
            route.getSpec().setHost(host);
        }
        routes.add(route);
    }
    return routes;
}
Also used: GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) RouteBuilder(io.fabric8.openshift.api.model.RouteBuilder) ArrayList(java.util.ArrayList) Route(io.fabric8.openshift.api.model.Route)
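
A minimal sketch of the kind of route-type listener that ListenersUtils.routeListeners(listeners) selects above, built with the GenericKafkaListenerBuilder from the same API package. The listener name and port are illustrative assumptions, not values taken from the example.

import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;

// Route-type listener; only listeners of type ROUTE end up in routeListeners()
// and therefore get a bootstrap Route generated for them
GenericKafkaListener routeListener = new GenericKafkaListenerBuilder()
        .withName("external")                  // illustrative name
        .withPort(9094)                        // illustrative port
        .withType(KafkaListenerType.ROUTE)
        .withTls(true)                         // Routes are exposed with passthrough TLS
        .build();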

Example 27 with GenericKafkaListener

Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener in project strimzi by strimzi.

From the class KafkaCluster, method getEnvVars.

@Override
protected List<EnvVar> getEnvVars() {
    List<EnvVar> varList = new ArrayList<>();
    varList.add(buildEnvVar(ENV_VAR_KAFKA_METRICS_ENABLED, String.valueOf(isMetricsEnabled)));
    varList.add(buildEnvVar(ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED, String.valueOf(gcLoggingEnabled)));
    if (javaSystemProperties != null) {
        varList.add(buildEnvVar(ENV_VAR_STRIMZI_JAVA_SYSTEM_PROPERTIES, ModelUtils.getJavaSystemPropertiesToString(javaSystemProperties)));
    }
    heapOptions(varList, 0.5, 5L * 1024L * 1024L * 1024L);
    jvmPerformanceOptions(varList);
    for (GenericKafkaListener listener : listeners) {
        if (isListenerWithOAuth(listener)) {
            KafkaListenerAuthenticationOAuth oauth = (KafkaListenerAuthenticationOAuth) listener.getAuth();
            if (oauth.getClientSecret() != null) {
                varList.add(buildEnvVarFromSecret("STRIMZI_" + ListenersUtils.envVarIdentifier(listener) + "_OAUTH_CLIENT_SECRET", oauth.getClientSecret().getSecretName(), oauth.getClientSecret().getKey()));
            }
        }
    }
    if (isJmxEnabled()) {
        varList.add(buildEnvVar(ENV_VAR_KAFKA_JMX_ENABLED, "true"));
        if (isJmxAuthenticated) {
            varList.add(buildEnvVarFromSecret(ENV_VAR_KAFKA_JMX_USERNAME, jmxSecretName(cluster), SECRET_JMX_USERNAME_KEY));
            varList.add(buildEnvVarFromSecret(ENV_VAR_KAFKA_JMX_PASSWORD, jmxSecretName(cluster), SECRET_JMX_PASSWORD_KEY));
        }
    }
    // Add shared environment variables used for all containers
    varList.addAll(getRequiredEnvVars());
    // Add user defined environment variables to the Kafka broker containers
    addContainerEnvsToExistingEnvs(varList, templateKafkaContainerEnvVars);
    return varList;
}
Also used: GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) ArrayList(java.util.ArrayList) ContainerEnvVar(io.strimzi.api.kafka.model.ContainerEnvVar) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) KafkaListenerAuthenticationOAuth(io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuth)
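
For the OAuth branch above, a listener carrying a KafkaListenerAuthenticationOAuth with a client secret reference could look like the following sketch; for such a listener getEnvVars() adds a STRIMZI_<LISTENER>_OAUTH_CLIENT_SECRET variable sourced from the referenced Secret. All names, URIs and keys below are illustrative assumptions.

import io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuthBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;

// Internal TLS listener with OAuth authentication and a client secret reference
GenericKafkaListener oauthListener = new GenericKafkaListenerBuilder()
        .withName("tls")
        .withPort(9093)
        .withType(KafkaListenerType.INTERNAL)
        .withTls(true)
        .withAuth(new KafkaListenerAuthenticationOAuthBuilder()
                .withClientId("kafka-broker")                                  // illustrative client ID
                .withValidIssuerUri("https://oauth.example.com/realms/kafka")  // illustrative issuer
                .withNewClientSecret()
                    .withSecretName("kafka-oauth-secret")                      // Secret holding the client secret
                    .withKey("client-secret")                                  // key within that Secret
                .endClientSecret()
                .build())
        .build();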

Example 28 with GenericKafkaListener

Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener in project strimzi by strimzi.

From the class KafkaCluster, method getServicePorts.

/**
 * Generates ports for the bootstrap service.
 * The bootstrap service exposes only the client interfaces,
 * not the replication interface, which does not need the bootstrap service.
 *
 * @return List with generated ports
 */
private List<ServicePort> getServicePorts() {
    List<GenericKafkaListener> internalListeners = ListenersUtils.internalListeners(listeners);
    List<ServicePort> ports = new ArrayList<>(internalListeners.size() + 1);
    ports.add(createServicePort(REPLICATION_PORT_NAME, REPLICATION_PORT, REPLICATION_PORT, "TCP"));
    for (GenericKafkaListener listener : internalListeners) {
        ports.add(createServicePort(ListenersUtils.backwardsCompatiblePortName(listener), listener.getPort(), listener.getPort(), "TCP"));
    }
    return ports;
}
Also used: ServicePort(io.fabric8.kubernetes.api.model.ServicePort) GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) ArrayList(java.util.ArrayList)
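
Roughly the kind of ServicePort that createServicePort(...) produces for a plain internal listener on port 9092, sketched directly with the Fabric8 ServicePortBuilder. The port name and numbers are assumptions for illustration; in the method above the actual name comes from ListenersUtils.backwardsCompatiblePortName(listener).

import io.fabric8.kubernetes.api.model.IntOrString;
import io.fabric8.kubernetes.api.model.ServicePort;
import io.fabric8.kubernetes.api.model.ServicePortBuilder;

// Approximate shape of a generated client port on the bootstrap service
ServicePort plainPort = new ServicePortBuilder()
        .withName("tcp-clients")                 // illustrative port name
        .withPort(9092)                          // service port
        .withTargetPort(new IntOrString(9092))   // container port
        .withProtocol("TCP")
        .build();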

Example 29 with GenericKafkaListener

Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener in project strimzi by strimzi.

From the class KafkaCluster, method fromCrd.

@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());
    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());
    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);
    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));
    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }
    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }
    result.setRack(kafkaClusterSpec.getRack());
    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.setInitImage(initImage);
    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (kafkaClusterSpec.getJvmOptions() != null) {
        result.setJavaSystemProperties(kafkaClusterSpec.getJvmOptions().getJavaSystemProperties());
    }
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());
    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }
    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);
    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);
    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);
        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " + "changing the deleteClaim flag, " + "adding volumes to Jbod storage or removing volumes from Jbod storage, " + "changing overrides to nodes which do not exist yet" + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage", "The desired Kafka storage configuration contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.");
            result.addWarningCondition(warning);
            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());
    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.setListeners(listeners);
    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.setAuthorization(kafkaClusterSpec.getAuthorization());
    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();
        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }
        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());
        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }
        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }
        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }
        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }
        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }
        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }
        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }
        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }
        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }
        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }
    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);
    return result;
}
Also used: KafkaClusterSpec(io.strimzi.api.kafka.model.KafkaClusterSpec) MetricsAndLogging(io.strimzi.operator.common.MetricsAndLogging) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) Logging(io.strimzi.api.kafka.model.Logging) Condition(io.strimzi.api.kafka.model.status.Condition) KafkaAuthorizationKeycloak(io.strimzi.api.kafka.model.KafkaAuthorizationKeycloak) InlineLogging(io.strimzi.api.kafka.model.InlineLogging) KafkaSpec(io.strimzi.api.kafka.model.KafkaSpec) Storage(io.strimzi.api.kafka.model.storage.Storage) GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) KafkaClusterTemplate(io.strimzi.api.kafka.model.template.KafkaClusterTemplate)
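
For context, a minimal Kafka custom resource that passes the listener check in fromCrd(...) (at least one entry in .spec.kafka.listeners) might be assembled as in the following sketch. The builder chain, names and ephemeral storage choice are assumptions for illustration only, not a complete or recommended configuration.

import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;

// Minimal Kafka CR with one internal listener; without any listener,
// fromCrd(...) throws InvalidResourceException as shown above
Kafka kafkaAssembly = new KafkaBuilder()
        .withNewMetadata()
            .withName("my-cluster")              // illustrative name
            .withNamespace("my-namespace")       // illustrative namespace
        .endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("plain")
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .build())
                .withNewEphemeralStorage().endEphemeralStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewEphemeralStorage().endEphemeralStorage()
            .endZookeeper()
        .endSpec()
        .build();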

Example 30 with GenericKafkaListener

Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener in project strimzi by strimzi.

From the class KafkaBrokerConfigurationBuilder, method withListeners.

/**
 * Configures the listeners based on the listeners enabled by the users in the Kafka CR.
 *
 * @param clusterName                   Name of the cluster (important for the advertised hostnames)
 * @param namespace                     Namespace (important for generating the advertised hostname)
 * @param kafkaListeners                The listeners configuration from the Kafka CR
 * @param controlPlaneListenerActive    Activates the control plane listener (the listener is always configured,
 *                                      but this flag tells Kafka to use it for control plane communication)
 *
 * @return  Returns the builder instance
 */
public KafkaBrokerConfigurationBuilder withListeners(String clusterName, String namespace, List<GenericKafkaListener> kafkaListeners, boolean controlPlaneListenerActive) {
    List<String> listeners = new ArrayList<>();
    List<String> advertisedListeners = new ArrayList<>();
    List<String> securityProtocol = new ArrayList<>();
    // Control Plane listener
    listeners.add("CONTROLPLANE-9090://0.0.0.0:9090");
    advertisedListeners.add(String.format("CONTROLPLANE-9090://%s:9090", // Pod name constructed to be templatable for each individual ordinal
    DnsNameGenerator.podDnsNameWithoutClusterDomain(namespace, KafkaResources.brokersServiceName(clusterName), KafkaResources.kafkaStatefulSetName(clusterName) + "-${STRIMZI_BROKER_ID}")));
    securityProtocol.add("CONTROLPLANE-9090:SSL");
    configureControlPlaneListener();
    // Replication listener
    listeners.add("REPLICATION-9091://0.0.0.0:9091");
    advertisedListeners.add(String.format("REPLICATION-9091://%s:9091", // Pod name constructed to be templatable for each individual ordinal
    DnsNameGenerator.podDnsNameWithoutClusterDomain(namespace, KafkaResources.brokersServiceName(clusterName), KafkaResources.kafkaStatefulSetName(clusterName) + "-${STRIMZI_BROKER_ID}")));
    securityProtocol.add("REPLICATION-9091:SSL");
    configureReplicationListener();
    for (GenericKafkaListener listener : kafkaListeners) {
        int port = listener.getPort();
        String listenerName = ListenersUtils.identifier(listener).toUpperCase(Locale.ENGLISH);
        String envVarListenerName = ListenersUtils.envVarIdentifier(listener);
        printSectionHeader("Listener configuration: " + listenerName);
        listeners.add(listenerName + "://0.0.0.0:" + port);
        advertisedListeners.add(String.format("%s://${STRIMZI_%s_ADVERTISED_HOSTNAME}:${STRIMZI_%s_ADVERTISED_PORT}", listenerName, envVarListenerName, envVarListenerName));
        configureAuthentication(listenerName, securityProtocol, listener.isTls(), listener.getAuth());
        configureListener(listenerName, listener.getConfiguration());
        if (listener.isTls()) {
            CertAndKeySecretSource customServerCert = null;
            if (listener.getConfiguration() != null) {
                customServerCert = listener.getConfiguration().getBrokerCertChainAndKey();
            }
            configureTls(listenerName, customServerCert);
        }
        writer.println();
    }
    configureOAuthPrincipalBuilderIfNeeded(writer, kafkaListeners);
    printSectionHeader("Common listener configuration");
    writer.println("listeners=" + String.join(",", listeners));
    writer.println("advertised.listeners=" + String.join(",", advertisedListeners));
    writer.println("listener.security.protocol.map=" + String.join(",", securityProtocol));
    if (controlPlaneListenerActive) {
        writer.println("control.plane.listener.name=CONTROLPLANE-9090");
    }
    writer.println("inter.broker.listener.name=REPLICATION-9091");
    writer.println("sasl.enabled.mechanisms=");
    writer.println("ssl.secure.random.implementation=SHA1PRNG");
    writer.println("ssl.endpoint.identification.algorithm=HTTPS");
    writer.println();
    return this;
}
Also used: GenericKafkaListener(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener) ArrayList(java.util.ArrayList) CertAndKeySecretSource(io.strimzi.api.kafka.model.CertAndKeySecretSource)
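
A sketch of a TLS listener with a custom broker certificate, the case that triggers the configureTls(listenerName, customServerCert) call in the loop above: the certificate chain and key are referenced through a CertAndKeySecretSource under the listener configuration. Secret and key names are illustrative assumptions.

import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;

// Route listener with a custom server certificate taken from a user-provided Secret
GenericKafkaListener tlsListener = new GenericKafkaListenerBuilder()
        .withName("external")
        .withPort(9094)
        .withType(KafkaListenerType.ROUTE)
        .withTls(true)
        .withNewConfiguration()
            .withNewBrokerCertChainAndKey()
                .withSecretName("my-listener-certs")   // illustrative Secret name
                .withCertificate("tls.crt")            // certificate entry within the Secret
                .withKey("tls.key")                    // private key entry within the Secret
            .endBrokerCertChainAndKey()
        .endConfiguration()
        .build();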

Aggregations

GenericKafkaListener (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener): 160 usages
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 116 usages
ParallelTest (io.strimzi.test.annotations.ParallelTest): 102 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 50 usages
ArrayList (java.util.ArrayList): 38 usages
GenericKafkaListenerConfigurationBrokerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBrokerBuilder): 18 usages
Matchers.containsString (org.hamcrest.Matchers.containsString): 16 usages
KafkaListenerAuthenticationOAuth (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuth): 12 usages
KafkaListenerAuthenticationOAuthBuilder (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuthBuilder): 12 usages
Kafka (io.strimzi.api.kafka.model.Kafka): 10 usages
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 8 usages
HTTPIngressPathBuilder (io.fabric8.kubernetes.api.model.networking.v1.HTTPIngressPathBuilder): 8 usages
Ingress (io.fabric8.kubernetes.api.model.networking.v1.Ingress): 8 usages
IngressTLSBuilder (io.fabric8.kubernetes.api.model.networking.v1.IngressTLSBuilder): 8 usages
HashMap (java.util.HashMap): 8 usages
List (java.util.List): 8 usages
Collectors (java.util.stream.Collectors): 8 usages
NetworkPolicyIngressRule (io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyIngressRule): 6 usages
NetworkPolicyIngressRuleBuilder (io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyIngressRuleBuilder): 6 usages
KafkaResources (io.strimzi.api.kafka.model.KafkaResources): 6 usages