Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaAuthenticationOAuth in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class AbstractKafkaCluster, method buildListeners:
protected List<GenericKafkaListener> buildListeners(ManagedKafka managedKafka, int replicas) {
    KafkaListenerAuthentication plainOverOauthAuthenticationListener = null;
    KafkaListenerAuthentication oauthAuthenticationListener = null;

    if (SecuritySecretManager.isKafkaAuthenticationEnabled(managedKafka)) {
        ManagedKafkaAuthenticationOAuth managedKafkaAuthenticationOAuth = managedKafka.getSpec().getOauth();
        CertSecretSource ssoTlsCertSecretSource = buildSsoTlsCertSecretSource(managedKafka);

        KafkaListenerAuthenticationOAuthBuilder plainOverOauthAuthenticationListenerBuilder = new KafkaListenerAuthenticationOAuthBuilder()
                .withClientId(managedKafkaAuthenticationOAuth.getClientId())
                .withJwksEndpointUri(managedKafkaAuthenticationOAuth.getJwksEndpointURI())
                .withUserNameClaim(managedKafkaAuthenticationOAuth.getUserNameClaim())
                .withFallbackUserNameClaim(managedKafkaAuthenticationOAuth.getFallbackUserNameClaim())
                .withCustomClaimCheck(managedKafkaAuthenticationOAuth.getCustomClaimCheck())
                .withValidIssuerUri(managedKafkaAuthenticationOAuth.getValidIssuerEndpointURI())
                .withClientSecret(buildSsoClientGenericSecretSource(managedKafka))
                .withEnablePlain(true)
                .withTokenEndpointUri(managedKafkaAuthenticationOAuth.getTokenEndpointURI());
        if (ssoTlsCertSecretSource != null) {
            plainOverOauthAuthenticationListenerBuilder.withTlsTrustedCertificates(ssoTlsCertSecretSource);
        }
        plainOverOauthAuthenticationListener = plainOverOauthAuthenticationListenerBuilder.build();

        KafkaListenerAuthenticationOAuthBuilder oauthAuthenticationListenerBuilder = new KafkaListenerAuthenticationOAuthBuilder()
                .withClientId(managedKafkaAuthenticationOAuth.getClientId())
                .withJwksEndpointUri(managedKafkaAuthenticationOAuth.getJwksEndpointURI())
                .withUserNameClaim(managedKafkaAuthenticationOAuth.getUserNameClaim())
                .withFallbackUserNameClaim(managedKafkaAuthenticationOAuth.getFallbackUserNameClaim())
                .withCustomClaimCheck(managedKafkaAuthenticationOAuth.getCustomClaimCheck())
                .withValidIssuerUri(managedKafkaAuthenticationOAuth.getValidIssuerEndpointURI())
                .withClientSecret(buildSsoClientGenericSecretSource(managedKafka));
        if (ssoTlsCertSecretSource != null) {
            oauthAuthenticationListenerBuilder.withTlsTrustedCertificates(ssoTlsCertSecretSource);
        }
        oauthAuthenticationListener = oauthAuthenticationListenerBuilder.build();
    }

    KafkaListenerType externalListenerType = kubernetesClient.isAdaptable(OpenShiftClient.class) ? KafkaListenerType.ROUTE : KafkaListenerType.INGRESS;

    // Limit client connections per listener
    Integer totalMaxConnections = Objects.requireNonNullElse(managedKafka.getSpec().getCapacity().getTotalMaxConnections(),
            this.config.getKafka().getMaxConnections()) / replicas;
    // Limit connection attempts per listener
    Integer maxConnectionAttemptsPerSec = Objects.requireNonNullElse(managedKafka.getSpec().getCapacity().getMaxConnectionAttemptsPerSec(),
            this.config.getKafka().getConnectionAttemptsPerSec()) / replicas;

    GenericKafkaListenerConfigurationBuilder listenerConfigBuilder = new GenericKafkaListenerConfigurationBuilder()
            .withBootstrap(new GenericKafkaListenerConfigurationBootstrapBuilder()
                    .withHost(managedKafka.getSpec().getEndpoint().getBootstrapServerHost())
                    .withAnnotations(Map.of("haproxy.router.openshift.io/balance", "leastconn"))
                    .build())
            .withBrokers(buildBrokerOverrides(managedKafka, replicas))
            .withBrokerCertChainAndKey(buildTlsCertAndKeySecretSource(managedKafka))
            .withMaxConnections(totalMaxConnections)
            .withMaxConnectionCreationRate(maxConnectionAttemptsPerSec);

    return Arrays.asList(
            new GenericKafkaListenerBuilder()
                    .withName(EXTERNAL_LISTENER_NAME).withPort(9094).withType(externalListenerType).withTls(true)
                    .withAuth(plainOverOauthAuthenticationListener)
                    .withConfiguration(listenerConfigBuilder.build())
                    .build(),
            new GenericKafkaListenerBuilder()
                    .withName("oauth").withPort(9095).withType(KafkaListenerType.INTERNAL).withTls(true)
                    .withAuth(oauthAuthenticationListener)
                    .withNetworkPolicyPeers(new NetworkPolicyPeerBuilder()
                            .withNewPodSelector()
                            .addToMatchLabels("app", AbstractAdminServer.adminServerName(managedKafka))
                            .endPodSelector()
                            .build())
                    .build(),
            new GenericKafkaListenerBuilder()
                    .withName("sre").withPort(9096).withType(KafkaListenerType.INTERNAL).withTls(false)
                    .build());
}
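The per-listener limits above split a cluster-wide budget evenly across brokers using integer division. As a rough standalone illustration (the numbers below are hypothetical, not defaults taken from the project):

import java.util.Objects;

public class ListenerLimitExample {
    public static void main(String[] args) {
        // Hypothetical values: a capacity spec of 3000 total connections and 100 connection
        // attempts per second, spread across 3 Kafka replicas, mirroring the use of
        // Objects.requireNonNullElse in buildListeners.
        Integer specTotalMaxConnections = 3000;   // stand-in for capacity.getTotalMaxConnections()
        Integer specMaxConnectionAttempts = 100;  // stand-in for capacity.getMaxConnectionAttemptsPerSec()
        int defaultMaxConnections = 500;          // stand-in for config.getKafka().getMaxConnections()
        int defaultAttemptsPerSec = 50;           // stand-in for config.getKafka().getConnectionAttemptsPerSec()
        int replicas = 3;

        int perListenerConnections = Objects.requireNonNullElse(specTotalMaxConnections, defaultMaxConnections) / replicas;
        int perListenerAttempts = Objects.requireNonNullElse(specMaxConnectionAttempts, defaultAttemptsPerSec) / replicas;

        System.out.println(perListenerConnections); // 1000
        System.out.println(perListenerAttempts);    // 33 (integer division)
    }
}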
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaAuthenticationOAuth in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class AdminServer, method buildAnnotations:
private Map<String, String> buildAnnotations(ManagedKafka managedKafka) {
    List<String> dependsOnSecrets = new ArrayList<>();
    dependsOnSecrets.add(SecuritySecretManager.strimziClusterCaCertSecret(managedKafka));

    if (SecuritySecretManager.isKafkaExternalCertificateEnabled(managedKafka)) {
        dependsOnSecrets.add(SecuritySecretManager.kafkaTlsSecretName(managedKafka));
    }

    if (SecuritySecretManager.isKafkaAuthenticationEnabled(managedKafka)) {
        ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
        if (oauth.getTlsTrustedCertificate() != null) {
            dependsOnSecrets.add(SecuritySecretManager.ssoTlsSecretName(managedKafka));
        }
    }

    return Map.of(SecuritySecretManager.ANNOTATION_SECRET_DEP_DIGEST,
            securitySecretManager.digestSecretsVersions(managedKafka, dependsOnSecrets));
}
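The annotation value is a digest over the secrets listed in dependsOnSecrets, so the admin server pod template changes (and the deployment rolls) whenever one of those secrets changes. digestSecretsVersions itself is not shown on this page; a minimal sketch of the idea, assuming a fabric8 KubernetesClient and hashing each secret's resourceVersion (the real implementation may differ):

import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.client.KubernetesClient;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;
import java.util.List;

// Hypothetical helper: not the project's SecuritySecretManager implementation.
class SecretDigestSketch {
    static String digestSecretsVersions(KubernetesClient client, String namespace, List<String> secretNames) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        for (String name : secretNames) {
            Secret secret = client.secrets().inNamespace(namespace).withName(name).get();
            if (secret != null) {
                // Any update to the secret bumps its resourceVersion, which changes the digest,
                // which changes the annotation and in turn rolls the admin server deployment.
                digest.update(secret.getMetadata().getResourceVersion().getBytes(StandardCharsets.UTF_8));
            }
        }
        return Base64.getEncoder().encodeToString(digest.digest());
    }
}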
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaAuthenticationOAuth in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class AdminServer, method buildEnvVar:
private List<EnvVar> buildEnvVar(ManagedKafka managedKafka) {
    List<EnvVar> envVars = new ArrayList<>();

    addEnvVar(envVars, "KAFKA_ADMIN_REPLICATION_FACTOR", String.valueOf(config.getKafka().getScalingAndReplicationFactor()));
    addEnvVar(envVars, "KAFKA_ADMIN_BOOTSTRAP_SERVERS", managedKafka.getMetadata().getName() + "-kafka-bootstrap:9095");
    addEnvVar(envVars, "KAFKA_ADMIN_BROKER_TLS_ENABLED", "true");
    addEnvVarSecret(envVars, "KAFKA_ADMIN_BROKER_TRUSTED_CERT", SecuritySecretManager.strimziClusterCaCertSecret(managedKafka), "ca.crt");
    addEnvVar(envVars, "KAFKA_ADMIN_ACL_RESOURCE_OPERATIONS", this.config.getKafka().getAcl().getResourceOperations());

    Integer maxPartitions = managedKafka.getSpec().getCapacity().getMaxPartitions();
    if (maxPartitions != null) {
        addEnvVar(envVars, "KAFKA_ADMIN_NUM_PARTITIONS_MAX", maxPartitions.toString());
    }

    if (SecuritySecretManager.isKafkaExternalCertificateEnabled(managedKafka)) {
        addEnvVar(envVars, "KAFKA_ADMIN_TLS_CERT", TLS_CONFIG_MOUNT_PATH + "tls.crt");
        addEnvVar(envVars, "KAFKA_ADMIN_TLS_KEY", TLS_CONFIG_MOUNT_PATH + "tls.key");
        addEnvVar(envVars, "KAFKA_ADMIN_TLS_VERSION", "TLSv1.3,TLSv1.2");
    }

    if (SecuritySecretManager.isKafkaAuthenticationEnabled(managedKafka)) {
        ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
        if (oauth.getTlsTrustedCertificate() != null) {
            addEnvVarSecret(envVars, "KAFKA_ADMIN_OAUTH_TRUSTED_CERT", SecuritySecretManager.ssoTlsSecretName(managedKafka), "keycloak.crt");
        }
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_JWKS_ENDPOINT_URI", oauth.getJwksEndpointURI());
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_VALID_ISSUER_URI", oauth.getValidIssuerEndpointURI());
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_TOKEN_ENDPOINT_URI", oauth.getTokenEndpointURI());
    } else {
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_ENABLED", "false");
    }

    if (corsAllowList.isPresent()) {
        addEnvVar(envVars, "CORS_ALLOW_LIST_REGEX", corsAllowList.get());
    }

    return envVars;
}
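addEnvVar and addEnvVarSecret are small helpers that are not shown on this page; with the fabric8 model they would typically look something like the sketch below (hypothetical shapes, not the project's exact code):

import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.EnvVarBuilder;
import io.fabric8.kubernetes.api.model.EnvVarSourceBuilder;
import io.fabric8.kubernetes.api.model.SecretKeySelectorBuilder;
import java.util.List;

// Plausible shapes for the helpers used above; the actual implementations live in AdminServer.
class EnvVarHelpersSketch {
    static void addEnvVar(List<EnvVar> envVars, String name, String value) {
        envVars.add(new EnvVarBuilder().withName(name).withValue(value).build());
    }

    // Reads the variable from a key of a Secret instead of a literal value,
    // e.g. KAFKA_ADMIN_BROKER_TRUSTED_CERT from the cluster CA secret's "ca.crt" entry.
    static void addEnvVarSecret(List<EnvVar> envVars, String name, String secretName, String secretKey) {
        envVars.add(new EnvVarBuilder()
                .withName(name)
                .withValueFrom(new EnvVarSourceBuilder()
                        .withSecretKeyRef(new SecretKeySelectorBuilder()
                                .withName(secretName)
                                .withKey(secretKey)
                                .build())
                        .build())
                .build());
    }
}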
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaAuthenticationOAuth in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaCluster, method buildKafkaConfig:
private Map<String, Object> buildKafkaConfig(ManagedKafka managedKafka, Kafka current) {
    Map<String, Object> config = new HashMap<>();
    int scalingAndReplicationFactor = this.config.getKafka().getScalingAndReplicationFactor();
    config.put("offsets.topic.replication.factor", scalingAndReplicationFactor);
    config.put("transaction.state.log.min.isr", Math.min(scalingAndReplicationFactor, 2));
    config.put("transaction.state.log.replication.factor", scalingAndReplicationFactor);
    config.put("auto.create.topics.enable", "false");
    config.put("min.insync.replicas", Math.min(scalingAndReplicationFactor, 2));
    config.put("default.replication.factor", scalingAndReplicationFactor);
    config.put("log.message.format.version", this.kafkaManager.currentKafkaLogMessageFormatVersion(managedKafka));
    config.put("inter.broker.protocol.version", this.kafkaManager.currentKafkaIbpVersion(managedKafka));
    config.put("ssl.enabled.protocols", "TLSv1.3,TLSv1.2");
    config.put("ssl.protocol", "TLS");

    ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
    var maximumSessionLifetime = oauth != null ? oauth.getMaximumSessionLifetime() : null;
    long maxReauthMs = maximumSessionLifetime != null
            ? Math.max(maximumSessionLifetime, 0)
            : this.config.getKafka().getMaximumSessionLifetimeDefault();
    config.put("connections.max.reauth.ms", maxReauthMs);

    if (managedKafka.getSpec().getVersions().compareStrimziVersionTo(Versions.STRIMZI_CLUSTER_OPERATOR_V0_23_0_4) >= 0) {
        // extension that manages topic creation to enforce a valid replication factor and ISR
        config.put("create.topic.policy.class.name", "io.bf2.kafka.topic.ManagedKafkaCreateTopicPolicy");
    }

    // Force preferred leader election as soon as possible.
    // NOTE: mostly useful for the canary when Kafka brokers roll: partitions move but a preferred leader is not elected.
    // This could be removed once support for the Elect Leader API is contributed to Sarama.
    config.put("leader.imbalance.per.broker.percentage", 0);

    config.put(MESSAGE_MAX_BYTES, this.config.getKafka().getMessageMaxBytes());

    // configure the quota plugin
    if (this.config.getKafka().isEnableQuota()) {
        addQuotaConfig(managedKafka, current, config);
    }

    // custom authorizer configuration
    addKafkaAuthorizerConfig(managedKafka, config);

    if (managedKafka.getSpec().getCapacity().getMaxPartitions() != null) {
        config.put(MAX_PARTITIONS, managedKafka.getSpec().getCapacity().getMaxPartitions());
    }

    config.put("strimzi.authorization.custom-authorizer.partition-counter.timeout-seconds", 10);
    config.put("strimzi.authorization.custom-authorizer.partition-counter.schedule-interval-seconds", 15);
    config.put("strimzi.authorization.custom-authorizer.partition-counter.private-topic-prefix", this.config.kafka.acl.privatePrefix);
    config.put("strimzi.authorization.custom-authorizer.adminclient-listener.name", "controlplane-9090");
    config.put("strimzi.authorization.custom-authorizer.adminclient-listener.port", 9090);
    config.put("strimzi.authorization.custom-authorizer.adminclient-listener.protocol", "SSL");

    return config;
}
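connections.max.reauth.ms is taken from ManagedKafkaAuthenticationOAuth.getMaximumSessionLifetime() when it is set (negative values are clamped to 0) and otherwise falls back to the fleet shard's configured default. A small standalone illustration of that selection (the concrete numbers are made up):

public class ReauthConfigExample {
    public static void main(String[] args) {
        long defaultMaximumSessionLifetime = 299_000L; // stand-in for config.getKafka().getMaximumSessionLifetimeDefault()

        // Spec sets a lifetime: use it, but never a negative value.
        System.out.println(maxReauthMs(600_000L, defaultMaximumSessionLifetime)); // 600000
        System.out.println(maxReauthMs(-1L, defaultMaximumSessionLifetime));      // 0 (clamped)

        // No OAuth section or no lifetime set: fall back to the default.
        System.out.println(maxReauthMs(null, defaultMaximumSessionLifetime));     // 299000
    }

    // Mirrors the selection performed in buildKafkaConfig.
    static long maxReauthMs(Long maximumSessionLifetime, long defaultLifetime) {
        return maximumSessionLifetime != null ? Math.max(maximumSessionLifetime, 0) : defaultLifetime;
    }
}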