Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a: class AbstractKafkaCluster, method getReadiness.
@Override
public OperandReadiness getReadiness(ManagedKafka managedKafka) {
    Kafka kafka = cachedKafka(managedKafka);
    if (kafka == null) {
        return new OperandReadiness(Status.False, Reason.Installing,
                String.format("Kafka %s does not exist", kafkaClusterName(managedKafka)));
    }
    Optional<Condition> notReady = kafkaCondition(kafka, c -> "NotReady".equals(c.getType()));
    if (notReady.filter(c -> "True".equals(c.getStatus())).isPresent()) {
        Condition c = notReady.get();
        return new OperandReadiness(Status.False,
                "Creating".equals(c.getReason()) ? Reason.Installing : Reason.Error, c.getMessage());
    }
    if (isStrimziUpdating(managedKafka)) {
        // the status here is actually unknown
        return new OperandReadiness(Status.True, Reason.StrimziUpdating, null);
    }
    if (isKafkaUpdating(managedKafka) || isKafkaUpgradeStabilityChecking(managedKafka)) {
        return new OperandReadiness(Status.True, Reason.KafkaUpdating, null);
    }
    if (isKafkaIbpUpdating(managedKafka)) {
        return new OperandReadiness(Status.True, Reason.KafkaIbpUpdating, null);
    }
    Optional<Condition> ready = kafkaCondition(kafka, c -> "Ready".equals(c.getType()));
    if (ready.filter(c -> "True".equals(c.getStatus())).isPresent()) {
        return new OperandReadiness(Status.True, null, null);
    }
    if (isReconciliationPaused(managedKafka)) {
        // strimzi may in the future report the status even when paused, but for now we don't know
        return new OperandReadiness(Status.Unknown, Reason.Paused,
                String.format("Kafka %s is paused for an unknown reason", kafkaClusterName(managedKafka)));
    }
    return new OperandReadiness(Status.False, Reason.Installing,
            String.format("Kafka %s is not providing status", kafkaClusterName(managedKafka)));
}
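For orientation, a caller typically maps this result onto the ManagedKafka status. The following is a minimal sketch, not project code; it assumes OperandReadiness exposes getStatus(), getReason() and getMessage(), and that kafkaCluster refers to the AbstractKafkaCluster operand.

// Hypothetical caller sketch: react to the readiness result.
OperandReadiness readiness = kafkaCluster.getReadiness(managedKafka);
if (readiness.getStatus() == Status.False && readiness.getReason() == Reason.Installing) {
    // e.g. surface the install-in-progress message on the ManagedKafka "Ready" condition
    log.infof("Kafka %s not ready yet: %s", managedKafka.getMetadata().getName(), readiness.getMessage());
}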
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a: class AbstractKafkaCluster, method buildListeners.
protected List<GenericKafkaListener> buildListeners(ManagedKafka managedKafka, int replicas) {
    KafkaListenerAuthentication plainOverOauthAuthenticationListener = null;
    KafkaListenerAuthentication oauthAuthenticationListener = null;
    if (SecuritySecretManager.isKafkaAuthenticationEnabled(managedKafka)) {
        ManagedKafkaAuthenticationOAuth managedKafkaAuthenticationOAuth = managedKafka.getSpec().getOauth();
        CertSecretSource ssoTlsCertSecretSource = buildSsoTlsCertSecretSource(managedKafka);
        KafkaListenerAuthenticationOAuthBuilder plainOverOauthAuthenticationListenerBuilder = new KafkaListenerAuthenticationOAuthBuilder()
                .withClientId(managedKafkaAuthenticationOAuth.getClientId())
                .withJwksEndpointUri(managedKafkaAuthenticationOAuth.getJwksEndpointURI())
                .withUserNameClaim(managedKafkaAuthenticationOAuth.getUserNameClaim())
                .withFallbackUserNameClaim(managedKafkaAuthenticationOAuth.getFallbackUserNameClaim())
                .withCustomClaimCheck(managedKafkaAuthenticationOAuth.getCustomClaimCheck())
                .withValidIssuerUri(managedKafkaAuthenticationOAuth.getValidIssuerEndpointURI())
                .withClientSecret(buildSsoClientGenericSecretSource(managedKafka))
                .withEnablePlain(true)
                .withTokenEndpointUri(managedKafkaAuthenticationOAuth.getTokenEndpointURI());
        if (ssoTlsCertSecretSource != null) {
            plainOverOauthAuthenticationListenerBuilder.withTlsTrustedCertificates(ssoTlsCertSecretSource);
        }
        plainOverOauthAuthenticationListener = plainOverOauthAuthenticationListenerBuilder.build();
        KafkaListenerAuthenticationOAuthBuilder oauthAuthenticationListenerBuilder = new KafkaListenerAuthenticationOAuthBuilder()
                .withClientId(managedKafkaAuthenticationOAuth.getClientId())
                .withJwksEndpointUri(managedKafkaAuthenticationOAuth.getJwksEndpointURI())
                .withUserNameClaim(managedKafkaAuthenticationOAuth.getUserNameClaim())
                .withFallbackUserNameClaim(managedKafkaAuthenticationOAuth.getFallbackUserNameClaim())
                .withCustomClaimCheck(managedKafkaAuthenticationOAuth.getCustomClaimCheck())
                .withValidIssuerUri(managedKafkaAuthenticationOAuth.getValidIssuerEndpointURI())
                .withClientSecret(buildSsoClientGenericSecretSource(managedKafka));
        if (ssoTlsCertSecretSource != null) {
            oauthAuthenticationListenerBuilder.withTlsTrustedCertificates(ssoTlsCertSecretSource);
        }
        oauthAuthenticationListener = oauthAuthenticationListenerBuilder.build();
    }
    KafkaListenerType externalListenerType = kubernetesClient.isAdaptable(OpenShiftClient.class)
            ? KafkaListenerType.ROUTE : KafkaListenerType.INGRESS;
    // Limit client connections per listener
    Integer totalMaxConnections = Objects.requireNonNullElse(
            managedKafka.getSpec().getCapacity().getTotalMaxConnections(),
            this.config.getKafka().getMaxConnections()) / replicas;
    // Limit connection attempts per listener
    Integer maxConnectionAttemptsPerSec = Objects.requireNonNullElse(
            managedKafka.getSpec().getCapacity().getMaxConnectionAttemptsPerSec(),
            this.config.getKafka().getConnectionAttemptsPerSec()) / replicas;
    GenericKafkaListenerConfigurationBuilder listenerConfigBuilder = new GenericKafkaListenerConfigurationBuilder()
            .withBootstrap(new GenericKafkaListenerConfigurationBootstrapBuilder()
                    .withHost(managedKafka.getSpec().getEndpoint().getBootstrapServerHost())
                    .withAnnotations(Map.of("haproxy.router.openshift.io/balance", "leastconn"))
                    .build())
            .withBrokers(buildBrokerOverrides(managedKafka, replicas))
            .withBrokerCertChainAndKey(buildTlsCertAndKeySecretSource(managedKafka))
            .withMaxConnections(totalMaxConnections)
            .withMaxConnectionCreationRate(maxConnectionAttemptsPerSec);
    return Arrays.asList(
            new GenericKafkaListenerBuilder()
                    .withName(EXTERNAL_LISTENER_NAME)
                    .withPort(9094)
                    .withType(externalListenerType)
                    .withTls(true)
                    .withAuth(plainOverOauthAuthenticationListener)
                    .withConfiguration(listenerConfigBuilder.build())
                    .build(),
            new GenericKafkaListenerBuilder()
                    .withName("oauth")
                    .withPort(9095)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withAuth(oauthAuthenticationListener)
                    .withNetworkPolicyPeers(new NetworkPolicyPeerBuilder()
                            .withNewPodSelector()
                                .addToMatchLabels("app", AbstractAdminServer.adminServerName(managedKafka))
                            .endPodSelector()
                            .build())
                    .build(),
            new GenericKafkaListenerBuilder()
                    .withName("sre")
                    .withPort(9096)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(false)
                    .build());
}
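To make the result shape concrete, here is an illustrative sketch only (managedKafka is assumed to carry a fully populated spec); the comments mirror the builders above.

List<GenericKafkaListener> listeners = buildListeners(managedKafka, 3);
// expected: EXTERNAL_LISTENER_NAME on 9094 (ROUTE on OpenShift, INGRESS otherwise; TLS, OAuth with PLAIN enabled)
//           "oauth" on 9095 (INTERNAL, TLS, OAuth, network-policy limited to the admin server pods)
//           "sre" on 9096 (INTERNAL, no TLS, no authentication)
listeners.forEach(l -> System.out.printf("%s -> %d (%s)%n", l.getName(), l.getPort(), l.getType()));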
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a: class AbstractKafkaCluster, method isKafkaAnnotationUpdating.
private boolean isKafkaAnnotationUpdating(ManagedKafka managedKafka, String annotation, Function<Kafka, String> valueSupplier) {
    Kafka kafka = cachedKafka(managedKafka);
    if (kafka == null) {
        return false;
    }
    List<Pod> kafkaPods = kubernetesClient.pods()
            .inNamespace(kafka.getMetadata().getNamespace())
            .withLabel("strimzi.io/name", kafka.getMetadata().getName() + "-kafka")
            .list()
            .getItems();
    boolean isKafkaAnnotationUpdating = false;
    String expectedValue = valueSupplier.apply(kafka);
    for (Pod kafkaPod : kafkaPods) {
        String annotationValueOnPod = Optional.ofNullable(kafkaPod.getMetadata().getAnnotations())
                .map(annotations -> annotations.get(annotation))
                .orElse(null);
        if (annotationValueOnPod == null) {
            log.errorf("Kafka pod [%s] is missing annotation '%s'", kafkaPod.getMetadata().getName(), annotation);
            throw new RuntimeException();
        }
        log.tracef("Kafka pod [%s] annotation '%s' = %s [expected value %s]",
                kafkaPod.getMetadata().getName(), annotation, annotationValueOnPod, expectedValue);
        isKafkaAnnotationUpdating |= !annotationValueOnPod.equals(expectedValue);
        if (isKafkaAnnotationUpdating) {
            break;
        }
    }
    return isKafkaAnnotationUpdating;
}
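This helper is what the update checks used by getReadiness build on. A hypothetical caller is sketched below; the annotation key and value supplier are assumptions for illustration, not confirmed project code.

// Hypothetical: a broker roll is in progress while any Kafka pod's version annotation lags the Kafka CR spec.
private boolean isKafkaUpdating(ManagedKafka managedKafka) {
    return isKafkaAnnotationUpdating(managedKafka, "strimzi.io/kafka-version",
            kafka -> kafka.getSpec().getKafka().getVersion());
}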
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a: class AdminServer, method buildAnnotations.
private Map<String, String> buildAnnotations(ManagedKafka managedKafka) {
    List<String> dependsOnSecrets = new ArrayList<>();
    dependsOnSecrets.add(SecuritySecretManager.strimziClusterCaCertSecret(managedKafka));
    if (SecuritySecretManager.isKafkaExternalCertificateEnabled(managedKafka)) {
        dependsOnSecrets.add(SecuritySecretManager.kafkaTlsSecretName(managedKafka));
    }
    if (SecuritySecretManager.isKafkaAuthenticationEnabled(managedKafka)) {
        ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
        if (oauth.getTlsTrustedCertificate() != null) {
            dependsOnSecrets.add(SecuritySecretManager.ssoTlsSecretName(managedKafka));
        }
    }
    return Map.of(SecuritySecretManager.ANNOTATION_SECRET_DEP_DIGEST,
            securitySecretManager.digestSecretsVersions(managedKafka, dependsOnSecrets));
}
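The digest annotation is only useful once it lands on the admin server pod template, so that a change in any dependent secret rolls the pods. A minimal sketch of that wiring follows, assuming a fabric8 DeploymentBuilder; the actual deployment build code is not shown on this page.

// Hypothetical sketch: attach the computed digest annotation to the pod template metadata.
new DeploymentBuilder()
        .editOrNewSpec()
            .editOrNewTemplate()
                .editOrNewMetadata()
                    .addToAnnotations(buildAnnotations(managedKafka))
                .endMetadata()
            .endTemplate()
        .endSpec()
        .build();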
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a: class AdminServer, method buildEnvVar.
private List<EnvVar> buildEnvVar(ManagedKafka managedKafka) {
    List<EnvVar> envVars = new ArrayList<>();
    addEnvVar(envVars, "KAFKA_ADMIN_REPLICATION_FACTOR", String.valueOf(config.getKafka().getScalingAndReplicationFactor()));
    addEnvVar(envVars, "KAFKA_ADMIN_BOOTSTRAP_SERVERS", managedKafka.getMetadata().getName() + "-kafka-bootstrap:9095");
    addEnvVar(envVars, "KAFKA_ADMIN_BROKER_TLS_ENABLED", "true");
    addEnvVarSecret(envVars, "KAFKA_ADMIN_BROKER_TRUSTED_CERT", SecuritySecretManager.strimziClusterCaCertSecret(managedKafka), "ca.crt");
    addEnvVar(envVars, "KAFKA_ADMIN_ACL_RESOURCE_OPERATIONS", this.config.getKafka().getAcl().getResourceOperations());
    Integer maxPartitions = managedKafka.getSpec().getCapacity().getMaxPartitions();
    if (maxPartitions != null) {
        addEnvVar(envVars, "KAFKA_ADMIN_NUM_PARTITIONS_MAX", maxPartitions.toString());
    }
    if (SecuritySecretManager.isKafkaExternalCertificateEnabled(managedKafka)) {
        addEnvVar(envVars, "KAFKA_ADMIN_TLS_CERT", TLS_CONFIG_MOUNT_PATH + "tls.crt");
        addEnvVar(envVars, "KAFKA_ADMIN_TLS_KEY", TLS_CONFIG_MOUNT_PATH + "tls.key");
        addEnvVar(envVars, "KAFKA_ADMIN_TLS_VERSION", "TLSv1.3,TLSv1.2");
    }
    if (SecuritySecretManager.isKafkaAuthenticationEnabled(managedKafka)) {
        ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
        if (oauth.getTlsTrustedCertificate() != null) {
            addEnvVarSecret(envVars, "KAFKA_ADMIN_OAUTH_TRUSTED_CERT", SecuritySecretManager.ssoTlsSecretName(managedKafka), "keycloak.crt");
        }
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_JWKS_ENDPOINT_URI", oauth.getJwksEndpointURI());
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_VALID_ISSUER_URI", oauth.getValidIssuerEndpointURI());
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_TOKEN_ENDPOINT_URI", oauth.getTokenEndpointURI());
    } else {
        addEnvVar(envVars, "KAFKA_ADMIN_OAUTH_ENABLED", "false");
    }
    if (corsAllowList.isPresent()) {
        addEnvVar(envVars, "CORS_ALLOW_LIST_REGEX", corsAllowList.get());
    }
    return envVars;
}
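The addEnvVar and addEnvVarSecret helpers are not shown on this page. Plausible sketches follow, with signatures inferred from the calls above (treat them as assumptions), using the fabric8 EnvVarBuilder:

private void addEnvVar(List<EnvVar> envVars, String name, String value) {
    envVars.add(new EnvVarBuilder().withName(name).withValue(value).build());
}

private void addEnvVarSecret(List<EnvVar> envVars, String name, String secretName, String secretKey) {
    // the value is read from the named key of the referenced Secret
    envVars.add(new EnvVarBuilder()
            .withName(name)
            .withNewValueFrom()
                .withNewSecretKeyRef(secretKey, secretName, false)
            .endValueFrom()
            .build());
}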