Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
The class KafkaRebalanceAssemblyOperator, method buildRebalanceStatusFromPreviousStatus.
private KafkaRebalanceStatus buildRebalanceStatusFromPreviousStatus(KafkaRebalanceStatus currentStatus, Set<Condition> validation) {
    List<Condition> conditions = new ArrayList<>();
    conditions.addAll(validation);

    Condition currentState = rebalanceStateCondition(currentStatus);
    conditions.add(currentState);

    return new KafkaRebalanceStatusBuilder()
            .withSessionId(currentStatus.getSessionId())
            .withOptimizationResult(currentStatus.getOptimizationResult())
            .withConditions(conditions)
            .build();
}
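For context, the validation argument carries warning conditions gathered while validating the KafkaRebalance resource, and rebalanceStateCondition(currentStatus) returns the condition describing the current rebalance state. The following standalone sketch only illustrates the shape of those inputs; the type and reason strings are assumptions, not values taken from the operator source.

import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;

import java.util.LinkedHashSet;
import java.util.Set;

public class RebalanceConditionSketch {
    public static void main(String[] args) {
        // Condition describing the current rebalance state (the type string is assumed).
        Condition state = new ConditionBuilder()
                .withType("ProposalReady")
                .withStatus("True")
                .build();

        // A validation warning of the kind passed in through the validation set
        // (reason and message are illustrative assumptions).
        Condition warning = new ConditionBuilder()
                .withType("Warning")
                .withStatus("True")
                .withReason("UnknownFields")
                .withMessage("The KafkaRebalance resource contains unknown fields")
                .build();

        Set<Condition> validation = new LinkedHashSet<>();
        validation.add(warning);

        // buildRebalanceStatusFromPreviousStatus merges such a set with the state condition.
        System.out.println(validation.size() + " validation condition(s), state: " + state.getType());
    }
}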
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
The class KafkaCluster, method fromCrd.
@SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity", "checkstyle:MethodLength", "checkstyle:JavaNCSS"})
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas, boolean useKRaft) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);

    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());

    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());

    // Configures KRaft and KRaft cluster ID
    if (useKRaft) {
        result.useKRaft = true;
        result.clusterId = getOrGenerateKRaftClusterId(kafkaAssembly);
    }

    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);

    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));

    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }

    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }

    result.rack = kafkaClusterSpec.getRack();

    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.initImage = initImage;

    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());

    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.isJmxEnabled = true;
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }

    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);

    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);

    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);

        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());

        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: "
                    + "changing the deleteClaim flag, "
                    + "adding volumes to Jbod storage or removing volumes from Jbod storage, "
                    + "changing overrides to nodes which do not exist yet "
                    + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a "
                    + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());

            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage",
                    "The desired Kafka storage configuration contains changes which are not allowed. As a "
                    + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.");
            result.addWarningCondition(warning);

            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }

    result.setResources(kafkaClusterSpec.getResources());

    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.listeners = listeners;

    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.authorization = kafkaClusterSpec.getAuthorization();

    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();

        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }

            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }

        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());

        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }

        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }

        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }

        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }

        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }

        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }

        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }

        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }

        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }

        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }

        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }

        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }

        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }

        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }

    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);

    // Should run at the end when everything is set
    KafkaSpecChecker specChecker = new KafkaSpecChecker(kafkaSpec, versions, result);
    result.warningConditions.addAll(specChecker.run());

    return result;
}
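The Condition usage in this long method is the storage warning above, plus the KafkaSpecChecker warnings collected at the end. As a rough, hedged approximation, StatusUtils.buildWarningCondition("KafkaStorage", message) is assumed to produce a warning-type condition along these lines; the helper's exact fields (for example lastTransitionTime) may differ:

import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;

public class StorageWarningSketch {
    public static void main(String[] args) {
        // Assumed shape of the warning built for a disallowed storage change;
        // the real helper may set additional fields.
        Condition storageWarning = new ConditionBuilder()
                .withType("Warning")
                .withStatus("True")
                .withReason("KafkaStorage")
                .withMessage("The desired Kafka storage configuration contains changes which are not allowed. "
                        + "As a result, all storage changes will be ignored.")
                .build();

        System.out.println(storageWarning.getReason() + ": " + storageWarning.getMessage());
    }
}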
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
The class AbstractConnectOperator, method maybeUpdateConnectorStatus.
Future<Void> maybeUpdateConnectorStatus(Reconciliation reconciliation, KafkaConnector connector, ConnectorStatusAndConditions connectorStatus, Throwable error) {
    KafkaConnectorStatus status = new KafkaConnectorStatus();
    if (error != null) {
        LOGGER.warnCr(reconciliation, "Error reconciling connector {}", connector.getMetadata().getName(), error);
    }

    Map<String, Object> statusResult = null;
    List<String> topics = new ArrayList<>();
    List<Condition> conditions = new ArrayList<>();

    if (connectorStatus != null) {
        statusResult = connectorStatus.statusResult;
        topics = connectorStatus.topics.stream().sorted().collect(Collectors.toList());
        connectorStatus.conditions.forEach(condition -> conditions.add(condition));
    }

    Set<Condition> unknownAndDeprecatedConditions = validate(reconciliation, connector);
    unknownAndDeprecatedConditions.forEach(condition -> conditions.add(condition));

    if (!Annotations.isReconciliationPausedWithAnnotation(connector)) {
        StatusUtils.setStatusConditionAndObservedGeneration(connector, status, error != null ? Future.failedFuture(error) : Future.succeededFuture());
        status.setConnectorStatus(statusResult);
        status.setTasksMax(getActualTaskCount(connector, statusResult));
        status.setTopics(topics);
    } else {
        status.setObservedGeneration(connector.getStatus() != null ? connector.getStatus().getObservedGeneration() : 0);
        conditions.add(StatusUtils.getPausedCondition());
    }
    status.addConditions(conditions);

    return maybeUpdateStatusCommon(connectorOperator, connector, reconciliation, status, (connector1, status1) -> {
        return new KafkaConnectorBuilder(connector1).withStatus(status1).build();
    });
}
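The method merges conditions from three sources: the connector status returned by Kafka Connect, the validation warnings from validate below, and a paused condition when reconciliation is paused by annotation. A minimal, self-contained sketch of that status assembly, using the same setters as above (the condition values are illustrative assumptions):

import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;
import io.strimzi.api.kafka.model.status.KafkaConnectorStatus;

import java.util.ArrayList;
import java.util.List;

public class ConnectorStatusSketch {
    public static void main(String[] args) {
        List<Condition> conditions = new ArrayList<>();

        // Stand-in for the warnings coming from the Connect REST API and from validate().
        conditions.add(new ConditionBuilder()
                .withType("Warning")
                .withStatus("True")
                .withReason("UnknownFields")
                .withMessage("The KafkaConnector resource contains unknown fields")
                .build());

        KafkaConnectorStatus status = new KafkaConnectorStatus();
        status.setTopics(List.of("my-topic"));    // topics reported for the connector
        status.addConditions(conditions);         // merged conditions end up on the resource status

        System.out.println(status.getConditions().size() + " condition(s) on the connector status");
    }
}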
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
The class AbstractConnectOperator, method validate.
public Set<Condition> validate(Reconciliation reconciliation, KafkaConnector resource) {
    if (resource != null) {
        Set<Condition> warningConditions = new LinkedHashSet<>(0);

        ResourceVisitor.visit(reconciliation, resource, new ValidationVisitor(resource, LOGGER, warningConditions));

        return warningConditions;
    }

    return Collections.emptySet();
}
Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.
The class KafkaAssemblyOperator, method createOrUpdate.
@Override
public Future<KafkaStatus> createOrUpdate(Reconciliation reconciliation, Kafka kafkaAssembly) {
    Promise<KafkaStatus> createOrUpdatePromise = Promise.promise();
    ReconciliationState reconcileState = createReconciliationState(reconciliation, kafkaAssembly);

    reconcile(reconcileState).onComplete(reconcileResult -> {
        KafkaStatus status = reconcileState.kafkaStatus;
        Condition condition;

        if (kafkaAssembly.getMetadata().getGeneration() != null) {
            status.setObservedGeneration(kafkaAssembly.getMetadata().getGeneration());
        }

        if (reconcileResult.succeeded()) {
            condition = new ConditionBuilder()
                    .withLastTransitionTime(StatusUtils.iso8601(dateSupplier()))
                    .withType("Ready")
                    .withStatus("True")
                    .build();

            status.addCondition(condition);
            createOrUpdatePromise.complete(status);
        } else {
            condition = new ConditionBuilder()
                    .withLastTransitionTime(StatusUtils.iso8601(dateSupplier()))
                    .withType("NotReady")
                    .withStatus("True")
                    .withReason(reconcileResult.cause().getClass().getSimpleName())
                    .withMessage(reconcileResult.cause().getMessage())
                    .build();

            status.addCondition(condition);
            createOrUpdatePromise.fail(new ReconciliationException(status, reconcileResult.cause()));
        }
    });

    return createOrUpdatePromise.future();
}
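On success the status receives a Ready condition, on failure a NotReady condition carrying the cause. A small sketch of how a caller or test might read that back from the KafkaStatus; the getter names follow the standard Strimzi status API and mirror the builders above:

import io.strimzi.api.kafka.model.status.ConditionBuilder;
import io.strimzi.api.kafka.model.status.KafkaStatus;

public class KafkaStatusReadySketch {
    public static void main(String[] args) {
        KafkaStatus status = new KafkaStatus();
        status.addCondition(new ConditionBuilder()
                .withType("Ready")
                .withStatus("True")
                .build());

        // True when a "Ready" condition with status "True" is present, as set on success above.
        boolean ready = status.getConditions().stream()
                .anyMatch(c -> "Ready".equals(c.getType()) && "True".equals(c.getStatus()));

        System.out.println("Kafka cluster ready: " + ready);
    }
}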