Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition in project kas-fleetshard (by bf2fc6cc711aee1a0c2a) — from class ManagedKafkaAgentController, method buildStatus:
/**
 * Builds the {@link ManagedKafkaAgentStatus} for the given agent resource: a {@code Ready}
 * condition derived from the observability/Strimzi state, plus capacity and node information.
 *
 * TODO: the capacity and node figures below are hard-coded placeholders and need to be
 * replaced with actual metrics.
 *
 * @param resource the ManagedKafkaAgent custom resource being reconciled
 * @return a freshly built status to set on the resource
 */
private ManagedKafkaAgentStatus buildStatus(ManagedKafkaAgent resource) {
    ManagedKafkaAgentStatus status = resource.getStatus();
    ManagedKafkaCondition readyCondition = null;
    if (status != null) {
        readyCondition = ConditionUtils.findManagedKafkaCondition(status.getConditions(), Type.Ready).orElse(null);
    }

    List<StrimziVersionStatus> strimziVersions = this.strimziManager.getStrimziVersions();
    log.debugf("Strimzi versions %s", strimziVersions);

    // consider the fleetshard operator ready when observability is running and a Strimzi bundle is installed (aka at least one available version)
    Status statusValue = this.observabilityManager.isObservabilityRunning() && !strimziVersions.isEmpty()
            ? ManagedKafkaCondition.Status.True
            : ManagedKafkaCondition.Status.False;

    if (readyCondition == null) {
        readyCondition = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, statusValue);
        // FIX: when an existing status had no Ready condition, the freshly built condition was
        // previously dropped, because the builder below reuses status.getConditions(). Make sure
        // the new condition is part of the reported condition list.
        if (status != null && status.getConditions() != null) {
            status.getConditions().add(readyCondition);
        }
    } else {
        ConditionUtils.updateConditionStatus(readyCondition, statusValue, null, null);
    }

    // Placeholder capacity figures — see the TODO above.
    ClusterCapacity total = new ClusterCapacityBuilder()
            .withConnections(10000)
            .withDataRetentionSize(Quantity.parse("40Gi"))
            .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
            .withPartitions(10000)
            .build();

    ClusterCapacity remaining = new ClusterCapacityBuilder()
            .withConnections(10000)
            .withDataRetentionSize(Quantity.parse("40Gi"))
            .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
            .withPartitions(10000)
            .build();

    ClusterCapacity delta = new ClusterCapacityBuilder()
            .withConnections(10000)
            .withDataRetentionSize(Quantity.parse("40Gi"))
            .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
            .withPartitions(10000)
            .build();

    NodeCounts nodeInfo = new NodeCountsBuilder()
            .withCeiling(0)
            .withCurrent(0)
            .withCurrentWorkLoadMinimum(0)
            .withFloor(0)
            .build();

    ClusterResizeInfo resize = new ClusterResizeInfoBuilder()
            .withDelta(delta)
            .withNodeDelta(3)
            .build();

    // If there was no pre-existing (non-null) condition list, report just the Ready condition;
    // otherwise report the existing list (which now includes the Ready condition, see above).
    List<ManagedKafkaCondition> conditions = status == null || status.getConditions() == null
            ? Arrays.asList(readyCondition)
            : status.getConditions();

    return new ManagedKafkaAgentStatusBuilder()
            .withConditions(conditions)
            .withTotal(total)
            .withRemaining(remaining)
            .withNodeInfo(nodeInfo)
            .withResizeInfo(resize)
            .withUpdatedTimestamp(ConditionUtils.iso8601Now())
            .withStrimzi(strimziVersions)
            .build();
}
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition in project kas-fleetshard (by bf2fc6cc711aee1a0c2a) — from class ManagedKafkaController, method updateManagedKafkaStatus:
/**
 * Extract from the current KafkaInstance overall status (Kafka, Canary and AdminServer)
 * a corresponding list of ManagedKafkaCondition(s) to set on the ManagedKafka status.
 * Also refreshes routes, capacity and versions on the status when the instance is Ready.
 *
 * @param managedKafka ManagedKafka instance
 */
private void updateManagedKafkaStatus(ManagedKafka managedKafka) {
    // add status if not already available on the ManagedKafka resource
    ManagedKafkaStatus status = Objects.requireNonNullElse(managedKafka.getStatus(),
            new ManagedKafkaStatusBuilder().build());
    status.setUpdatedTimestamp(ConditionUtils.iso8601Now());
    managedKafka.setStatus(status);

    // add conditions if not already available
    List<ManagedKafkaCondition> managedKafkaConditions = managedKafka.getStatus().getConditions();
    if (managedKafkaConditions == null) {
        managedKafkaConditions = new ArrayList<>();
        status.setConditions(managedKafkaConditions);
    }
    Optional<ManagedKafkaCondition> optReady =
            ConditionUtils.findManagedKafkaCondition(managedKafkaConditions, ManagedKafkaCondition.Type.Ready);
    ManagedKafkaCondition ready = null;
    if (optReady.isPresent()) {
        ready = optReady.get();
    } else {
        ready = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, Status.Unknown);
        managedKafkaConditions.add(ready);
    }

    // a not valid ManagedKafka skips the handling of it, so the status will report an error condition.
    // FIX: use orElseGet so the instance readiness is only computed when the validity check
    // produced no result — orElse evaluated kafkaInstance.getReadiness(...) eagerly in all cases.
    OperandReadiness readiness = this.validity(managedKafka)
            .orElseGet(() -> kafkaInstance.getReadiness(managedKafka));
    ConditionUtils.updateConditionStatus(ready, readiness.getStatus(), readiness.getReason(), readiness.getMessage());

    // routes should always be set on the CR status, even if it's just an empty list
    status.setRoutes(List.of());

    int replicas = kafkaCluster.getReplicas(managedKafka);
    if (ingressControllerManagerInstance.isResolvable()) {
        IngressControllerManager ingressControllerManager = ingressControllerManagerInstance.get();
        List<ManagedKafkaRoute> routes = ingressControllerManager.getManagedKafkaRoutesFor(managedKafka);
        // expect route for each broker + 1 for bootstrap URL + 1 for Admin API server;
        // only publish routes once all are present and each has a router assigned
        int expectedNumRoutes = replicas + NUM_NON_BROKER_ROUTES;
        if (routes.size() >= expectedNumRoutes && routes.stream().noneMatch(r -> "".equals(r.getRouter()))) {
            status.setRoutes(routes);
        }
    }

    if (Status.True.equals(readiness.getStatus())) {
        status.setCapacity(new ManagedKafkaCapacityBuilder(managedKafka.getSpec().getCapacity())
                .withMaxDataRetentionSize(kafkaInstance.getKafkaCluster().calculateRetentionSize(managedKafka))
                .build());

        // the versions in the status are updated incrementally copying the spec only when each stage ends
        VersionsBuilder versionsBuilder = status.getVersions() != null
                ? new VersionsBuilder(status.getVersions())
                : new VersionsBuilder(managedKafka.getSpec().getVersions());
        if (!Reason.StrimziUpdating.equals(readiness.getReason()) && !this.strimziManager.hasStrimziChanged(managedKafka)) {
            versionsBuilder.withStrimzi(managedKafka.getSpec().getVersions().getStrimzi());
        }
        if (!Reason.KafkaUpdating.equals(readiness.getReason()) && !this.kafkaManager.hasKafkaVersionChanged(managedKafka)) {
            versionsBuilder.withKafka(managedKafka.getSpec().getVersions().getKafka());
        }
        if (!Reason.KafkaIbpUpdating.equals(readiness.getReason()) && !this.kafkaManager.hasKafkaIbpVersionChanged(managedKafka)) {
            // fall back to the IBP derived from the Kafka version when the spec does not set one
            String kafkaIbp = managedKafka.getSpec().getVersions().getKafkaIbp() != null
                    ? managedKafka.getSpec().getVersions().getKafkaIbp()
                    : AbstractKafkaCluster.getKafkaIbpVersion(managedKafka.getSpec().getVersions().getKafka());
            versionsBuilder.withKafkaIbp(kafkaIbp);
        }
        status.setVersions(versionsBuilder.build());
        status.setAdminServerURI(kafkaInstance.getAdminServer().uri(managedKafka));
        status.setServiceAccounts(managedKafka.getSpec().getServiceAccounts());
    }
}
Aggregations