Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a).
Shown below: the ManagedKafkaSync class, method syncKafkaClusters.
/**
 * Update the local state based upon the remote ManagedKafkas.
 *
 * The strategy here is to take a pass over the remote list and find any deferred work,
 * then execute that deferred work using the {@link ManagedExecutor} but with
 * a refresh of the state to ensure we're still acting appropriately.
 *
 * Side effects per remote entry:
 * - unknown locally and not deleted: schedule an async reconcile to create it
 * - unknown locally and deleted: report Ready=False/Deleted status back to the control plane
 * - known locally: ensure the namespace carries the ManagedKafka id label, and
 *   reconcile if the spec or placement id changed
 * Finally, any local instance with no remote counterpart is scheduled for removal.
 */
@Timed(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The time spent processing polling calls")
@Counted(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The number of polling calls")
public void syncKafkaClusters() {
    Map<String, ManagedKafka> remotes = new HashMap<>();
    for (ManagedKafka remoteManagedKafka : controlPlane.getKafkaClusters()) {
        Objects.requireNonNull(remoteManagedKafka.getId());
        Objects.requireNonNull(remoteManagedKafka.getMetadata().getNamespace());
        // hoist the key: the original recomputed ControlPlane.managedKafkaKey(...) for
        // the remotes map and for each reconcileAsync call in the same iteration
        String remoteKey = ControlPlane.managedKafkaKey(remoteManagedKafka);
        remotes.put(remoteKey, remoteManagedKafka);
        ManagedKafkaSpec remoteSpec = remoteManagedKafka.getSpec();
        Objects.requireNonNull(remoteSpec);

        String localKey = Cache.namespaceKeyFunc(remoteManagedKafka.getMetadata().getNamespace(), remoteManagedKafka.getMetadata().getName());
        ManagedKafka existing = lookup.getLocalManagedKafka(localKey);

        if (existing == null) {
            if (!remoteSpec.isDeleted()) {
                // not known locally yet - the async reconcile will create it
                reconcileAsync(remoteKey, localKey);
            } else {
                // we've successfully removed locally, but control plane is not aware
                // we need to send another status update to let them know
                ManagedKafkaStatusBuilder statusBuilder = new ManagedKafkaStatusBuilder();
                statusBuilder.withConditions(ConditionUtils.buildCondition(Type.Ready, Status.False).reason(Reason.Deleted));
                // fire and forget the async call - if it fails, we'll retry on the next poll
                controlPlane.updateKafkaClusterStatus(() -> Map.of(remoteManagedKafka.getId(), statusBuilder.build()));
            }
        } else {
            final String localNamespace = existing.getMetadata().getNamespace();
            final String managedKafkaId = existing.getMetadata().getAnnotations() == null ? null
                    : existing.getMetadata().getAnnotations().get(MANAGEDKAFKA_ID_LABEL);
            Namespace n = kubeClient.namespaces().withName(localNamespace).get();
            if (n != null) {
                String namespaceLabel = Optional.ofNullable(n.getMetadata().getLabels())
                        .map(m -> m.get(MANAGEDKAFKA_ID_NAMESPACE_LABEL))
                        .orElse("");
                if (managedKafkaId != null && !namespaceLabel.equals(managedKafkaId)) {
                    // keep the namespace's ManagedKafka id label in sync with the instance annotation
                    kubeClient.namespaces().withName(localNamespace)
                            .edit(namespace -> new NamespaceBuilder(namespace)
                                    .editMetadata()
                                    .addToLabels(MANAGEDKAFKA_ID_NAMESPACE_LABEL, managedKafkaId)
                                    .endMetadata()
                                    .build());
                }
            }
            if (specChanged(remoteSpec, existing) || !Objects.equals(existing.getPlacementId(), remoteManagedKafka.getPlacementId())) {
                reconcileAsync(remoteKey, localKey);
            }
        }
    }

    // process final removals: anything local that the control plane no longer reports
    for (ManagedKafka local : lookup.getLocalManagedKafkas()) {
        // containsKey replaces the original's get(...) != null idiom - same semantics,
        // since the map is only populated with non-null ManagedKafka values above
        if (remotes.containsKey(ControlPlane.managedKafkaKey(local)) || !deleteAllowed(local)) {
            continue;
        }
        reconcileAsync(null, Cache.metaNamespaceKeyFunc(local));
    }
}
Aggregations