Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaSpec in the project kas-fleetshard by bf2fc6cc711aee1a0c2a: the reconcile method of the class ManagedKafkaSync.
/**
 * Final sync processing of the remote vs. local state.
 *
 * Both sides are re-read before acting so the decision is based on fresh state;
 * either id may be null when the corresponding side no longer exists.
 */
void reconcile(String remoteId, String localMetaNamespaceKey) {
    // refresh each side that we still have a handle for
    ManagedKafka local = localMetaNamespaceKey == null ? null : lookup.getLocalManagedKafka(localMetaNamespaceKey);
    ManagedKafka remote = remoteId == null ? null : controlPlane.getDesiredState(remoteId);

    if (local == null && remote == null) {
        return; // neither side exists any longer - nothing to reconcile
    }

    // prefer the local id for the logging context, falling back to the remote
    String id = local != null ? local.getId() : remote.getId();
    if (id != null) {
        NDC.push(ManagedKafkaResourceClient.ID_LOG_KEY + "=" + id);
    }
    try {
        if (local == null) {
            // exists only remotely - materialize it unless it is already flagged deleted
            if (!remote.getSpec().isDeleted()) {
                create(remote);
            }
        } else if (remote == null) {
            // exists only locally - remove it once deletion is permitted
            if (deleteAllowed(local)) {
                delete(local);
            }
        } else {
            // both exist - defer while a prior placement is still being torn down
            if (!Objects.equals(local.getPlacementId(), remote.getPlacementId())) {
                log.debugf("Waiting for existing ManagedKafka %s to disappear before attempting next placement", local.getPlacementId());
                return;
            }
            if (specChanged(remote.getSpec(), local)) {
                log.debugf("Updating ManagedKafka Spec for %s", Cache.metaNamespaceKeyFunc(local));
                ManagedKafkaSpec desiredSpec = remote.getSpec();
                client.edit(local.getMetadata().getNamespace(), local.getMetadata().getName(), mk -> {
                    mk.setSpec(desiredSpec);
                    return mk;
                });
                // the operator will handle it from here
            }
        }
    } finally {
        if (id != null) {
            NDC.pop();
        }
    }
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaSpec in the project kas-fleetshard by bf2fc6cc711aee1a0c2a: the testEquals method of the class ManagedKafkaSpecTest.
@Test
public void testEquals() {
    // two freshly constructed specs must compare equal; if they do not,
    // the default (identity) equality is in effect rather than a value-based one
    ManagedKafkaSpec left = new ManagedKafkaSpec();
    ManagedKafkaSpec right = new ManagedKafkaSpec();
    assertEquals(left, right);

    // still equal once both carry an (empty) Versions
    left.setVersions(new Versions());
    right.setVersions(new Versions());
    assertEquals(left, right);

    // diverging a nested field must break equality
    left.getVersions().setKafka("2.2.2");
    assertNotEquals(left, right);
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaSpec in the project kas-fleetshard by bf2fc6cc711aee1a0c2a: the testIngressEgressBackwardsCompatibility method of the class ManagedKafkaSpecTest.
@Test
public void testIngressEgressBackwardsCompatibility() {
    // build a capacity using only the legacy combined throughput field
    ManagedKafkaSpec spec = new ManagedKafkaSpecBuilder()
            .withNewCapacity()
            .withNewIngressEgressThroughputPerSec("30Mi")
            .endCapacity()
            .build();
    // the legacy value must be mirrored into both of the newer split fields
    assertEquals(spec.getCapacity().getIngressEgressThroughputPerSec(), spec.getCapacity().getIngressPerSec());
    assertEquals(spec.getCapacity().getIngressEgressThroughputPerSec(), spec.getCapacity().getEgressPerSec());
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaSpec in the project kas-fleetshard by bf2fc6cc711aee1a0c2a: the syncKafkaClusters method of the class ManagedKafkaSync.
/**
 * Update the local state based upon the remote ManagedKafkas
 * The strategy here is to take a pass over the list and find any deferred work
 * Then execute that deferred work using the {@link ManagedExecutor} but with
 * a refresh of the state to ensure we're still acting appropriately.
 */
@Timed(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The time spent processing polling calls")
@Counted(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The number of polling calls")
public void syncKafkaClusters() {
    Map<String, ManagedKafka> remotes = new HashMap<>();
    for (ManagedKafka remoteManagedKafka : controlPlane.getKafkaClusters()) {
        Objects.requireNonNull(remoteManagedKafka.getId());
        Objects.requireNonNull(remoteManagedKafka.getMetadata().getNamespace());
        // compute the remote key once rather than re-deriving it at each use below
        String remoteKey = ControlPlane.managedKafkaKey(remoteManagedKafka);
        remotes.put(remoteKey, remoteManagedKafka);
        ManagedKafkaSpec remoteSpec = remoteManagedKafka.getSpec();
        Objects.requireNonNull(remoteSpec);
        String localKey = Cache.namespaceKeyFunc(remoteManagedKafka.getMetadata().getNamespace(), remoteManagedKafka.getMetadata().getName());
        ManagedKafka existing = lookup.getLocalManagedKafka(localKey);
        if (existing == null) {
            if (!remoteSpec.isDeleted()) {
                // not yet present locally - defer creation to the reconcile pass
                reconcileAsync(remoteKey, localKey);
            } else {
                // we've successfully removed locally, but control plane is not aware
                // we need to send another status update to let them know
                ManagedKafkaStatusBuilder statusBuilder = new ManagedKafkaStatusBuilder();
                statusBuilder.withConditions(ConditionUtils.buildCondition(Type.Ready, Status.False).reason(Reason.Deleted));
                // fire and forget the async call - if it fails, we'll retry on the next poll
                controlPlane.updateKafkaClusterStatus(() -> {
                    return Map.of(remoteManagedKafka.getId(), statusBuilder.build());
                });
            }
        } else {
            final String localNamespace = existing.getMetadata().getNamespace();
            // id annotation on the local ManagedKafka; may be absent
            final String managedKafkaId = existing.getMetadata().getAnnotations() == null ? null : existing.getMetadata().getAnnotations().get(MANAGEDKAFKA_ID_LABEL);
            Namespace n = kubeClient.namespaces().withName(localNamespace).get();
            if (n != null) {
                // ensure the namespace carries the id label matching the annotation;
                // presumably used for namespace selection elsewhere - only written when missing/stale
                String namespaceLabel = Optional.ofNullable(n.getMetadata().getLabels()).map(m -> m.get(MANAGEDKAFKA_ID_NAMESPACE_LABEL)).orElse("");
                if (managedKafkaId != null && !namespaceLabel.equals(managedKafkaId)) {
                    kubeClient.namespaces().withName(localNamespace).edit(namespace -> new NamespaceBuilder(namespace).editMetadata().addToLabels(MANAGEDKAFKA_ID_NAMESPACE_LABEL, managedKafkaId).endMetadata().build());
                }
            }
            // a spec or placement difference means deferred reconcile work
            if (specChanged(remoteSpec, existing) || !Objects.equals(existing.getPlacementId(), remoteManagedKafka.getPlacementId())) {
                reconcileAsync(remoteKey, localKey);
            }
        }
    }
    // process final removals: anything local with no remote counterpart that may be deleted
    for (ManagedKafka local : lookup.getLocalManagedKafkas()) {
        // containsKey is the idiomatic presence check (the map never holds null values)
        if (remotes.containsKey(ControlPlane.managedKafkaKey(local)) || !deleteAllowed(local)) {
            continue;
        }
        reconcileAsync(null, Cache.metaNamespaceKeyFunc(local));
    }
}
Aggregations