use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class KafkaInstanceTest, method deleteOrderCanaryDeleteBeforeKafkaCluster.
@Test
void deleteOrderCanaryDeleteBeforeKafkaCluster() {
    // When a ManagedKafka instance is deleted, the canary must be torn down
    // strictly before the Kafka cluster itself.
    Context<ManagedKafka> reconcileContext = Mockito.mock(Context.class);

    kafkaInstance.delete(DUMMY_MANAGED_KAFKA, reconcileContext);

    InOrder deletionOrder = inOrder(canary, kafkaCluster);
    deletionOrder.verify(canary).delete(DUMMY_MANAGED_KAFKA, reconcileContext);
    deletionOrder.verify(kafkaCluster).delete(DUMMY_MANAGED_KAFKA, reconcileContext);
}
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class ManagedKafkaUtils, method dummyManagedKafka.
/**
 * Builds a throw-away {@code ManagedKafka} test fixture stamped with the given id.
 *
 * @param id the id to assign to the dummy instance
 * @return a dummy ManagedKafka carrying {@code id}
 */
public static ManagedKafka dummyManagedKafka(String id) {
    final ManagedKafka instance = ManagedKafka.getDummyInstance(1);
    instance.setId(id);
    return instance;
}
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class OperatorST, method testUpgradeStrimziVersion.
// System test: creates a ManagedKafka on the second-newest Strimzi version, then edits the
// spec to the latest version and waits for the upgrade to start and complete.
// NOTE(review): relies on fields (resourceManager, strimziVersions, latestStrimziVersion,
// latestKafkaVersion, kube) declared outside this snippet — assumed initialized by the suite.
@SequentialTest
void testUpgradeStrimziVersion(ExtensionContext extensionContext) throws Exception {
String mkAppName = "mk-test-upgrade";
LOGGER.info("Create namespace");
// Namespace is registered with the resource manager so it is cleaned up after the test.
resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
// Start from the second-newest known Strimzi version so there is something to upgrade to.
String startVersion = strimziVersions.get(strimziVersions.size() - 2);
LOGGER.info("Create managedkafka with version {}", startVersion);
ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, null, startVersion, latestKafkaVersion);
mk = resourceManager.createResource(extensionContext, mk);
// Handle to the live custom resource for subsequent edit/wait operations.
Resource<ManagedKafka> mkResource = kube.client().resources(ManagedKafka.class).inNamespace(mk.getMetadata().getNamespace()).withName(mk.getMetadata().getName());
LOGGER.info("Upgrading managedkafka to version {}", latestStrimziVersion);
// Bump only the Strimzi version in the spec; the operator should react with an upgrade.
mkResource.edit(r -> {
r.getSpec().getVersions().setStrimzi(latestStrimziVersion);
return r;
});
// Phase 1: wait (up to 5 min) for the Ready condition's reason to become StrimziUpdating,
// i.e. the operator has acknowledged the version change and begun the upgrade.
mkResource.waitUntilCondition(m -> {
String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
return ManagedKafkaCondition.Reason.StrimziUpdating.name().equals(reason);
}, 5, TimeUnit.MINUTES);
// Phase 2: wait (up to 10 min) for the upgrade to finish — reason cleared AND the status
// reports the latest Strimzi version.
mkResource.waitUntilCondition(m -> ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason() == null && latestStrimziVersion.equals(m.getStatus().getVersions().getStrimzi()), 10, TimeUnit.MINUTES);
}
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class ManagedKafkaSync, method syncKafkaClusters.
/**
 * Update the local state based upon the remote ManagedKafkas.
 *
 * <p>The strategy here is to take a pass over the list and find any deferred work,
 * then execute that deferred work using the {@link ManagedExecutor} but with
 * a refresh of the state to ensure we're still acting appropriately.
 *
 * <p>Three cases are handled per remote entry: (1) not known locally and not deleted
 * — schedule a create/reconcile; (2) not known locally but marked deleted — report
 * Deleted status back to the control plane; (3) known locally — repair the namespace
 * id label if needed and reconcile on spec/placement drift. Finally, any local
 * ManagedKafka with no remote counterpart is scheduled for removal.
 */
@Timed(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The time spent processing polling calls")
@Counted(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The number of polling calls")
public void syncKafkaClusters() {
// Index of remote instances, keyed by the control-plane key, used below to detect
// local instances that no longer exist remotely.
Map<String, ManagedKafka> remotes = new HashMap<>();
for (ManagedKafka remoteManagedKafka : controlPlane.getKafkaClusters()) {
// Fail fast on malformed control-plane payloads.
Objects.requireNonNull(remoteManagedKafka.getId());
Objects.requireNonNull(remoteManagedKafka.getMetadata().getNamespace());
remotes.put(ControlPlane.managedKafkaKey(remoteManagedKafka), remoteManagedKafka);
ManagedKafkaSpec remoteSpec = remoteManagedKafka.getSpec();
Objects.requireNonNull(remoteSpec);
// Key used for the local (informer cache) lookup: namespace/name.
String localKey = Cache.namespaceKeyFunc(remoteManagedKafka.getMetadata().getNamespace(), remoteManagedKafka.getMetadata().getName());
ManagedKafka existing = lookup.getLocalManagedKafka(localKey);
if (existing == null) {
if (!remoteSpec.isDeleted()) {
// Unknown locally and not deleted remotely: schedule creation.
reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
} else {
// we've successfully removed locally, but control plane is not aware
// we need to send another status update to let them know
ManagedKafkaStatusBuilder statusBuilder = new ManagedKafkaStatusBuilder();
statusBuilder.withConditions(ConditionUtils.buildCondition(Type.Ready, Status.False).reason(Reason.Deleted));
// fire and forget the async call - if it fails, we'll retry on the next poll
controlPlane.updateKafkaClusterStatus(() -> {
return Map.of(remoteManagedKafka.getId(), statusBuilder.build());
});
}
} else {
final String localNamespace = existing.getMetadata().getNamespace();
final String managedKafkaId = existing.getMetadata().getAnnotations() == null ? null : existing.getMetadata().getAnnotations().get(MANAGEDKAFKA_ID_LABEL);
Namespace n = kubeClient.namespaces().withName(localNamespace).get();
if (n != null) {
// Ensure the namespace carries the ManagedKafka id label; repair it when missing
// or stale so namespace-scoped tooling can find the instance.
String namespaceLabel = Optional.ofNullable(n.getMetadata().getLabels()).map(m -> m.get(MANAGEDKAFKA_ID_NAMESPACE_LABEL)).orElse("");
if (managedKafkaId != null && !namespaceLabel.equals(managedKafkaId)) {
kubeClient.namespaces().withName(localNamespace).edit(namespace -> new NamespaceBuilder(namespace).editMetadata().addToLabels(MANAGEDKAFKA_ID_NAMESPACE_LABEL, managedKafkaId).endMetadata().build());
}
}
// Reconcile when the remote spec differs from local, or the placement changed.
if (specChanged(remoteSpec, existing) || !Objects.equals(existing.getPlacementId(), remoteManagedKafka.getPlacementId())) {
reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
}
}
}
// process final removals
for (ManagedKafka local : lookup.getLocalManagedKafkas()) {
// Skip anything still present remotely, or not yet eligible for deletion.
if (remotes.get(ControlPlane.managedKafkaKey(local)) != null || !deleteAllowed(local)) {
continue;
}
// Null remote key signals a removal reconcile for this local instance.
reconcileAsync(null, Cache.metaNamespaceKeyFunc(local));
}
}
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class MockControlPlane, method markForDeletion.
/**
 * Flags the tracked cluster with the given id as deleted, mimicking the control
 * plane's deletion request. Logs and does nothing if the cluster is unknown or
 * already marked for deletion.
 *
 * @param id the cluster id to mark
 */
private void markForDeletion(String id) {
    ManagedKafka managedKafka = this.kafkas.get(id);
    boolean eligible = managedKafka != null && !managedKafka.isMarkedForDeletion();
    if (eligible) {
        log.infof("control plane:: marking cluster %s for deletion", managedKafka.getId());
        managedKafka.getSpec().setDeleted(true);
    } else {
        log.infof("control plane:: Is cluster %s already deleted?", id);
    }
}
Aggregations