
Example 76 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaInstanceTest, the method deleteOrderCanaryDeleteBeforeKafkaCluster.

@Test
void deleteOrderCanaryDeleteBeforeKafkaCluster() {
    Context<ManagedKafka> context = Mockito.mock(Context.class);
    kafkaInstance.delete(DUMMY_MANAGED_KAFKA, context);
    InOrder inOrder = inOrder(canary, kafkaCluster);
    inOrder.verify(canary).delete(DUMMY_MANAGED_KAFKA, context);
    inOrder.verify(kafkaCluster).delete(DUMMY_MANAGED_KAFKA, context);
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) InOrder(org.mockito.InOrder) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)
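The test above relies on Mockito's InOrder verification. For readers unfamiliar with that API, the following self-contained sketch shows the same pattern in isolation; the Canary and KafkaCluster interfaces here are hypothetical stand-ins, not the kas-fleetshard operand classes.

// Minimal illustration of Mockito InOrder verification, independent of kas-fleetshard.
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;

import org.mockito.InOrder;

public class InOrderSketch {

    interface Canary { void delete(String managedKafkaName); }
    interface KafkaCluster { void delete(String managedKafkaName); }

    public static void main(String[] args) {
        Canary canary = mock(Canary.class);
        KafkaCluster kafkaCluster = mock(KafkaCluster.class);

        // Exercise the mocks in the order the production code is expected to use them.
        canary.delete("my-managed-kafka");
        kafkaCluster.delete("my-managed-kafka");

        // Verification passes only because canary.delete(...) happened before kafkaCluster.delete(...).
        InOrder order = inOrder(canary, kafkaCluster);
        order.verify(canary).delete("my-managed-kafka");
        order.verify(kafkaCluster).delete("my-managed-kafka");
    }
}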

Example 77 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ManagedKafkaUtils, the method dummyManagedKafka.

public static ManagedKafka dummyManagedKafka(String id) {
    ManagedKafka mk = ManagedKafka.getDummyInstance(1);
    mk.setId(id);
    return mk;
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)
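A typical caller only needs a ManagedKafka with a known id. The sketch below is a hypothetical JUnit usage of the helper above; the test class and assertion are illustrative and not taken from the kas-fleetshard sources.

// Hypothetical usage of dummyManagedKafka; assumes this test sits alongside the
// ManagedKafkaUtils class shown above (so no extra import is needed for it).
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.bf2.operator.resources.v1alpha1.ManagedKafka;
import org.junit.jupiter.api.Test;

class DummyManagedKafkaUsageTest {

    @Test
    void helperAppliesTheRequestedId() {
        // getDummyInstance(1) supplies the rest of the resource; only the id is overridden.
        ManagedKafka mk = ManagedKafkaUtils.dummyManagedKafka("my-cluster-id");
        assertEquals("my-cluster-id", mk.getId());
    }
}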

Example 78 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class OperatorST, the method testUpgradeStrimziVersion.

@SequentialTest
void testUpgradeStrimziVersion(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-upgrade";
    LOGGER.info("Create namespace");
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    String startVersion = strimziVersions.get(strimziVersions.size() - 2);
    LOGGER.info("Create managedkafka with version {}", startVersion);
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, null, startVersion, latestKafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);
    Resource<ManagedKafka> mkResource = kube.client().resources(ManagedKafka.class).inNamespace(mk.getMetadata().getNamespace()).withName(mk.getMetadata().getName());
    LOGGER.info("Upgrading managedkafka to version {}", latestStrimziVersion);
    mkResource.edit(r -> {
        r.getSpec().getVersions().setStrimzi(latestStrimziVersion);
        return r;
    });
    mkResource.waitUntilCondition(m -> {
        String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
        return ManagedKafkaCondition.Reason.StrimziUpdating.name().equals(reason);
    }, 5, TimeUnit.MINUTES);
    mkResource.waitUntilCondition(m -> {
        String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
        return reason == null && latestStrimziVersion.equals(m.getStatus().getVersions().getStrimzi());
    }, 10, TimeUnit.MINUTES);
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) SequentialTest(org.bf2.systemtest.framework.SequentialTest)
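The test waits on the Ready condition twice with the same shape of lambda. A small helper like the one below (not part of the kas-fleetshard sources) could factor out that "wait for the Ready condition to report a given reason" pattern; it relies only on the fabric8 Resource API and the ManagedKafkaResourceType utility already used above, whose import path is assumed here.

// Sketch of a reusable wait helper; the ManagedKafkaResourceType import path is assumed.
import java.util.concurrent.TimeUnit;

import io.fabric8.kubernetes.client.dsl.Resource;
import org.bf2.operator.resources.v1alpha1.ManagedKafka;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition;
import org.bf2.systemtest.framework.resource.ManagedKafkaResourceType; // import path assumed

final class ReadyConditionWaits {

    private ReadyConditionWaits() {
    }

    // Blocks until the ManagedKafka's Ready condition reports the expected reason;
    // passing null means "no reason", i.e. the update has finished.
    static void waitForReadyReason(Resource<ManagedKafka> resource, String expectedReason, long minutes) {
        resource.waitUntilCondition(mk -> {
            String reason = ManagedKafkaResourceType
                    .getCondition(mk.getStatus(), ManagedKafkaCondition.Type.Ready)
                    .map(ManagedKafkaCondition::getReason)
                    .orElse(null);
            return expectedReason == null ? reason == null : expectedReason.equals(reason);
        }, minutes, TimeUnit.MINUTES);
    }
}

The first wait in the test then reads waitForReadyReason(mkResource, ManagedKafkaCondition.Reason.StrimziUpdating.name(), 5); the second wait would additionally check the reported Strimzi version.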

Example 79 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ManagedKafkaSync, the method syncKafkaClusters.

/**
 * Update the local state based upon the remote ManagedKafkas
 * The strategy here is to take a pass over the list and find any deferred work
 * Then execute that deferred work using the {@link ManagedExecutor} but with
 * a refresh of the state to ensure we're still acting appropriately.
 */
@Timed(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The time spent processing polling calls")
@Counted(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The number of polling calls")
public void syncKafkaClusters() {
    Map<String, ManagedKafka> remotes = new HashMap<>();
    for (ManagedKafka remoteManagedKafka : controlPlane.getKafkaClusters()) {
        Objects.requireNonNull(remoteManagedKafka.getId());
        Objects.requireNonNull(remoteManagedKafka.getMetadata().getNamespace());
        remotes.put(ControlPlane.managedKafkaKey(remoteManagedKafka), remoteManagedKafka);
        ManagedKafkaSpec remoteSpec = remoteManagedKafka.getSpec();
        Objects.requireNonNull(remoteSpec);
        String localKey = Cache.namespaceKeyFunc(remoteManagedKafka.getMetadata().getNamespace(), remoteManagedKafka.getMetadata().getName());
        ManagedKafka existing = lookup.getLocalManagedKafka(localKey);
        if (existing == null) {
            if (!remoteSpec.isDeleted()) {
                reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
            } else {
                // we've successfully removed locally, but control plane is not aware
                // we need to send another status update to let them know
                ManagedKafkaStatusBuilder statusBuilder = new ManagedKafkaStatusBuilder();
                statusBuilder.withConditions(ConditionUtils.buildCondition(Type.Ready, Status.False).reason(Reason.Deleted));
                // fire and forget the async call - if it fails, we'll retry on the next poll
                controlPlane.updateKafkaClusterStatus(() -> {
                    return Map.of(remoteManagedKafka.getId(), statusBuilder.build());
                });
            }
        } else {
            final String localNamespace = existing.getMetadata().getNamespace();
            final String managedKafkaId = existing.getMetadata().getAnnotations() == null ? null : existing.getMetadata().getAnnotations().get(MANAGEDKAFKA_ID_LABEL);
            Namespace n = kubeClient.namespaces().withName(localNamespace).get();
            if (n != null) {
                String namespaceLabel = Optional.ofNullable(n.getMetadata().getLabels()).map(m -> m.get(MANAGEDKAFKA_ID_NAMESPACE_LABEL)).orElse("");
                if (managedKafkaId != null && !namespaceLabel.equals(managedKafkaId)) {
                    kubeClient.namespaces().withName(localNamespace).edit(namespace -> new NamespaceBuilder(namespace).editMetadata().addToLabels(MANAGEDKAFKA_ID_NAMESPACE_LABEL, managedKafkaId).endMetadata().build());
                }
            }
            if (specChanged(remoteSpec, existing) || !Objects.equals(existing.getPlacementId(), remoteManagedKafka.getPlacementId())) {
                reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
            }
        }
    }
    // process final removals
    for (ManagedKafka local : lookup.getLocalManagedKafkas()) {
        if (remotes.get(ControlPlane.managedKafkaKey(local)) != null || !deleteAllowed(local)) {
            continue;
        }
        reconcileAsync(null, Cache.metaNamespaceKeyFunc(local));
    }
}
Also used : ManagedKafkaResourceClient(org.bf2.common.ManagedKafkaResourceClient) HttpURLConnection(java.net.HttpURLConnection) Status(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) Timed(io.micrometer.core.annotation.Timed) Logger(org.jboss.logging.Logger) Cache(io.fabric8.kubernetes.client.informers.cache.Cache) HashMap(java.util.HashMap) Inject(javax.inject.Inject) ControlPlane(org.bf2.sync.controlplane.ControlPlane) Map(java.util.Map) ExecutorService(java.util.concurrent.ExecutorService) KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException) LocalLookup(org.bf2.sync.informer.LocalLookup) Type(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type) Scheduled(io.quarkus.scheduler.Scheduled) OperandUtils(org.bf2.common.OperandUtils) NDC(org.jboss.logging.NDC) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) ConditionUtils(org.bf2.common.ConditionUtils) Reason(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason) Objects(java.util.Objects) Counted(io.micrometer.core.annotation.Counted) Namespace(io.fabric8.kubernetes.api.model.Namespace) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) ManagedExecutor(org.eclipse.microprofile.context.ManagedExecutor) Optional(java.util.Optional) ApplicationScoped(javax.enterprise.context.ApplicationScoped) ManagedKafkaSpec(org.bf2.operator.resources.v1alpha1.ManagedKafkaSpec) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ConcurrentExecution(io.quarkus.scheduler.Scheduled.ConcurrentExecution)
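The Javadoc above describes deferring reconcile work and re-checking state before acting, but the reconcileAsync implementation itself is not part of this excerpt. The following self-contained sketch shows only the general "defer, then refresh state before acting" pattern; it is not the kas-fleetshard implementation, and the maps standing in for the remote and local views are purely illustrative.

// A rough, self-contained sketch of the deferred-reconcile pattern described in the Javadoc above.
// Not the kas-fleetshard reconcileAsync: the state maps and string "specs" are stand-ins.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class DeferredReconcileSketch {

    private final ExecutorService executor = Executors.newSingleThreadExecutor();
    // Stand-ins for the remote (control plane) and local (cluster) views of the world.
    private final Map<String, String> remoteState = new ConcurrentHashMap<>();
    private final Map<String, String> localState = new ConcurrentHashMap<>();

    // Called from the polling pass; the actual apply/delete happens later on the executor.
    void reconcileAsync(String remoteKey, String localKey) {
        executor.execute(() -> {
            // Refresh both views: the world may have changed since the poll that queued this task.
            String remote = remoteKey == null ? null : remoteState.get(remoteKey);
            String local = localState.get(localKey);
            if (remote == null) {
                if (local != null) {
                    localState.remove(localKey);   // remote is gone, so remove the local copy
                }
            } else if (!remote.equals(local)) {
                localState.put(localKey, remote);  // create or update the local copy to match
            }
        });
    }
}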

Example 80 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class MockControlPlane, the method markForDeletion.

private void markForDeletion(String id) {
    ManagedKafka mk = this.kafkas.get(id);
    if (mk != null && !mk.isMarkedForDeletion()) {
        log.infof("control plane:: marking cluster %s for deletion", mk.getId());
        mk.getSpec().setDeleted(true);
    } else {
        log.infof("control plane:: Is cluster %s already deleted?", id);
    }
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)

Aggregations

ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka) 67
Kafka (io.strimzi.api.kafka.model.Kafka) 30
Test (org.junit.jupiter.api.Test) 24
QuarkusTest (io.quarkus.test.junit.QuarkusTest) 23
List (java.util.List) 16
Map (java.util.Map) 15
Inject (javax.inject.Inject) 15
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 15
Objects (java.util.Objects) 14
Quantity (io.fabric8.kubernetes.api.model.Quantity) 11
Optional (java.util.Optional) 11
Collectors (java.util.stream.Collectors) 10
ApplicationScoped (javax.enterprise.context.ApplicationScoped) 9
StrimziManager (org.bf2.operator.managers.StrimziManager) 9
Logger (org.jboss.logging.Logger) 9
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient) 8
ArrayList (java.util.ArrayList) 8
Reason (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason) 8
Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) 8
ManagedKafkaUtils.exampleManagedKafka (org.bf2.operator.utils.ManagedKafkaUtils.exampleManagedKafka) 8