Example 11 with Reason

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class StrimziManagerTest, the method testStrimziVersionChange.

@Test
public void testStrimziVersionChange() {
    ManagedKafka mk = ManagedKafka.getDummyInstance(1);
    mk.getSpec().getVersions().setStrimzi("strimzi-cluster-operator.v1");
    Kafka kafka = this.kafkaCluster.kafkaFrom(mk, null);
    kafkaClient.create(kafka);
    // Kafka reconcile not paused and current label version as the ManagedKafka one
    assertFalse(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    assertEquals(kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()), mk.getSpec().getVersions().getStrimzi());
    // ManagedKafka and Kafka updated their status information
    mk.setStatus(new ManagedKafkaStatusBuilder().withVersions(new VersionsBuilder().withStrimzi("strimzi-cluster-operator.v1").build()).build());
    kafka.setStatus(new KafkaStatusBuilder().withConditions(new ConditionBuilder().withType("Ready").withStatus("True").build()).build());
    kafkaClient.replaceStatus(kafka);
    // ask for a Strimzi version change on ManagedKafka
    mk.getSpec().getVersions().setStrimzi("strimzi-cluster-operator.v2");
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    // Kafka reconcile paused but label is still the current version
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    assertEquals(kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()), mk.getStatus().getVersions().getStrimzi());
    // nothing should change after an intermediate reconcile
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    // Kafka moves to be paused
    kafka.setStatus(new KafkaStatusBuilder().withConditions(new ConditionBuilder().withType("ReconciliationPaused").withStatus("True").build()).build());
    kafkaClient.replaceStatus(kafka);
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    // Kafka reconcile not paused and Kafka label updated to requested Strimzi version
    assertFalse(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    // the pause reason should stay until strimzi updates to ready
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    assertEquals(kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()), "strimzi-cluster-operator.v2");
}
Also used : ConditionBuilder(io.strimzi.api.kafka.model.status.ConditionBuilder) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) Kafka(io.strimzi.api.kafka.model.Kafka) KafkaStatusBuilder(io.strimzi.api.kafka.model.status.KafkaStatusBuilder) VersionsBuilder(org.bf2.operator.resources.v1alpha1.VersionsBuilder) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)
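The test walks a three-step handshake: requesting a new Strimzi version first pauses reconciliation while the version label stays on the installed version, and only after Strimzi reports ReconciliationPaused does the label move and the pause get lifted. A minimal sketch of that decision logic, assuming hypothetical isReconciliationPaused and pauseReason helpers (this is not the actual StrimziManager implementation):

// Illustrative sketch of the pause/label handshake driven by a requested version change.
// requestedStrimziVersion comes from the ManagedKafka spec; isReconciliationPaused and
// pauseReason are hypothetical helpers, named here only for the example.
Map<String, String> annotations = kafka.getMetadata().getAnnotations();
Map<String, String> labels = kafka.getMetadata().getLabels();
String versionLabel = strimziManager.getVersionLabel();
if (!requestedStrimziVersion.equals(labels.get(versionLabel))) {
    if (isReconciliationPaused(kafka)) {
        // Strimzi acknowledged the pause: move the label to the requested version and resume
        labels.put(versionLabel, requestedStrimziVersion);
        annotations.remove(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION);
    } else {
        // first ask Strimzi to pause; the label keeps pointing at the currently installed version
        annotations.put(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION, "true");
        annotations.put(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION, pauseReason);
    }
}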

Example 12 with Reason

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class KafkaClusterTest, the method pausedUnknownStatus.

@Test
void pausedUnknownStatus() throws InterruptedException {
    ManagedKafka mk = ManagedKafka.getDummyInstance(1);
    InformerManager informer = Mockito.mock(InformerManager.class);
    Kafka kafka = new KafkaBuilder(this.kafkaCluster.kafkaFrom(mk, null))
            .editMetadata()
                .withAnnotations(Map.of(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION, "custom"))
            .endMetadata()
            .withNewStatus()
                .withConditions(new ConditionBuilder().withType("ReconciliationPaused").withStatus("True").build())
            .endStatus()
            .build();
    Mockito.when(informer.getLocalKafka(Mockito.anyString(), Mockito.anyString())).thenReturn(kafka);
    QuarkusMock.installMockForType(informer, InformerManager.class);
    OperandReadiness readiness = this.kafkaCluster.getReadiness(mk);
    assertEquals(Status.Unknown, readiness.getStatus());
    assertEquals(Reason.Paused, readiness.getReason());
    assertEquals("Kafka mk-1 is paused for an unknown reason", readiness.getMessage());
}
Also used : ConditionBuilder(io.strimzi.api.kafka.model.status.ConditionBuilder) ManagedKafkaUtils.exampleManagedKafka(org.bf2.operator.utils.ManagedKafkaUtils.exampleManagedKafka) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Kafka(io.strimzi.api.kafka.model.Kafka) InformerManager(org.bf2.operator.managers.InformerManager) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
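The assertions imply that a Kafka with a ReconciliationPaused=True condition, whose pause-reason annotation is not one the operator itself set, surfaces as Unknown readiness with Reason.Paused. A rough sketch of that mapping (illustrative only; the OperandReadiness constructor shape and the isOperatorPauseReason helper are assumptions, not the real getReadiness code):

// Sketch: deriving readiness from a paused Kafka custom resource (illustrative only)
boolean paused = kafka.getStatus().getConditions().stream()
        .anyMatch(c -> "ReconciliationPaused".equals(c.getType()) && "True".equals(c.getStatus()));
String pauseReason = kafka.getMetadata().getAnnotations()
        .get(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION);
if (paused && !isOperatorPauseReason(pauseReason)) {
    // "custom" is not a reason the operator knows about, so the state is reported as Unknown
    return new OperandReadiness(Status.Unknown, Reason.Paused,
            String.format("Kafka %s is paused for an unknown reason", kafka.getMetadata().getName()));
}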

Example 13 with Reason

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class OperatorST, the method testUpgradeStrimziVersion.

@SequentialTest
void testUpgradeStrimziVersion(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-upgrade";
    LOGGER.info("Create namespace");
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    String startVersion = strimziVersions.get(strimziVersions.size() - 2);
    LOGGER.info("Create managedkafka with version {}", startVersion);
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, null, startVersion, latestKafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);
    Resource<ManagedKafka> mkResource = kube.client().resources(ManagedKafka.class).inNamespace(mk.getMetadata().getNamespace()).withName(mk.getMetadata().getName());
    LOGGER.info("Upgrading managedkafka to version {}", latestStrimziVersion);
    mkResource.edit(r -> {
        r.getSpec().getVersions().setStrimzi(latestStrimziVersion);
        return r;
    });
    mkResource.waitUntilCondition(m -> {
        String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
        return ManagedKafkaCondition.Reason.StrimziUpdating.name().equals(reason);
    }, 5, TimeUnit.MINUTES);
    mkResource.waitUntilCondition(m ->
            ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason() == null
                    && latestStrimziVersion.equals(m.getStatus().getVersions().getStrimzi()),
            10, TimeUnit.MINUTES);
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) SequentialTest(org.bf2.systemtest.framework.SequentialTest)
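Both waits go through ManagedKafkaResourceType.getCondition to pull the Ready condition out of the status; a plausible shape for such a lookup is a stream filter over the status conditions (a sketch under that assumption, not necessarily the project's implementation):

// Sketch of a condition lookup by type (illustrative; the real helper may differ)
public static Optional<ManagedKafkaCondition> getCondition(ManagedKafkaStatus status, ManagedKafkaCondition.Type type) {
    if (status == null || status.getConditions() == null) {
        return Optional.empty();
    }
    return status.getConditions().stream()
            .filter(c -> type.name().equals(c.getType()))
            .findFirst();
}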

Example 14 with Reason

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class ManagedKafkaSync, the method syncKafkaClusters.

/**
 * Update the local state based upon the remote ManagedKafkas.
 * The strategy here is to take a pass over the list and find any deferred work,
 * then execute that deferred work using the {@link ManagedExecutor}, but with
 * a refresh of the state to ensure we're still acting appropriately.
 */
@Timed(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The time spent processing polling calls")
@Counted(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The number of polling calls")
public void syncKafkaClusters() {
    Map<String, ManagedKafka> remotes = new HashMap<>();
    for (ManagedKafka remoteManagedKafka : controlPlane.getKafkaClusters()) {
        Objects.requireNonNull(remoteManagedKafka.getId());
        Objects.requireNonNull(remoteManagedKafka.getMetadata().getNamespace());
        remotes.put(ControlPlane.managedKafkaKey(remoteManagedKafka), remoteManagedKafka);
        ManagedKafkaSpec remoteSpec = remoteManagedKafka.getSpec();
        Objects.requireNonNull(remoteSpec);
        String localKey = Cache.namespaceKeyFunc(remoteManagedKafka.getMetadata().getNamespace(), remoteManagedKafka.getMetadata().getName());
        ManagedKafka existing = lookup.getLocalManagedKafka(localKey);
        if (existing == null) {
            if (!remoteSpec.isDeleted()) {
                reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
            } else {
                // we've successfully removed locally, but control plane is not aware
                // we need to send another status update to let them know
                ManagedKafkaStatusBuilder statusBuilder = new ManagedKafkaStatusBuilder();
                statusBuilder.withConditions(ConditionUtils.buildCondition(Type.Ready, Status.False).reason(Reason.Deleted));
                // fire and forget the async call - if it fails, we'll retry on the next poll
                controlPlane.updateKafkaClusterStatus(() -> {
                    return Map.of(remoteManagedKafka.getId(), statusBuilder.build());
                });
            }
        } else {
            final String localNamespace = existing.getMetadata().getNamespace();
            final String managedKafkaId = existing.getMetadata().getAnnotations() == null ? null : existing.getMetadata().getAnnotations().get(MANAGEDKAFKA_ID_LABEL);
            Namespace n = kubeClient.namespaces().withName(localNamespace).get();
            if (n != null) {
                String namespaceLabel = Optional.ofNullable(n.getMetadata().getLabels()).map(m -> m.get(MANAGEDKAFKA_ID_NAMESPACE_LABEL)).orElse("");
                if (managedKafkaId != null && !namespaceLabel.equals(managedKafkaId)) {
                    kubeClient.namespaces().withName(localNamespace).edit(namespace -> new NamespaceBuilder(namespace).editMetadata().addToLabels(MANAGEDKAFKA_ID_NAMESPACE_LABEL, managedKafkaId).endMetadata().build());
                }
            }
            if (specChanged(remoteSpec, existing) || !Objects.equals(existing.getPlacementId(), remoteManagedKafka.getPlacementId())) {
                reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
            }
        }
    }
    // process final removals
    for (ManagedKafka local : lookup.getLocalManagedKafkas()) {
        if (remotes.get(ControlPlane.managedKafkaKey(local)) != null || !deleteAllowed(local)) {
            continue;
        }
        reconcileAsync(null, Cache.metaNamespaceKeyFunc(local));
    }
}
Also used : ManagedKafkaResourceClient(org.bf2.common.ManagedKafkaResourceClient) HttpURLConnection(java.net.HttpURLConnection) Status(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) Timed(io.micrometer.core.annotation.Timed) Logger(org.jboss.logging.Logger) Cache(io.fabric8.kubernetes.client.informers.cache.Cache) HashMap(java.util.HashMap) Inject(javax.inject.Inject) ControlPlane(org.bf2.sync.controlplane.ControlPlane) Map(java.util.Map) ExecutorService(java.util.concurrent.ExecutorService) KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException) LocalLookup(org.bf2.sync.informer.LocalLookup) Type(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type) Scheduled(io.quarkus.scheduler.Scheduled) OperandUtils(org.bf2.common.OperandUtils) NDC(org.jboss.logging.NDC) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) ConditionUtils(org.bf2.common.ConditionUtils) Reason(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason) Objects(java.util.Objects) Counted(io.micrometer.core.annotation.Counted) Namespace(io.fabric8.kubernetes.api.model.Namespace) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) ManagedExecutor(org.eclipse.microprofile.context.ManagedExecutor) Optional(java.util.Optional) ApplicationScoped(javax.enterprise.context.ApplicationScoped) ManagedKafkaSpec(org.bf2.operator.resources.v1alpha1.ManagedKafkaSpec) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ConcurrentExecution(io.quarkus.scheduler.Scheduled.ConcurrentExecution)
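The Javadoc's "refresh of the state" is the important part: the deferred work re-reads both sides before acting, so a stale snapshot taken during the polling pass cannot drive the wrong create, update, or delete. A sketch of what reconcileAsync could look like under that strategy (hypothetical; the controlPlane.getDesiredState accessor and the reconcile call are assumed for illustration):

// Sketch: deferred reconcile that re-reads state on the managed executor (illustrative only)
void reconcileAsync(String remoteKey, String localKey) {
    executorService.execute(() -> {
        // refresh both sides so we act on current state, not on the snapshot from the polling pass
        ManagedKafka remote = remoteKey == null ? null : controlPlane.getDesiredState(remoteKey);
        ManagedKafka local = lookup.getLocalManagedKafka(localKey);
        reconcile(remote, local); // create, update, or delete the local resource as appropriate
    });
}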

Example 15 with Reason

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason in project cos-fleetshard by bf2fc6cc711aee1a0c2a.

In the class ConnectorStatusExtractor, the method extract.

public static ConnectorDeploymentStatus extract(ManagedConnector connector) {
    ConnectorDeploymentStatus status = new ConnectorDeploymentStatus();
    DeploymentSpec deployment = connector.getSpec().getDeployment();
    if (connector.getStatus() != null && connector.getStatus().getPhase() != null) {
        deployment = connector.getStatus().getDeployment();
    }
    status.setResourceVersion(deployment.getDeploymentResourceVersion());
    if (connector.getSpec().getOperatorSelector() == null || connector.getSpec().getOperatorSelector().getId() == null) {
        status.setPhase(STATE_FAILED);
        status.addConditionsItem(new MetaV1Condition().type(Conditions.TYPE_READY).status(Conditions.STATUS_FALSE).message("No assignable operator").reason(Conditions.NO_ASSIGNABLE_OPERATOR_REASON).lastTransitionTime(Conditions.now()));
        return status;
    }
    if (connector.getStatus() != null && connector.getStatus().getConnectorStatus() != null) {
        status.setOperators(new ConnectorDeploymentStatusOperators()
                .assigned(toConnectorOperator(connector.getStatus().getConnectorStatus().getAssignedOperator()))
                .available(toConnectorOperator(connector.getStatus().getConnectorStatus().getAvailableOperator())));
        if (connector.getStatus().getConnectorStatus() != null) {
            if (connector.getStatus().getConnectorStatus().getPhase() != null) {
                status.setPhase(connector.getStatus().getConnectorStatus().getPhase());
            }
            if (connector.getStatus().getConnectorStatus().getConditions() != null) {
                for (var cond : connector.getStatus().getConnectorStatus().getConditions()) {
                    status.addConditionsItem(toMetaV1Condition(cond));
                }
            }
        }
    }
    if (status.getPhase() == null) {
        if (DESIRED_STATE_DELETED.equals(deployment.getDesiredState())) {
            status.setPhase(STATE_DE_PROVISIONING);
        } else if (DESIRED_STATE_STOPPED.equals(deployment.getDesiredState())) {
            status.setPhase(STATE_DE_PROVISIONING);
        } else {
            status.setPhase(STATE_PROVISIONING);
        }
    }
    return status;
}
Also used : DeploymentSpec(org.bf2.cos.fleetshard.api.DeploymentSpec) ConnectorDeploymentStatusOperators(org.bf2.cos.fleet.manager.model.ConnectorDeploymentStatusOperators) MetaV1Condition(org.bf2.cos.fleet.manager.model.MetaV1Condition) ConnectorDeploymentStatus(org.bf2.cos.fleet.manager.model.ConnectorDeploymentStatus)
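A short usage sketch: a connector without an operator selector comes back in the failed state with the NO_ASSIGNABLE_OPERATOR_REASON condition. The no-arg constructors and setters below are assumed to follow the usual fabric8/POJO conventions and are for illustration only:

// Usage sketch (illustrative): extract a deployment status from a minimal ManagedConnector
ManagedConnector connector = new ManagedConnector();
connector.setSpec(new ManagedConnectorSpec());           // spec without an operatorSelector
connector.getSpec().setDeployment(new DeploymentSpec()); // extract reads the resource version from here
ConnectorDeploymentStatus status = ConnectorStatusExtractor.extract(connector);
// status.getPhase() is STATE_FAILED and the single condition carries
// Conditions.NO_ASSIGNABLE_OPERATOR_REASON with the "No assignable operator" message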

Aggregations

ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka) 7
Objects (java.util.Objects) 4
MetaV1Condition (org.bf2.cos.fleet.manager.model.MetaV1Condition) 4
Namespace (io.fabric8.kubernetes.api.model.Namespace) 3
NamespaceBuilder (io.fabric8.kubernetes.api.model.NamespaceBuilder) 3
Kafka (io.strimzi.api.kafka.model.Kafka) 3
ConditionBuilder (io.strimzi.api.kafka.model.status.ConditionBuilder) 3
Inject (javax.inject.Inject) 3
ConnectorDeploymentStatus (org.bf2.cos.fleet.manager.model.ConnectorDeploymentStatus) 3
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient) 2
Context (io.javaoperatorsdk.operator.api.Context) 2
QuarkusTest (io.quarkus.test.junit.QuarkusTest) 2
KafkaConnectorBuilder (io.strimzi.api.kafka.model.KafkaConnectorBuilder) 2
KafkaConnectorStatusBuilder (io.strimzi.api.kafka.model.status.KafkaConnectorStatusBuilder) 2
Arrays (java.util.Arrays) 2
List (java.util.List) 2
ConnectorNamespace (org.bf2.cos.fleet.manager.model.ConnectorNamespace) 2
Reason (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason) 2
Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) 2
ManagedKafkaStatusBuilder (org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) 2