Search in sources :

Example 1 with ManagedKafkaStatus

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The following example is from the class ControlPlaneTest, method testManagedKafkaStatusComparison.

@Test
public void testManagedKafkaStatusComparison() {
    // Two null references compare as unchanged.
    ManagedKafkaStatus current = null;
    assertFalse(ControlPlane.statusChanged(current, current));
    // An empty status against null, or against itself, is also unchanged;
    // only a null old status against a non-null new one counts as a change.
    current = new ManagedKafkaStatus();
    assertFalse(ControlPlane.statusChanged(current, null));
    assertFalse(ControlPlane.statusChanged(current, current));
    assertTrue(ControlPlane.statusChanged(null, current));
    // A status carrying a timestamp differs from the empty one in both directions.
    ManagedKafkaStatus updated = new ManagedKafkaStatusBuilder().withUpdatedTimestamp("2020-01-01").build();
    assertTrue(ControlPlane.statusChanged(current, updated));
    assertTrue(ControlPlane.statusChanged(updated, current));
    // Diverging timestamps (and a fresh conditions list) are detected as changes.
    current.setConditions(new ArrayList<>());
    current.setUpdatedTimestamp("2021-01-01");
    assertTrue(ControlPlane.statusChanged(updated, current));
    updated.setUpdatedTimestamp("2022-01-01");
    assertTrue(ControlPlane.statusChanged(current, updated));
}
Also used : ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) Test(org.junit.jupiter.api.Test)

Example 2 with ManagedKafkaStatus

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The following example is from the class MockControlPlane, method updateKafkaClustersStatus.

@Override
public void updateKafkaClustersStatus(@PathParam(value = "id") String id, Map<String, ManagedKafkaStatus> statusMap) {
    log.infof("control plane:: updateKafkaClustersStatus <- Received from cluster %s, %s", id, statusMap);
    // clean up the deleted
    for (Map.Entry<String, ManagedKafkaStatus> entry : statusMap.entrySet()) {
        String clusterId = entry.getKey();
        ManagedKafkaStatus status = entry.getValue();
        log.infof("control plane:: Status of %s received", clusterId);
        ManagedKafka mk = this.kafkas.get(clusterId);
        // status updates for unknown clusters are silently dropped
        if (mk == null) {
            continue;
        }
        if (mk.getSpec().isDeleted() && isDeleted(status)) {
            // both spec and reported status agree the instance is gone; forget it entirely
            log.infof("control plane:: Removing cluster %s as it is deleted", mk.getId());
            this.kafkas.remove(clusterId);
            this.kafkaStatus.remove(clusterId);
        } else {
            this.kafkaStatus.put(clusterId, status);
        }
    }
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)

Example 3 with ManagedKafkaStatus

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The following example is from the class ManagedKafkaController, method updateManagedKafkaStatus.

/**
 * Extract from the current KafkaInstance overall status (Kafka, Canary and AdminServer)
 * a corresponding list of ManagedKafkaCondition(s) to set on the ManagedKafka status.
 * Also refreshes the updated timestamp, routes, capacity, versions, admin server URI
 * and service accounts on the status when the instance is Ready.
 *
 * @param managedKafka ManagedKafka instance whose status is mutated in place
 */
private void updateManagedKafkaStatus(ManagedKafka managedKafka) {
    // add status if not already available on the ManagedKafka resource
    ManagedKafkaStatus status = Objects.requireNonNullElse(managedKafka.getStatus(), new ManagedKafkaStatusBuilder().build());
    status.setUpdatedTimestamp(ConditionUtils.iso8601Now());
    managedKafka.setStatus(status);
    // add conditions if not already available
    List<ManagedKafkaCondition> managedKafkaConditions = managedKafka.getStatus().getConditions();
    if (managedKafkaConditions == null) {
        managedKafkaConditions = new ArrayList<>();
        status.setConditions(managedKafkaConditions);
    }
    // reuse the existing Ready condition if present, otherwise seed one as Unknown
    Optional<ManagedKafkaCondition> optReady = ConditionUtils.findManagedKafkaCondition(managedKafkaConditions, ManagedKafkaCondition.Type.Ready);
    ManagedKafkaCondition ready = null;
    if (optReady.isPresent()) {
        ready = optReady.get();
    } else {
        ready = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, Status.Unknown);
        managedKafkaConditions.add(ready);
    }
    // a not valid ManagedKafka skips the handling of it, so the status will report an error condition.
    // orElseGet (not orElse) keeps kafkaInstance.getReadiness from being evaluated eagerly
    // when a validity error is already present.
    OperandReadiness readiness = this.validity(managedKafka).orElseGet(() -> kafkaInstance.getReadiness(managedKafka));
    ConditionUtils.updateConditionStatus(ready, readiness.getStatus(), readiness.getReason(), readiness.getMessage());
    // routes should always be set on the CR status, even if it's just an empty list
    status.setRoutes(List.of());
    int replicas = kafkaCluster.getReplicas(managedKafka);
    if (ingressControllerManagerInstance.isResolvable()) {
        IngressControllerManager ingressControllerManager = ingressControllerManagerInstance.get();
        List<ManagedKafkaRoute> routes = ingressControllerManager.getManagedKafkaRoutesFor(managedKafka);
        // expect route for each broker + 1 for bootstrap URL + 1 for Admin API server
        int expectedNumRoutes = replicas + NUM_NON_BROKER_ROUTES;
        // only publish routes once all of them exist and every router has been assigned
        if (routes.size() >= expectedNumRoutes && routes.stream().noneMatch(r -> "".equals(r.getRouter()))) {
            status.setRoutes(routes);
        }
    }
    if (Status.True.equals(readiness.getStatus())) {
        status.setCapacity(new ManagedKafkaCapacityBuilder(managedKafka.getSpec().getCapacity()).withMaxDataRetentionSize(kafkaInstance.getKafkaCluster().calculateRetentionSize(managedKafka)).build());
        // the versions in the status are updated incrementally copying the spec only when each stage ends
        VersionsBuilder versionsBuilder = status.getVersions() != null ? new VersionsBuilder(status.getVersions()) : new VersionsBuilder(managedKafka.getSpec().getVersions());
        if (!Reason.StrimziUpdating.equals(readiness.getReason()) && !this.strimziManager.hasStrimziChanged(managedKafka)) {
            versionsBuilder.withStrimzi(managedKafka.getSpec().getVersions().getStrimzi());
        }
        if (!Reason.KafkaUpdating.equals(readiness.getReason()) && !this.kafkaManager.hasKafkaVersionChanged(managedKafka)) {
            versionsBuilder.withKafka(managedKafka.getSpec().getVersions().getKafka());
        }
        if (!Reason.KafkaIbpUpdating.equals(readiness.getReason()) && !this.kafkaManager.hasKafkaIbpVersionChanged(managedKafka)) {
            // fall back to the IBP derived from the Kafka version when the spec does not pin one
            String kafkaIbp = managedKafka.getSpec().getVersions().getKafkaIbp() != null ? managedKafka.getSpec().getVersions().getKafkaIbp() : AbstractKafkaCluster.getKafkaIbpVersion(managedKafka.getSpec().getVersions().getKafka());
            versionsBuilder.withKafkaIbp(kafkaIbp);
        }
        status.setVersions(versionsBuilder.build());
        status.setAdminServerURI(kafkaInstance.getAdminServer().uri(managedKafka));
        status.setServiceAccounts(managedKafka.getSpec().getServiceAccounts());
    }
}
Also used : DeleteControl(io.javaoperatorsdk.operator.api.DeleteControl) ManagedKafkaResourceClient(org.bf2.common.ManagedKafkaResourceClient) Context(io.javaoperatorsdk.operator.api.Context) Status(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) Timed(io.micrometer.core.annotation.Timed) StrimziVersionStatus(org.bf2.operator.resources.v1alpha1.StrimziVersionStatus) Logger(org.jboss.logging.Logger) StrimziManager(org.bf2.operator.managers.StrimziManager) ManagedKafkaRoute(org.bf2.operator.resources.v1alpha1.ManagedKafkaRoute) ResourceEventSource(org.bf2.operator.events.ResourceEventSource) ArrayList(java.util.ArrayList) Controller(io.javaoperatorsdk.operator.api.Controller) VersionsBuilder(org.bf2.operator.resources.v1alpha1.VersionsBuilder) Inject(javax.inject.Inject) KafkaInstance(org.bf2.operator.operands.KafkaInstance) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) UpdateControl(io.javaoperatorsdk.operator.api.UpdateControl) AbstractKafkaCluster(org.bf2.operator.operands.AbstractKafkaCluster) KafkaManager(org.bf2.operator.managers.KafkaManager) Instance(javax.enterprise.inject.Instance) NDC(org.jboss.logging.NDC) KafkaInstanceConfiguration(org.bf2.operator.operands.KafkaInstanceConfiguration) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) IngressControllerManager(org.bf2.operator.managers.IngressControllerManager) ConditionUtils(org.bf2.common.ConditionUtils) Reason(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason) Objects(java.util.Objects) List(java.util.List) Counted(io.micrometer.core.annotation.Counted) ManagedKafkaCondition(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition) OperandReadiness(org.bf2.operator.operands.OperandReadiness) ManagedKafkaCapacityBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacityBuilder) Optional(java.util.Optional) ResourceController(io.javaoperatorsdk.operator.api.ResourceController) 
EventSourceManager(io.javaoperatorsdk.operator.processing.event.EventSourceManager) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ManagedKafkaCapacityBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacityBuilder) ManagedKafkaCondition(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition) VersionsBuilder(org.bf2.operator.resources.v1alpha1.VersionsBuilder) IngressControllerManager(org.bf2.operator.managers.IngressControllerManager) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) ManagedKafkaRoute(org.bf2.operator.resources.v1alpha1.ManagedKafkaRoute) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) OperandReadiness(org.bf2.operator.operands.OperandReadiness)

Example 4 with ManagedKafkaStatus

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The following example is from the class ManagedKafkaST, method testCreateManagedKafkaRestartKubeApi.

/**
 * System test: creates and then deletes a ManagedKafka through the sync API while the
 * Kubernetes API server is repeatedly restarted in the background, verifying the
 * fleetshard components tolerate kube-api unavailability.
 *
 * @param extensionContext JUnit extension context used to register resources for cleanup
 * @throws Exception on interruption or unexpected test failure
 */
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
    // single background worker dedicated to restarting the kube API server
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        String mkAppName = "mk-test-restart-kubeapi";
        ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
        // start restarting kubeapi
        executor.execute(TestUtils::restartKubeApi);
        // give the restart loop a head start before issuing the create
        Thread.sleep(5_000);
        // Create mk using api
        resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
        resourceManager.addResource(extensionContext, mk);
        HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi
        executor.shutdownNow();
        // wait up to 15 minutes for the CR to report Ready=True
        resourceManager.waitResourceCondition(mk, m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True), TimeUnit.MINUTES.toMillis(15));
        LOGGER.info("ManagedKafka {} created", mkAppName);
        // wait for the sync to be up-to-date
        TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
            try {
                String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
                // sync may not have published a status yet; keep polling
                if (statusBody.isEmpty()) {
                    return false;
                }
                ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
                return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
            } catch (Exception e) {
                // fail fast on API/deserialization errors instead of polling until timeout
                throw new AssertionError(e);
            }
        });
        // Get status and compare with CR status
        ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
        ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
        AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
        // Get agent status
        ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
        AssertUtils.assertManagedKafkaAgentStatus(agentStatus);
        // Check if managed kafka deployed all components
        AssertUtils.assertManagedKafka(mk);
        // start restarting kubeapi
        // NOTE(review): the first executor was already shut down above; a fresh pool is
        // created here so the finally block below only needs to stop this latest one
        executor = Executors.newFixedThreadPool(1);
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);
        // delete mk using api
        res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi
        executor.shutdownNow();
        // NOTE(review): the return value of isDeleted is ignored here — presumably it
        // waits for or asserts deletion internally; confirm against its implementation
        ManagedKafkaResourceType.isDeleted(mk);
        LOGGER.info("ManagedKafka {} deleted", mkAppName);
    } finally {
        // safety net: ensure the restart worker is stopped even if an assertion failed
        executor.shutdownNow();
    }
}
Also used : TestUtils(org.bf2.test.TestUtils) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ExecutorService(java.util.concurrent.ExecutorService) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) SequentialTest(org.bf2.systemtest.framework.SequentialTest)

Example 5 with ManagedKafkaStatus

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The following example is from the class SmokeST, method testCreateManagedKafka.

@SequentialTest
void testCreateManagedKafka(ExtensionContext extensionContext) throws Exception {
    // Provision a default ManagedKafka CR in its own namespace and drive it via the sync API.
    String appName = "mk-test-create";
    ManagedKafka kafka = ManagedKafkaResourceType.getDefault(appName, appName, keycloak, latestStrimziVersion, latestKafkaVersion);
    String kafkaId = kafka.getId();
    // Create mk using api
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(appName).endMetadata().build());
    resourceManager.addResource(extensionContext, kafka);
    HttpResponse<String> response = SyncApiClient.createManagedKafka(kafka, syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, response.statusCode());
    resourceManager.waitResourceCondition(kafka, Objects::nonNull);
    // block until the CR reports Ready (5 minute budget)
    kafka = resourceManager.waitUntilReady(kafka, 300_000);
    LOGGER.info("ManagedKafka {} created", appName);
    // wait for the sync to be up-to-date
    TestUtils.waitFor("Managed kafka status sync", 1_000, 30_000, () -> {
        try {
            String body = SyncApiClient.getManagedKafkaStatus(kafkaId, syncEndpoint).body();
            if (body.isEmpty()) {
                // status not published yet; keep polling
                return false;
            }
            ManagedKafkaStatus syncedStatus = Serialization.jsonMapper().readValue(body, ManagedKafkaStatus.class);
            return ManagedKafkaResourceType.hasConditionStatus(syncedStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
        } catch (Exception e) {
            // surface API/deserialization errors immediately rather than timing out
            throw new AssertionError(e);
        }
    });
    // Compare the API-reported status with the status on the deployed CR
    ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(kafka.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
    ManagedKafka deployedKafka = ManagedKafkaResourceType.getOperation().inNamespace(appName).withName(appName).get();
    AssertUtils.assertManagedKafkaStatus(deployedKafka, apiStatus);
    // Verify the fleetshard agent status as well
    ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
    AssertUtils.assertManagedKafkaAgentStatus(agentStatus);
    // Check if managed kafka deployed all components
    AssertUtils.assertManagedKafka(kafka);
    // delete mk using api
    response = SyncApiClient.deleteManagedKafka(kafka.getId(), syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, response.statusCode());
    ManagedKafkaResourceType.isDeleted(kafka);
    LOGGER.info("ManagedKafka {} deleted", appName);
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Objects(java.util.Objects) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) SequentialTest(org.bf2.systemtest.framework.SequentialTest)

Aggregations

ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka)6 ManagedKafkaStatus (org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus)5 ManagedKafkaStatusBuilder (org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder)3 Test (org.junit.jupiter.api.Test)3 NamespaceBuilder (io.fabric8.kubernetes.api.model.NamespaceBuilder)2 QuarkusTest (io.quarkus.test.junit.QuarkusTest)2 Map (java.util.Map)2 Objects (java.util.Objects)2 ManagedKafkaAgentStatus (org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus)2 SequentialTest (org.bf2.systemtest.framework.SequentialTest)2 Context (io.javaoperatorsdk.operator.api.Context)1 Controller (io.javaoperatorsdk.operator.api.Controller)1 DeleteControl (io.javaoperatorsdk.operator.api.DeleteControl)1 ResourceController (io.javaoperatorsdk.operator.api.ResourceController)1 UpdateControl (io.javaoperatorsdk.operator.api.UpdateControl)1 EventSourceManager (io.javaoperatorsdk.operator.processing.event.EventSourceManager)1 Counted (io.micrometer.core.annotation.Counted)1 Timed (io.micrometer.core.annotation.Timed)1 ArrayList (java.util.ArrayList)1 List (java.util.List)1