Search in sources :

Example 41 with Status

use of org.bf2.operator.clients.canary.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ManagedKafkaAgentControllerTest, method shouldCreateStatus.

@Test
void shouldCreateStatus() {
    // With no agent resource present yet, the update loop must be a no-op.
    mkaController.statusUpdateLoop();

    ManagedKafkaAgent instance = ManagedKafkaAgentResourceClient.getDummyInstance();
    instance.getMetadata().setNamespace(agentClient.getNamespace());
    // The dummy instance starts without a status; the controller should add one.
    assertNull(instance.getStatus());
    agentClient.create(instance);

    // Once the agent exists, the loop should populate its status.
    mkaController.statusUpdateLoop();
    ManagedKafkaAgent updated = agentClient.getByName(agentClient.getNamespace(), ManagedKafkaAgentResourceClient.RESOURCE_NAME);
    assertNotNull(updated.getStatus());

    // Clean up the resource created by this test.
    agentClient.delete(agentClient.getNamespace(), ManagedKafkaAgentResourceClient.RESOURCE_NAME);
}
Also used : ManagedKafkaAgent(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgent) Test(org.junit.jupiter.api.Test) QuarkusTest(io.quarkus.test.junit.QuarkusTest)

Example 42 with Status

use of org.bf2.operator.clients.canary.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ManagedKafkaST, method testCreateManagedKafkaRestartKubeApi.

// Exercises the full ManagedKafka create/delete lifecycle through the sync API
// while the kube API server is being repeatedly restarted in the background,
// verifying the operator tolerates API-server outages.
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
    // single background worker that keeps restarting the kube API server
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        String mkAppName = "mk-test-restart-kubeapi";
        ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
        // start restarting kubeapi
        executor.execute(TestUtils::restartKubeApi);
        // give the restart loop time to begin before issuing API calls
        Thread.sleep(5_000);
        // Create mk using api
        resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
        resourceManager.addResource(extensionContext, mk);
        HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi
        executor.shutdownNow();
        // creation must converge to Ready within 15 minutes despite the outages
        resourceManager.waitResourceCondition(mk, m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True), TimeUnit.MINUTES.toMillis(15));
        LOGGER.info("ManagedKafka {} created", mkAppName);
        // wait for the sync to be up-to-date
        TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
            try {
                String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
                // empty body means the sync has not published a status yet; retry
                if (statusBody.isEmpty()) {
                    return false;
                }
                ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
                return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
            } catch (Exception e) {
                // fail fast on unexpected parse/IO errors instead of retrying
                throw new AssertionError(e);
            }
        });
        // Get status and compare with CR status
        ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
        ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
        AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
        // Get agent status
        ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
        AssertUtils.assertManagedKafkaAgentStatus(agentStatus);
        // Check if managed kafka deployed all components
        AssertUtils.assertManagedKafka(mk);
        // start restarting kubeapi
        // (fresh pool — the previous one was shut down above and cannot be reused)
        executor = Executors.newFixedThreadPool(1);
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);
        // delete mk using api
        res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi
        executor.shutdownNow();
        // NOTE(review): return value of isDeleted is ignored — presumably it
        // waits/asserts internally; confirm, otherwise deletion is unverified
        ManagedKafkaResourceType.isDeleted(mk);
        LOGGER.info("ManagedKafka {} deleted", mkAppName);
    } finally {
        // ensure the restart loop never outlives the test
        executor.shutdownNow();
    }
}
Also used : TestUtils(org.bf2.test.TestUtils) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ExecutorService(java.util.concurrent.ExecutorService) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) SequentialTest(org.bf2.systemtest.framework.SequentialTest)

Example 43 with Status

use of org.bf2.operator.clients.canary.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class SmokeST, method testCreateManagedKafka.

// Smoke test: drives a full create → status-sync → assert → delete lifecycle
// of a ManagedKafka through the sync API and compares the API-reported status
// against the custom resource's status.
@SequentialTest
void testCreateManagedKafka(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-create";
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
    // captured up front because `mk` is reassigned by waitUntilReady below
    String id = mk.getId();
    // Create mk using api
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    resourceManager.addResource(extensionContext, mk);
    HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
    // wait until the CR appears, then until it reports Ready (5 min budget)
    resourceManager.waitResourceCondition(mk, Objects::nonNull);
    mk = resourceManager.waitUntilReady(mk, 300_000);
    LOGGER.info("ManagedKafka {} created", mkAppName);
    // wait for the sync to be up-to-date
    TestUtils.waitFor("Managed kafka status sync", 1_000, 30_000, () -> {
        try {
            String statusBody = SyncApiClient.getManagedKafkaStatus(id, syncEndpoint).body();
            // empty body means the sync has not published a status yet; retry
            if (statusBody.isEmpty()) {
                return false;
            }
            ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
            return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
        } catch (Exception e) {
            // fail fast on unexpected parse/IO errors instead of retrying
            throw new AssertionError(e);
        }
    });
    // Get status and compare with CR status
    ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
    ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
    AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
    // Get agent status
    ManagedKafkaAgentStatus managedKafkaAgentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
    AssertUtils.assertManagedKafkaAgentStatus(managedKafkaAgentStatus);
    // Check if managed kafka deployed all components
    AssertUtils.assertManagedKafka(mk);
    // delete mk using api
    res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
    // NOTE(review): return value of isDeleted is ignored — presumably it
    // waits/asserts internally; confirm, otherwise deletion is unverified
    ManagedKafkaResourceType.isDeleted(mk);
    LOGGER.info("ManagedKafka {} deleted", mkAppName);
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Objects(java.util.Objects) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) SequentialTest(org.bf2.systemtest.framework.SequentialTest)

Example 44 with Status

use of org.bf2.operator.clients.canary.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class PollerTest, method testAddDelete.

// Walks the ManagedKafkaSync state machine through the add/duplicate-placement/
// delete lifecycle against a mocked control plane, asserting the local cache
// and the remote desired-state tracking stay consistent at every step.
@Test
public void testAddDelete() {
    ManagedKafka managedKafka = exampleManagedKafka();
    // baseline: nothing placed locally, nothing tracked remotely
    List<ManagedKafka> items = lookup.getLocalManagedKafkas();
    assertEquals(0, items.size());
    assertNull(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)));
    // control plane now offers one cluster; sync should place it locally
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Collections.singletonList(managedKafka)));
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    assertFalse(items.get(0).getSpec().isDeleted());
    // should do nothing
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    // make sure the remote tracking is there and not marked as deleted
    assertFalse(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)).getSpec().isDeleted());
    // try another placement - this shouldn't actually happen, should reject first and the original won't be there
    ManagedKafka nextPlacement = exampleManagedKafka();
    nextPlacement.setPlacementId("xyz");
    // "?" marks the rejected placement so we can tell the two apart below
    nextPlacement.getSpec().getVersions().setStrimzi("?");
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Arrays.asList(managedKafka, nextPlacement)));
    managedKafkaSync.syncKafkaClusters();
    // should still be a single placement, and it should be the old one
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    assertNotEquals("?", items.get(0).getSpec().getVersions().getStrimzi());
    // try to remove before marked as deleted, should not be successful
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList());
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    // control plane marks the original as deleted; sync should propagate that
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Arrays.asList(managedKafka, nextPlacement)));
    managedKafka.getSpec().setDeleted(true);
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertTrue(items.get(0).getSpec().isDeleted());
    // now the remote tracking should be marked as deleted
    assertTrue(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)).getSpec().isDeleted());
    // final removal
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList());
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(0, items.size());
    // remote tracking should be gone
    assertNull(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)));
    // if it shows up again need to inform the control plane delete is still needed
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Collections.singletonList(managedKafka)));
    managedKafkaSync.syncKafkaClusters();
    // expect there to be a status about the deletion
    // (raw Map.class is unavoidable with ArgumentCaptor.forClass for a generic type)
    ArgumentCaptor<Map<String, ManagedKafkaStatus>> statusCaptor = ArgumentCaptor.forClass(Map.class);
    Mockito.verify(controlPlaneRestClient).updateKafkaClustersStatus(Mockito.eq(CLUSTER_ID), statusCaptor.capture());
    Map<String, ManagedKafkaStatus> status = statusCaptor.getValue();
    assertEquals(1, status.size());
    assertEquals(1, status.get(ID).getConditions().size());
}
Also used : ManagedKafkaList(org.bf2.operator.resources.v1alpha1.ManagedKafkaList) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Map(java.util.Map) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)

Example 45 with Status

use of org.bf2.operator.clients.canary.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class StrimziManagerTest, method testStrimziVersionChange.

/**
 * Verifies the pause/upgrade handshake when the requested Strimzi version on a
 * ManagedKafka changes: reconciliation gets paused, the version label lags at
 * the current version, and both are updated only after Strimzi reports the
 * ReconciliationPaused condition.
 *
 * Fix: JUnit 5's assertEquals takes (expected, actual); the original passed
 * them reversed, which produces misleading failure messages. Pass/fail
 * behavior is unchanged.
 */
@Test
public void testStrimziVersionChange() {
    ManagedKafka mk = ManagedKafka.getDummyInstance(1);
    mk.getSpec().getVersions().setStrimzi("strimzi-cluster-operator.v1");
    Kafka kafka = this.kafkaCluster.kafkaFrom(mk, null);
    kafkaClient.create(kafka);
    // Kafka reconcile not paused and current label version as the ManagedKafka one
    assertFalse(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    assertEquals(mk.getSpec().getVersions().getStrimzi(), kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()));
    // ManagedKafka and Kafka updated their status information
    mk.setStatus(new ManagedKafkaStatusBuilder().withVersions(new VersionsBuilder().withStrimzi("strimzi-cluster-operator.v1").build()).build());
    kafka.setStatus(new KafkaStatusBuilder().withConditions(new ConditionBuilder().withType("Ready").withStatus("True").build()).build());
    kafkaClient.replaceStatus(kafka);
    // ask for a Strimzi version change on ManagedKafka
    mk.getSpec().getVersions().setStrimzi("strimzi-cluster-operator.v2");
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    // Kafka reconcile paused but label is still the current version
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    assertEquals(mk.getStatus().getVersions().getStrimzi(), kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()));
    // nothing should change after an intermediate reconcile
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    // Kafka moves to be paused
    kafka.setStatus(new KafkaStatusBuilder().withConditions(new ConditionBuilder().withType("ReconciliationPaused").withStatus("True").build()).build());
    kafkaClient.replaceStatus(kafka);
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    // Kafka reconcile not paused and Kafka label updated to requested Strimzi version
    assertFalse(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    // the pause reason should stay until strimzi updates to ready
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    assertEquals("strimzi-cluster-operator.v2", kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()));
}
Also used : ConditionBuilder(io.strimzi.api.kafka.model.status.ConditionBuilder) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) Kafka(io.strimzi.api.kafka.model.Kafka) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) KafkaStatusBuilder(io.strimzi.api.kafka.model.status.KafkaStatusBuilder) VersionsBuilder(org.bf2.operator.resources.v1alpha1.VersionsBuilder) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)

Aggregations

Test (org.junit.jupiter.api.Test)14 ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka)12 Objects (java.util.Objects)11 QuarkusTest (io.quarkus.test.junit.QuarkusTest)9 ManagedConnectorBuilder (org.bf2.cos.fleetshard.api.ManagedConnectorBuilder)7 ManagedConnectorSpecBuilder (org.bf2.cos.fleetshard.api.ManagedConnectorSpecBuilder)7 Map (java.util.Map)6 Inject (javax.inject.Inject)6 OperatorSelectorBuilder (org.bf2.cos.fleetshard.api.OperatorSelectorBuilder)6 RegistryData (org.bf2.srs.fleetmanager.storage.sqlPanacheImpl.model.RegistryData)6 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)6 ObjectMetaBuilder (io.fabric8.kubernetes.api.model.ObjectMetaBuilder)5 List (java.util.List)5 ConnectorDeploymentStatus (org.bf2.cos.fleet.manager.model.ConnectorDeploymentStatus)5 Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status)5 Quantity (io.fabric8.kubernetes.api.model.Quantity)4 KafkaConnectorBuilder (io.strimzi.api.kafka.model.KafkaConnectorBuilder)4 Transactional (javax.transaction.Transactional)4 ManagedKafkaCondition (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition)4 ManagedKafkaStatus (org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus)4