
Example 56 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

Class SmokeST, method testCreateManagedKafka.

@SequentialTest
void testCreateManagedKafka(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-create";
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
    String id = mk.getId();
    // Create mk using api
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    resourceManager.addResource(extensionContext, mk);
    HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
    resourceManager.waitResourceCondition(mk, Objects::nonNull);
    mk = resourceManager.waitUntilReady(mk, 300_000);
    LOGGER.info("ManagedKafka {} created", mkAppName);
    // wait for the sync to be up-to-date
    TestUtils.waitFor("Managed kafka status sync", 1_000, 30_000, () -> {
        try {
            String statusBody = SyncApiClient.getManagedKafkaStatus(id, syncEndpoint).body();
            if (statusBody.isEmpty()) {
                return false;
            }
            ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
            return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    });
    // Get status and compare with CR status
    ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
    ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
    AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
    // Get agent status
    ManagedKafkaAgentStatus managedKafkaAgentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
    AssertUtils.assertManagedKafkaAgentStatus(managedKafkaAgentStatus);
    // Check if managed kafka deployed all components
    AssertUtils.assertManagedKafka(mk);
    // delete mk using api
    res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
    ManagedKafkaResourceType.isDeleted(mk);
    LOGGER.info("ManagedKafka {} deleted", mkAppName);
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Objects(java.util.Objects) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) SequentialTest(org.bf2.systemtest.framework.SequentialTest)
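The readiness check in this test relies on TestUtils.waitFor, which polls a condition until it holds or a timeout expires. Below is a minimal, self-contained sketch of that poll-and-timeout pattern; the real helper in kas-fleetshard may differ in signature and error handling.

import java.util.function.BooleanSupplier;

public class WaitForSketch {

    // Polls the supplier every pollIntervalMs until it returns true, failing after timeoutMs.
    // Sketch only; not the project's TestUtils.waitFor.
    public static void waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (ready.getAsBoolean()) {
                return;
            }
            try {
                Thread.sleep(pollIntervalMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new AssertionError("Interrupted while waiting for: " + description, e);
            }
        }
        throw new AssertionError("Timed out waiting for: " + description);
    }
}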

Example 57 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

Class UpgradeST, method testUpgradeStrimziVersion.

@SequentialTest
void testUpgradeStrimziVersion(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-upgrade";
    LOGGER.info("Create namespace");
    resourceManager.createResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    String startVersion = SyncApiClient.getPreviousStrimziVersion(syncEndpoint);
    String kafkaVersion = SyncApiClient.getLatestKafkaVersion(syncEndpoint, startVersion);
    LOGGER.info("Create managedkafka with version {}", startVersion);
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, startVersion, kafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);
    AssertUtils.assertManagedKafka(mk);
    LOGGER.info("Upgrade to {}", latestStrimziVersion);
    mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, kafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);
    if (!ManagedKafkaResourceType.isDevKafka(mk)) {
        resourceManager.waitResourceCondition(mk, m -> {
            String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
            if (reason != null) {
                return reason.equals(ManagedKafkaCondition.Reason.StrimziUpdating.toString());
            }
            return false;
        }, TimeUnit.MINUTES.toMillis(5));
        resourceManager.waitResourceCondition(mk, m -> ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason() == null, TimeUnit.MINUTES.toMillis(10));
    }
    TestUtils.waitFor("MK is upgraded", TimeUnit.SECONDS.toMillis(20), TimeUnit.MINUTES.toMillis(10), () -> {
        try {
            assertEquals(latestStrimziVersion, ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get().getStatus().getVersions().getStrimzi());
            return true;
        } catch (AssertionError err) {
            return false;
        }
    });
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) SequentialTest(org.bf2.systemtest.framework.SequentialTest)
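The two waitResourceCondition calls in this test call .get() on the value returned by ManagedKafkaResourceType.getCondition, which throws if the Ready condition has not been populated yet. A hedged sketch of a null-safe variant of the same check is shown below; it assumes getCondition returns Optional<ManagedKafkaCondition>, as the .get() calls suggest, and it is not the project's own helper.

import java.util.function.Predicate;
import org.bf2.operator.resources.v1alpha1.ManagedKafka;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition;
// import of ManagedKafkaResourceType from the systemtest framework omitted (package not shown in this snippet)

public class ReadyReasonPredicate {

    // True only when the Ready condition exists and carries the expected reason.
    public static Predicate<ManagedKafka> readyReasonIs(ManagedKafkaCondition.Reason expected) {
        return m -> ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready)
                .map(ManagedKafkaCondition::getReason)
                .map(reason -> reason.equals(expected.toString()))
                .orElse(false);
    }
}

With such a predicate, the first wait could be written as resourceManager.waitResourceCondition(mk, readyReasonIs(ManagedKafkaCondition.Reason.StrimziUpdating), TimeUnit.MINUTES.toMillis(5)) without risking a NoSuchElementException while the condition is still missing.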

Example 58 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

Class PollerTest, method testAddDelete.

@Test
public void testAddDelete() {
    ManagedKafka managedKafka = exampleManagedKafka();
    List<ManagedKafka> items = lookup.getLocalManagedKafkas();
    assertEquals(0, items.size());
    assertNull(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)));
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Collections.singletonList(managedKafka)));
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    assertFalse(items.get(0).getSpec().isDeleted());
    // should do nothing
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    // make sure the remote tracking is there and not marked as deleted
    assertFalse(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)).getSpec().isDeleted());
    // try another placement - this shouldn't actually happen, should reject first and the original won't be there
    ManagedKafka nextPlacement = exampleManagedKafka();
    nextPlacement.setPlacementId("xyz");
    nextPlacement.getSpec().getVersions().setStrimzi("?");
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Arrays.asList(managedKafka, nextPlacement)));
    managedKafkaSync.syncKafkaClusters();
    // should still be a single placement, and it should be the old one
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    assertNotEquals("?", items.get(0).getSpec().getVersions().getStrimzi());
    // try to remove before marked as deleted, should not be successful
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList());
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Arrays.asList(managedKafka, nextPlacement)));
    managedKafka.getSpec().setDeleted(true);
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertTrue(items.get(0).getSpec().isDeleted());
    // now the remote tracking should be marked as deleted
    assertTrue(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)).getSpec().isDeleted());
    // final removal
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList());
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(0, items.size());
    // remote tracking should be gone
    assertNull(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)));
    // if it shows up again need to inform the control plane delete is still needed
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Collections.singletonList(managedKafka)));
    managedKafkaSync.syncKafkaClusters();
    // expect there to be a status about the deletion
    ArgumentCaptor<Map<String, ManagedKafkaStatus>> statusCaptor = ArgumentCaptor.forClass(Map.class);
    Mockito.verify(controlPlaneRestClient).updateKafkaClustersStatus(Mockito.eq(CLUSTER_ID), statusCaptor.capture());
    Map<String, ManagedKafkaStatus> status = statusCaptor.getValue();
    assertEquals(1, status.size());
    assertEquals(1, status.get(ID).getConditions().size());
}
Also used : ManagedKafkaList(org.bf2.operator.resources.v1alpha1.ManagedKafkaList) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Map(java.util.Map) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)
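Each syncKafkaClusters() pass in this test observes a different control-plane view because every Mockito.when(...).thenReturn(...) call replaces the previous stubbing of getKafkaClusters. Here is a standalone sketch of that re-stubbing idiom, using an illustrative interface rather than the project's REST client.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import java.util.List;

public class RestubbingSketch {

    // Illustrative stand-in for the control plane REST client mocked in PollerTest.
    interface FakeControlPlaneClient {
        List<String> getKafkaClusters(String clusterId);
    }

    public static void main(String[] args) {
        FakeControlPlaneClient client = mock(FakeControlPlaneClient.class);

        when(client.getKafkaClusters("cluster-1")).thenReturn(List.of("placement-a"));
        // a first sync pass would observe one placement
        System.out.println(client.getKafkaClusters("cluster-1"));

        when(client.getKafkaClusters("cluster-1")).thenReturn(Collections.emptyList());
        // later passes observe an empty list, simulating removal on the control plane side
        System.out.println(client.getKafkaClusters("cluster-1"));
    }
}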

Example 59 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

Class UpdateTest, method testControlPlaneUpdates.

@Test
public void testControlPlaneUpdates() {
    ManagedKafka managedKafka = PollerTest.exampleManagedKafka();
    managedKafka.getMetadata().setNamespace(managedKafka.getId());
    managedKafka.setStatus(new ManagedKafkaStatusBuilder().addNewCondition().withStatus("Installed").endCondition().build());
    managedKafkaClient.create(managedKafka);
    controlPlane.updateKafkaClusterStatus(PollerTest.exampleManagedKafka(), managedKafka);
    assertEquals("Installed", getUpdates().getValue().get(PollerTest.ID).getConditions().get(0).getStatus());
    // simulate a resync
    // for now we're just looking for equality
    Mockito.clearInvocations(controlPlaneRestClient);
    controlPlane.updateKafkaClusterStatus(managedKafka, managedKafka);
    // should not be sent
    Mockito.verifyNoInteractions(controlPlaneRestClient);
    // send everything
    controlPlane.sendResync();
    ArgumentCaptor<Map<String, ManagedKafkaStatus>> statusCaptor = getUpdates();
    assertEquals("Installed", statusCaptor.getValue().get(PollerTest.ID).getConditions().get(0).getStatus());
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) Map(java.util.Map) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)
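The getUpdates() helper is not shown in this snippet. A plausible shape, assumed here rather than taken from the project, is to verify the updateKafkaClustersStatus call on the mocked REST client and capture the status map with an ArgumentCaptor, mirroring what PollerTest does above.

// Assumed reconstruction of the getUpdates() helper; the actual implementation in UpdateTest may differ.
// CLUSTER_ID is assumed to be shared with PollerTest; imports match those already used in the test class.
private ArgumentCaptor<Map<String, ManagedKafkaStatus>> getUpdates() {
    ArgumentCaptor<Map<String, ManagedKafkaStatus>> statusCaptor = ArgumentCaptor.forClass(Map.class);
    Mockito.verify(controlPlaneRestClient).updateKafkaClustersStatus(Mockito.eq(PollerTest.CLUSTER_ID), statusCaptor.capture());
    return statusCaptor;
}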

Example 60 with ManagedKafka

Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

Class AssertUtils, method assertManagedKafka.

public static void assertManagedKafka(ManagedKafka mk) {
    KubeClient kube = KubeClient.getInstance();
    assertNotNull(ManagedKafkaResourceType.getOperation().inNamespace(mk.getMetadata().getNamespace()).withName(mk.getMetadata().getName()).get());
    assertTrue(kube.client().pods().inNamespace(mk.getMetadata().getNamespace()).list().getItems().size() > 0);
    assertEquals("Running", ManagedKafkaResourceType.getCanaryPod(mk).getStatus().getPhase());
    assertEquals("Running", ManagedKafkaResourceType.getAdminApiPod(mk).getStatus().getPhase());
    assertEquals(3, ManagedKafkaResourceType.getKafkaPods(mk).size());
    if (!ManagedKafkaResourceType.isDevKafka(mk)) {
        assertEquals(1, ManagedKafkaResourceType.getKafkaExporterPods(mk).size());
    }
    assertEquals(3, ManagedKafkaResourceType.getZookeeperPods(mk).size());
}
Also used : KubeClient(org.bf2.test.k8s.KubeClient)
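The pod lookups above (getCanaryPod, getKafkaPods, getZookeeperPods) are project helpers whose bodies are not shown. As a rough illustration of how such a lookup can be written with the fabric8 client, here is a hypothetical sketch; the label key and value are assumptions, not necessarily the labels kas-fleetshard actually selects on.

import io.fabric8.kubernetes.api.model.Pod;
import java.util.List;
import org.bf2.operator.resources.v1alpha1.ManagedKafka;
import org.bf2.test.k8s.KubeClient;

public class PodLookupSketch {

    // Lists pods in the ManagedKafka namespace matching an assumed Strimzi-style label.
    public static List<Pod> kafkaPods(ManagedKafka mk) {
        KubeClient kube = KubeClient.getInstance();
        return kube.client().pods()
                .inNamespace(mk.getMetadata().getNamespace())
                .withLabel("app.kubernetes.io/name", "kafka") // assumed label key/value
                .list()
                .getItems();
    }
}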

Aggregations

ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka): 67
Kafka (io.strimzi.api.kafka.model.Kafka): 30
Test (org.junit.jupiter.api.Test): 24
QuarkusTest (io.quarkus.test.junit.QuarkusTest): 23
List (java.util.List): 16
Map (java.util.Map): 15
Inject (javax.inject.Inject): 15
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 15
Objects (java.util.Objects): 14
Quantity (io.fabric8.kubernetes.api.model.Quantity): 11
Optional (java.util.Optional): 11
Collectors (java.util.stream.Collectors): 10
ApplicationScoped (javax.enterprise.context.ApplicationScoped): 9
StrimziManager (org.bf2.operator.managers.StrimziManager): 9
Logger (org.jboss.logging.Logger): 9
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 8
ArrayList (java.util.ArrayList): 8
Reason (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason): 8
Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status): 8
ManagedKafkaUtils.exampleManagedKafka (org.bf2.operator.utils.ManagedKafkaUtils.exampleManagedKafka): 8