Use of org.bf2.operator.clients.canary.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class ManagedKafkaAgentControllerTest, method shouldCreateStatus.
@Test
void shouldCreateStatus() {
    // try without an agent - nothing should happen
    mkaController.statusUpdateLoop();
    ManagedKafkaAgent dummyInstance = ManagedKafkaAgentResourceClient.getDummyInstance();
    dummyInstance.getMetadata().setNamespace(agentClient.getNamespace());
    assertNull(dummyInstance.getStatus());
    agentClient.create(dummyInstance);
    // now that an agent exists, the loop should create its status
    mkaController.statusUpdateLoop();
    ManagedKafkaAgent agent = agentClient.getByName(agentClient.getNamespace(), ManagedKafkaAgentResourceClient.RESOURCE_NAME);
    assertNotNull(agent.getStatus());
    agentClient.delete(agentClient.getNamespace(), ManagedKafkaAgentResourceClient.RESOURCE_NAME);
}
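The single statusUpdateLoop() call above is enough because the controller runs synchronously inside the test. Where the loop runs on its own schedule, a small retry helper is a safer way to wait for the status to appear. The sketch below is hypothetical and not part of the project; it reuses only the mkaController and agentClient calls shown in the test, while the polling loop, timeout, and sleep interval are assumptions.

private ManagedKafkaAgent waitForAgentStatus(long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        // drive the controller and check whether it has written a status yet
        mkaController.statusUpdateLoop();
        ManagedKafkaAgent agent = agentClient.getByName(agentClient.getNamespace(),
                ManagedKafkaAgentResourceClient.RESOURCE_NAME);
        if (agent != null && agent.getStatus() != null) {
            return agent;
        }
        Thread.sleep(500); // poll interval is an arbitrary choice for this sketch
    }
    throw new AssertionError("agent status was not created within " + timeoutMs + " ms");
}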
The class ManagedKafkaST, method testCreateManagedKafkaRestartKubeApi.
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        String mkAppName = "mk-test-restart-kubeapi";
        ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
        // start restarting kubeapi
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);
        // Create mk using api
        resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
        resourceManager.addResource(extensionContext, mk);
        HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi
        executor.shutdownNow();
        resourceManager.waitResourceCondition(mk, m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True), TimeUnit.MINUTES.toMillis(15));
        LOGGER.info("ManagedKafka {} created", mkAppName);
        // wait for the sync to be up-to-date
        TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
            try {
                String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
                if (statusBody.isEmpty()) {
                    return false;
                }
                ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
                return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });
        // Get status and compare with CR status
        ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
        ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
        AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
        // Get agent status
        ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
        AssertUtils.assertManagedKafkaAgentStatus(agentStatus);
        // Check if managed kafka deployed all components
        AssertUtils.assertManagedKafka(mk);
        // start restarting kubeapi
        executor = Executors.newFixedThreadPool(1);
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);
        // delete mk using api
        res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi
        executor.shutdownNow();
        ManagedKafkaResourceType.isDeleted(mk);
        LOGGER.info("ManagedKafka {} deleted", mkAppName);
    } finally {
        executor.shutdownNow();
    }
}
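The test above relies on executor.shutdownNow() to stop the kube API disruption both on the happy path and in the finally block. Below is a minimal sketch of that lifecycle, assuming TestUtils.restartKubeApi() performs one restart per call and therefore needs an interrupt-aware loop around it; that wrapping is an assumption about the helper, not how the project necessarily implements it.

ExecutorService disruptor = Executors.newFixedThreadPool(1);
disruptor.execute(() -> {
    // keep restarting until shutdownNow() interrupts this worker thread
    while (!Thread.currentThread().isInterrupted()) {
        TestUtils.restartKubeApi();
    }
});
try {
    // ... exercise the sync API while the kube API keeps restarting ...
} finally {
    disruptor.shutdownNow(); // interrupts the worker so the test never leaks the thread
}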
The class SmokeST, method testCreateManagedKafka.
@SequentialTest
void testCreateManagedKafka(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-create";
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
    String id = mk.getId();
    // Create mk using api
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    resourceManager.addResource(extensionContext, mk);
    HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
    resourceManager.waitResourceCondition(mk, Objects::nonNull);
    mk = resourceManager.waitUntilReady(mk, 300_000);
    LOGGER.info("ManagedKafka {} created", mkAppName);
    // wait for the sync to be up-to-date
    TestUtils.waitFor("Managed kafka status sync", 1_000, 30_000, () -> {
        try {
            String statusBody = SyncApiClient.getManagedKafkaStatus(id, syncEndpoint).body();
            if (statusBody.isEmpty()) {
                return false;
            }
            ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
            return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    });
    // Get status and compare with CR status
    ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
    ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
    AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
    // Get agent status
    ManagedKafkaAgentStatus managedKafkaAgentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
    AssertUtils.assertManagedKafkaAgentStatus(managedKafkaAgentStatus);
    // Check if managed kafka deployed all components
    AssertUtils.assertManagedKafka(mk);
    // delete mk using api
    res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
    ManagedKafkaResourceType.isDeleted(mk);
    LOGGER.info("ManagedKafka {} deleted", mkAppName);
}
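The Ready-status poll against the sync endpoint is duplicated between testCreateManagedKafka and testCreateManagedKafkaRestartKubeApi. A hypothetical helper like the one below could factor it out; it reuses only calls that appear in the snippets (TestUtils.waitFor, SyncApiClient.getManagedKafkaStatus, Serialization.jsonMapper(), ManagedKafkaResourceType.hasConditionStatus), but the helper itself is not project code.

private static void waitForSyncReady(String id, String syncEndpoint, long timeoutMs) {
    TestUtils.waitFor("Managed kafka status sync", 1_000, timeoutMs, () -> {
        try {
            String statusBody = SyncApiClient.getManagedKafkaStatus(id, syncEndpoint).body();
            if (statusBody.isEmpty()) {
                // the sync has not published a status for this id yet
                return false;
            }
            ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
            return ManagedKafkaResourceType.hasConditionStatus(apiStatus,
                    ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    });
}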
The class PollerTest, method testAddDelete.
@Test
public void testAddDelete() {
    ManagedKafka managedKafka = exampleManagedKafka();
    List<ManagedKafka> items = lookup.getLocalManagedKafkas();
    assertEquals(0, items.size());
    assertNull(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)));
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Collections.singletonList(managedKafka)));
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    assertFalse(items.get(0).getSpec().isDeleted());
    // should do nothing
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    // make sure the remote tracking is there and not marked as deleted
    assertFalse(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)).getSpec().isDeleted());
    // try another placement - this shouldn't actually happen: the control plane should reject it first, and the original wouldn't still be there
    ManagedKafka nextPlacement = exampleManagedKafka();
    nextPlacement.setPlacementId("xyz");
    nextPlacement.getSpec().getVersions().setStrimzi("?");
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Arrays.asList(managedKafka, nextPlacement)));
    managedKafkaSync.syncKafkaClusters();
    // should still be a single placement, and it should be the old one
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    assertNotEquals("?", items.get(0).getSpec().getVersions().getStrimzi());
    // try to remove before marked as deleted, should not be successful
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList());
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(1, items.size());
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Arrays.asList(managedKafka, nextPlacement)));
    managedKafka.getSpec().setDeleted(true);
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertTrue(items.get(0).getSpec().isDeleted());
    // now the remote tracking should be marked as deleted
    assertTrue(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)).getSpec().isDeleted());
    // final removal
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList());
    managedKafkaSync.syncKafkaClusters();
    items = lookup.getLocalManagedKafkas();
    assertEquals(0, items.size());
    // remote tracking should be gone
    assertNull(controlPlane.getDesiredState(ControlPlane.managedKafkaKey(managedKafka)));
    // if it shows up again, we need to inform the control plane that the delete is still needed
    Mockito.when(controlPlaneRestClient.getKafkaClusters(CLUSTER_ID)).thenReturn(new ManagedKafkaList(Collections.singletonList(managedKafka)));
    managedKafkaSync.syncKafkaClusters();
    // expect there to be a status about the deletion
    ArgumentCaptor<Map<String, ManagedKafkaStatus>> statusCaptor = ArgumentCaptor.forClass(Map.class);
    Mockito.verify(controlPlaneRestClient).updateKafkaClustersStatus(Mockito.eq(CLUSTER_ID), statusCaptor.capture());
    Map<String, ManagedKafkaStatus> status = statusCaptor.getValue();
    assertEquals(1, status.size());
    assertEquals(1, status.get(ID).getConditions().size());
}
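The capture at the end of testAddDelete follows a standard Mockito pattern: stub the control plane client, run the sync, then verify the status payload it was sent. The self-contained example below illustrates just that pattern with a stand-in interface, since the real ControlPlaneRestClient lives in kas-fleetshard; everything in it is illustrative rather than project code.

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.Map;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

class CaptorPatternTest {

    // stand-in for the project's control plane REST client
    interface StatusSink {
        void updateStatus(String clusterId, Map<String, String> statuses);
    }

    @Test
    void capturesStatusMap() {
        StatusSink sink = Mockito.mock(StatusSink.class);
        sink.updateStatus("cluster-1", Map.of("mk-1", "Deleted"));

        // forClass(Map.class) needs an unchecked cast to keep the generic value type
        @SuppressWarnings("unchecked")
        ArgumentCaptor<Map<String, String>> captor = ArgumentCaptor.forClass(Map.class);
        Mockito.verify(sink).updateStatus(Mockito.eq("cluster-1"), captor.capture());
        assertEquals(1, captor.getValue().size());
    }
}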
The class StrimziManagerTest, method testStrimziVersionChange.
@Test
public void testStrimziVersionChange() {
    ManagedKafka mk = ManagedKafka.getDummyInstance(1);
    mk.getSpec().getVersions().setStrimzi("strimzi-cluster-operator.v1");
    Kafka kafka = this.kafkaCluster.kafkaFrom(mk, null);
    kafkaClient.create(kafka);
    // Kafka reconcile not paused and the current label version is the ManagedKafka one
    assertFalse(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    assertEquals(kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()), mk.getSpec().getVersions().getStrimzi());
    // ManagedKafka and Kafka update their status information
    mk.setStatus(new ManagedKafkaStatusBuilder().withVersions(new VersionsBuilder().withStrimzi("strimzi-cluster-operator.v1").build()).build());
    kafka.setStatus(new KafkaStatusBuilder().withConditions(new ConditionBuilder().withType("Ready").withStatus("True").build()).build());
    kafkaClient.replaceStatus(kafka);
    // ask for a Strimzi version change on the ManagedKafka
    mk.getSpec().getVersions().setStrimzi("strimzi-cluster-operator.v2");
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    // Kafka reconcile paused but the label is still the current version
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    assertEquals(kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()), mk.getStatus().getVersions().getStrimzi());
    // nothing should change after an intermediate reconcile
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    // Kafka moves to paused
    kafka.setStatus(new KafkaStatusBuilder().withConditions(new ConditionBuilder().withType("ReconciliationPaused").withStatus("True").build()).build());
    kafkaClient.replaceStatus(kafka);
    kafka = this.kafkaCluster.kafkaFrom(mk, kafka);
    // Kafka reconcile not paused and the Kafka label updated to the requested Strimzi version
    assertFalse(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION));
    // the pause reason should stay until Strimzi updates to ready
    assertTrue(kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION));
    assertEquals(kafka.getMetadata().getLabels().get(this.strimziManager.getVersionLabel()), "strimzi-cluster-operator.v2");
}
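testStrimziVersionChange walks the Kafka CR through three states: pause requested, pause acknowledged, and label updated. A hypothetical convenience check that mirrors those assertions is sketched below; it uses only accessors that appear in the test (metadata annotations and the status conditions built with ConditionBuilder), but the helper itself is not part of StrimziManager.

static boolean isPausedForUpgrade(Kafka kafka) {
    // the operator requested the pause via the annotation...
    boolean pauseRequested = kafka.getMetadata().getAnnotations() != null
            && kafka.getMetadata().getAnnotations().containsKey(StrimziManager.STRIMZI_PAUSE_RECONCILE_ANNOTATION);
    // ...and Strimzi acknowledged it with a ReconciliationPaused=True condition
    boolean pauseAcknowledged = kafka.getStatus() != null
            && kafka.getStatus().getConditions() != null
            && kafka.getStatus().getConditions().stream()
                    .anyMatch(c -> "ReconciliationPaused".equals(c.getType()) && "True".equals(c.getStatus()));
    return pauseRequested && pauseAcknowledged;
}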