Use of org.bf2.test.TestUtils in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
From the class ManagedKafkaST, method testCreateManagedKafkaRestartKubeApi.
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        String mkAppName = "mk-test-restart-kubeapi";
        ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);

        // start restarting kubeapi
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);

        // Create mk using api
        resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
        resourceManager.addResource(extensionContext, mk);
        HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());

        // stop restarting kubeapi
        executor.shutdownNow();
        resourceManager.waitResourceCondition(mk,
                m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True),
                TimeUnit.MINUTES.toMillis(15));
        LOGGER.info("ManagedKafka {} created", mkAppName);

        // wait for the sync to be up-to-date
        TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
            try {
                String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
                if (statusBody.isEmpty()) {
                    return false;
                }
                ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
                return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });

        // Get status and compare with CR status
        ManagedKafkaStatus apiStatus = Serialization.jsonMapper()
                .readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
        ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
        AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);

        // Get agent status
        ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper()
                .readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
        AssertUtils.assertManagedKafkaAgentStatus(agentStatus);

        // Check if managed kafka deployed all components
        AssertUtils.assertManagedKafka(mk);

        // start restarting kubeapi
        executor = Executors.newFixedThreadPool(1);
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);

        // delete mk using api
        res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());

        // stop restarting kubeapi
        executor.shutdownNow();
        ManagedKafkaResourceType.isDeleted(mk);
        LOGGER.info("ManagedKafka {} deleted", mkAppName);
    } finally {
        executor.shutdownNow();
    }
}
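The test relies on TestUtils.waitFor as a generic poll-until-ready helper, called above with a description, a poll interval in milliseconds, a timeout in milliseconds, and a boolean condition. Below is a minimal sketch of such a helper, assuming only the signature visible in that call; the class name WaitForSketch, the exception types, and the message format are illustrative and not the actual org.bf2.test implementation.

import java.util.function.BooleanSupplier;

// Sketch of a poll-until-ready helper modelled on the
// TestUtils.waitFor(description, pollIntervalMs, timeoutMs, ready) call used above.
// Hypothetical class; it only illustrates the polling pattern.
public final class WaitForSketch {

    public static void waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (ready.getAsBoolean()) {
                return; // condition satisfied before the timeout
            }
            try {
                Thread.sleep(pollIntervalMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException("Interrupted while waiting for " + description, e);
            }
        }
        throw new AssertionError("Timed out waiting for " + description);
    }

    public static void main(String[] args) {
        // Toy usage: poll every 50 ms until 200 ms have elapsed, with a 1 s timeout.
        long target = System.currentTimeMillis() + 200;
        waitFor("clock to advance 200 ms", 50, 1_000, () -> System.currentTimeMillis() >= target);
        System.out.println("condition met");
    }
}

In the test itself, the condition repeatedly fetches the ManagedKafka status from the sync endpoint and returns true once the Ready condition is reported, so a helper of this shape turns an eventually-consistent API into a blocking assertion with a bounded wait.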