Use of org.bf2.systemtest.framework.SequentialTest in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class ManagedKafkaST, method testCreateManagedKafkaRestartKubeApi.
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        String mkAppName = "mk-test-restart-kubeapi";
        ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);

        // start restarting kubeapi
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);

        // Create mk using api
        resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
        resourceManager.addResource(extensionContext, mk);
        HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());

        // stop restarting kubeapi
        executor.shutdownNow();

        resourceManager.waitResourceCondition(mk,
                m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True),
                TimeUnit.MINUTES.toMillis(15));
        LOGGER.info("ManagedKafka {} created", mkAppName);

        // wait for the sync to be up-to-date
        TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
            try {
                String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
                if (statusBody.isEmpty()) {
                    return false;
                }
                ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
                return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });

        // Get status and compare with CR status
        ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
        ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
        AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);

        // Get agent status
        ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
        AssertUtils.assertManagedKafkaAgentStatus(agentStatus);

        // Check if managed kafka deployed all components
        AssertUtils.assertManagedKafka(mk);

        // start restarting kubeapi again
        executor = Executors.newFixedThreadPool(1);
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);

        // delete mk using api
        res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());

        // stop restarting kubeapi
        executor.shutdownNow();

        ManagedKafkaResourceType.isDeleted(mk);
        LOGGER.info("ManagedKafka {} deleted", mkAppName);
    } finally {
        executor.shutdownNow();
    }
}
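SequentialTest itself is a composed JUnit 5 annotation. Below is a minimal sketch of how such an annotation can be declared; this is an illustration based on common JUnit 5 meta-annotation patterns, not the actual kas-fleetshard definition, and the lock name "sequential-tests" is an assumption.

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.parallel.ResourceAccessMode;
import org.junit.jupiter.api.parallel.ResourceLock;

// Hypothetical sketch: a composed annotation marking a JUnit 5 test that
// holds an exclusive lock, so it never runs in parallel with other tests
// sharing the same lock key.
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Test
@ResourceLock(value = "sequential-tests", mode = ResourceAccessMode.READ_WRITE)
public @interface SequentialTest {
}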
Use of org.bf2.systemtest.framework.SequentialTest in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class SmokeST, method testCreateManagedKafka.
@SequentialTest
void testCreateManagedKafka(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-create";
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
    String id = mk.getId();

    // Create mk using api
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    resourceManager.addResource(extensionContext, mk);
    HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());

    resourceManager.waitResourceCondition(mk, Objects::nonNull);
    mk = resourceManager.waitUntilReady(mk, 300_000);
    LOGGER.info("ManagedKafka {} created", mkAppName);

    // wait for the sync to be up-to-date
    TestUtils.waitFor("Managed kafka status sync", 1_000, 30_000, () -> {
        try {
            String statusBody = SyncApiClient.getManagedKafkaStatus(id, syncEndpoint).body();
            if (statusBody.isEmpty()) {
                return false;
            }
            ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
            return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    });

    // Get status and compare with CR status
    ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
    ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
    AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);

    // Get agent status
    ManagedKafkaAgentStatus managedKafkaAgentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
    AssertUtils.assertManagedKafkaAgentStatus(managedKafkaAgentStatus);

    // Check if managed kafka deployed all components
    AssertUtils.assertManagedKafka(mk);

    // delete mk using api
    res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
    assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
    ManagedKafkaResourceType.isDeleted(mk);
    LOGGER.info("ManagedKafka {} deleted", mkAppName);
}
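The TestUtils.waitFor(description, pollIntervalMs, timeoutMs, ready) calls in the tests above poll a boolean supplier until it returns true or the timeout elapses. A minimal sketch of those semantics follows; it is a hypothetical reimplementation for illustration, not the project's actual helper.

import java.util.function.BooleanSupplier;

// Polls `ready` every pollIntervalMs; returns once it yields true, and
// fails with an AssertionError if timeoutMs elapses first.
static void waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        if (ready.getAsBoolean()) {
            return;
        }
        try {
            Thread.sleep(pollIntervalMs);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new AssertionError("Interrupted while waiting for " + description, e);
        }
    }
    throw new AssertionError("Timed out waiting for " + description);
}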
Use of org.bf2.systemtest.framework.SequentialTest in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class UpgradeST, method testUpgradeStrimziVersion.
@SequentialTest
void testUpgradeStrimziVersion(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-upgrade";
    LOGGER.info("Create namespace");
    resourceManager.createResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());

    String startVersion = SyncApiClient.getPreviousStrimziVersion(syncEndpoint);
    String kafkaVersion = SyncApiClient.getLatestKafkaVersion(syncEndpoint, startVersion);

    LOGGER.info("Create managedkafka with version {}", startVersion);
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, startVersion, kafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);
    AssertUtils.assertManagedKafka(mk);

    LOGGER.info("Upgrade to {}", latestStrimziVersion);
    mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, kafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);

    if (!ManagedKafkaResourceType.isDevKafka(mk)) {
        resourceManager.waitResourceCondition(mk, m -> {
            String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
            if (reason != null) {
                return reason.equals(ManagedKafkaCondition.Reason.StrimziUpdating.toString());
            }
            return false;
        }, TimeUnit.MINUTES.toMillis(5));
        resourceManager.waitResourceCondition(mk,
                m -> ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason() == null,
                TimeUnit.MINUTES.toMillis(10));
    }

    TestUtils.waitFor("MK is upgraded", TimeUnit.SECONDS.toMillis(20), TimeUnit.MINUTES.toMillis(10), () -> {
        try {
            assertEquals(latestStrimziVersion, ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get().getStatus().getVersions().getStrimzi());
            return true;
        } catch (AssertionError err) {
            return false;
        }
    });
}
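Both waits above read the Ready condition out of the CR status via ManagedKafkaResourceType.getCondition. A minimal sketch of what such a lookup can look like is below; it is a hypothetical reimplementation, written under the assumption that ManagedKafkaStatus exposes a condition list and that ManagedKafkaCondition reports its type as a String.

import java.util.Optional;

// Hypothetical sketch: finds the condition of the given type in the
// status, if present; empty when the status has no conditions yet.
static Optional<ManagedKafkaCondition> getCondition(ManagedKafkaStatus status, ManagedKafkaCondition.Type type) {
    if (status == null || status.getConditions() == null) {
        return Optional.empty();
    }
    return status.getConditions().stream()
            .filter(c -> type.name().equals(c.getType()))
            .findFirst();
}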
Use of org.bf2.systemtest.framework.SequentialTest in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class SuiteUnitTest, method testExecutorError.
@SequentialTest
void testExecutorError() {
    ExecBuilder command = Exec.builder()
            .withCommand("ppppeeeepppaaa", "jenda")
            .logToOutput(false)
            .throwErrors(true)
            .timeout(60);
    assertThrows(KubeClusterException.class, command::exec);
}
Use of org.bf2.systemtest.framework.SequentialTest in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class OperatorST, method testUpgradeStrimziVersion.
@SequentialTest
void testUpgradeStrimziVersion(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-upgrade";
    LOGGER.info("Create namespace");
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());

    String startVersion = strimziVersions.get(strimziVersions.size() - 2);
    LOGGER.info("Create managedkafka with version {}", startVersion);
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, null, startVersion, latestKafkaVersion);
    mk = resourceManager.createResource(extensionContext, mk);

    Resource<ManagedKafka> mkResource = kube.client().resources(ManagedKafka.class)
            .inNamespace(mk.getMetadata().getNamespace())
            .withName(mk.getMetadata().getName());

    LOGGER.info("Upgrading managedkafka to version {}", latestStrimziVersion);
    mkResource.edit(r -> {
        r.getSpec().getVersions().setStrimzi(latestStrimziVersion);
        return r;
    });

    mkResource.waitUntilCondition(m -> {
        String reason = ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason();
        return ManagedKafkaCondition.Reason.StrimziUpdating.name().equals(reason);
    }, 5, TimeUnit.MINUTES);

    mkResource.waitUntilCondition(
            m -> ManagedKafkaResourceType.getCondition(m.getStatus(), ManagedKafkaCondition.Type.Ready).get().getReason() == null
                    && latestStrimziVersion.equals(m.getStatus().getVersions().getStrimzi()),
            10, TimeUnit.MINUTES);
}
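Unlike UpgradeST, which recreates the ManagedKafka CR to change versions, this test patches spec.versions.strimzi in place and leaves the rollout to the operator. Factored into a reusable helper, that step could look like the following sketch; the helper name is hypothetical, but it uses the same fabric8 edit call as the test above.

import io.fabric8.kubernetes.client.dsl.Resource;

// Hypothetical helper: patch only the target Strimzi version on the CR;
// the fleetshard operator observes the change and drives the rollout.
static void upgradeStrimzi(Resource<ManagedKafka> mkResource, String targetVersion) {
    mkResource.edit(mk -> {
        mk.getSpec().getVersions().setStrimzi(targetVersion);
        return mk;
    });
}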