Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus in the project kas-fleetshard by bf2fc6cc711aee1a0c2a:
from the class ManagedKafkaAgentController, method buildStatus.
/**
 * Builds the agent status that is reported back to the control plane.
 *
 * <p>The capacity figures below are static placeholders.
 * TODO: this needs to be replaced with actual metrics
 *
 * @param resource the agent custom resource whose existing status (if any) is reused
 * @return a freshly built {@link ManagedKafkaAgentStatus}
 */
private ManagedKafkaAgentStatus buildStatus(ManagedKafkaAgent resource) {
    ManagedKafkaAgentStatus existingStatus = resource.getStatus();

    // reuse the existing Ready condition when one is already present on the resource
    ManagedKafkaCondition readyCondition = existingStatus == null ? null
            : ConditionUtils.findManagedKafkaCondition(existingStatus.getConditions(), Type.Ready).orElse(null);

    List<StrimziVersionStatus> strimziVersions = this.strimziManager.getStrimziVersions();
    log.debugf("Strimzi versions %s", strimziVersions);

    // consider the fleetshard operator ready when observability is running and a Strimzi bundle is installed (aka at least one available version)
    boolean operatorReady = this.observabilityManager.isObservabilityRunning() && !strimziVersions.isEmpty();
    Status statusValue = operatorReady ? ManagedKafkaCondition.Status.True : ManagedKafkaCondition.Status.False;

    if (readyCondition == null) {
        readyCondition = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, statusValue);
    } else {
        // updates the condition in place; the list held by existingStatus sees the change
        ConditionUtils.updateConditionStatus(readyCondition, statusValue, null, null);
    }

    NodeCounts nodeInfo = new NodeCountsBuilder()
            .withCeiling(0)
            .withCurrent(0)
            .withCurrentWorkLoadMinimum(0)
            .withFloor(0)
            .build();

    ClusterResizeInfo resize = new ClusterResizeInfoBuilder()
            .withDelta(placeholderCapacity())
            .withNodeDelta(3)
            .build();

    return new ManagedKafkaAgentStatusBuilder()
            .withConditions(existingStatus == null ? Arrays.asList(readyCondition) : existingStatus.getConditions())
            .withTotal(placeholderCapacity())
            .withRemaining(placeholderCapacity())
            .withNodeInfo(nodeInfo)
            .withResizeInfo(resize)
            .withUpdatedTimestamp(ConditionUtils.iso8601Now())
            .withStrimzi(strimziVersions)
            .build();
}

/**
 * Builds one placeholder {@link ClusterCapacity} instance with the hard-coded
 * values used until real metrics are wired in (see TODO on {@code buildStatus}).
 * A new instance is returned per call, matching the original per-field construction.
 */
private static ClusterCapacity placeholderCapacity() {
    return new ClusterCapacityBuilder()
            .withConnections(10000)
            .withDataRetentionSize(Quantity.parse("40Gi"))
            .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
            .withPartitions(10000)
            .build();
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus in the project kas-fleetshard by bf2fc6cc711aee1a0c2a:
from the class ControlPlane, method updateAgentStatus.
/**
 * Asynchronously pushes the local agent's status to the control plane.
 *
 * <p>Runs on the shared executor; silently skips the update when there is no
 * local {@code ManagedKafkaAgent} resource or it has no status yet.
 */
private void updateAgentStatus() {
    log.debug("Updating agent status");
    executorService.execute(() -> {
        ManagedKafkaAgent agent = localLookup.getLocalManagedKafkaAgent();
        ManagedKafkaAgentStatus agentStatus = agent == null ? null : agent.getStatus();
        if (agentStatus == null) {
            // no status yet — the control plane is not looking for this sync as a heartbeat
            return;
        }
        controlPlaneClient.updateStatus(id, agentStatus);
    });
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus in the project kas-fleetshard by bf2fc6cc711aee1a0c2a:
from the class ManagedKafkaST, method testCreateManagedKafkaRestartKubeApi.
/**
 * End-to-end test: creates and then deletes a ManagedKafka via the sync API while the
 * Kubernetes API server is being restarted in the background, verifying the fleetshard
 * components tolerate API-server unavailability. Also cross-checks the status reported
 * through the sync endpoint against the CR status and validates the agent status.
 */
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
// single worker thread that repeatedly restarts the kube API server in the background
ExecutorService executor = Executors.newFixedThreadPool(1);
try {
String mkAppName = "mk-test-restart-kubeapi";
ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
// start restarting kubeapi
executor.execute(TestUtils::restartKubeApi);
// give the restart loop time to actually take the API server down
Thread.sleep(5_000);
// Create mk using api
resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
resourceManager.addResource(extensionContext, mk);
HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
// stop restarting kubeapi
executor.shutdownNow();
// wait up to 15 minutes for the CR's Ready condition despite the earlier disruption
resourceManager.waitResourceCondition(mk, m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True), TimeUnit.MINUTES.toMillis(15));
LOGGER.info("ManagedKafka {} created", mkAppName);
// wait for the sync to be up-to-date
TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
try {
String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
// empty body means the sync has not published a status yet — keep polling
if (statusBody.isEmpty()) {
return false;
}
ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
} catch (Exception e) {
// fail the poll loop immediately instead of retrying on unexpected errors
throw new AssertionError(e);
}
});
// Get status and compare with CR status
ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
// Get agent status
ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
AssertUtils.assertManagedKafkaAgentStatus(agentStatus);
// Check if managed kafka deployed all components
AssertUtils.assertManagedKafka(mk);
// start restarting kubeapi
// the previous executor was shut down above, so a fresh pool is needed here
executor = Executors.newFixedThreadPool(1);
executor.execute(TestUtils::restartKubeApi);
Thread.sleep(5_000);
// delete mk using api
res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
// stop restarting kubeapi
executor.shutdownNow();
// NOTE(review): return value of isDeleted is ignored — presumably it waits for/checks
// deletion; confirm whether the boolean should be asserted here
ManagedKafkaResourceType.isDeleted(mk);
LOGGER.info("ManagedKafka {} deleted", mkAppName);
} finally {
// safety net: ensure the kube-API restart thread is stopped even on test failure
executor.shutdownNow();
}
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus in the project kas-fleetshard by bf2fc6cc711aee1a0c2a:
from the class SmokeST, method testCreateManagedKafka.
/**
 * Smoke test: creates a ManagedKafka through the sync API, waits for it to become Ready,
 * verifies the status reported by the sync endpoint matches the CR status, validates the
 * agent status and deployed components, then deletes it through the API.
 */
@SequentialTest
void testCreateManagedKafka(ExtensionContext extensionContext) throws Exception {
String mkAppName = "mk-test-create";
ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
// capture the id before `mk` is reassigned by waitUntilReady below
String id = mk.getId();
// Create mk using api
resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
resourceManager.addResource(extensionContext, mk);
HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
// first wait for the CR to appear, then for it to become Ready (5 min budget)
resourceManager.waitResourceCondition(mk, Objects::nonNull);
mk = resourceManager.waitUntilReady(mk, 300_000);
LOGGER.info("ManagedKafka {} created", mkAppName);
// wait for the sync to be up-to-date
TestUtils.waitFor("Managed kafka status sync", 1_000, 30_000, () -> {
try {
String statusBody = SyncApiClient.getManagedKafkaStatus(id, syncEndpoint).body();
// empty body means the sync has not published a status yet — keep polling
if (statusBody.isEmpty()) {
return false;
}
ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
} catch (Exception e) {
// fail the poll loop immediately instead of retrying on unexpected errors
throw new AssertionError(e);
}
});
// Get status and compare with CR status
ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
// Get agent status
ManagedKafkaAgentStatus managedKafkaAgentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
AssertUtils.assertManagedKafkaAgentStatus(managedKafkaAgentStatus);
// Check if managed kafka deployed all components
AssertUtils.assertManagedKafka(mk);
// delete mk using api
res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
// NOTE(review): return value of isDeleted is ignored — presumably it waits for/checks
// deletion; confirm whether the boolean should be asserted here
ManagedKafkaResourceType.isDeleted(mk);
LOGGER.info("ManagedKafka {} deleted", mkAppName);
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus in the project kas-fleetshard by bf2fc6cc711aee1a0c2a:
from the class SyncApiClient, method getLatestAvailableKafkaVersion.
/**
 * Returns the highest Kafka version offered by the given Strimzi version, comparing
 * versions semantically via Maven's {@code ComparableVersion} rather than lexically.
 *
 * @param statusSupplier supplies the current agent status listing available versions
 * @param strimziVersion the Strimzi version whose Kafka versions are inspected
 * @return the maximum available Kafka version string
 * @throws java.util.NoSuchElementException if no Kafka versions are available
 *         (same behavior as the previous unchecked {@code Optional.get()})
 */
public static String getLatestAvailableKafkaVersion(Supplier<ManagedKafkaAgentStatus> statusSupplier, String strimziVersion) {
    // Take the maximum in a single O(n) reduce instead of sorting the whole
    // stream just to read its last element; ">= 0" keeps the later duplicate,
    // matching the stable-sort-then-take-last behavior of the original.
    return getKafkaVersions(statusSupplier, strimziVersion)
            .reduce((a, b) -> new ComparableVersion(a).compareTo(new ComparableVersion(b)) >= 0 ? a : b)
            .orElseThrow();
}
Aggregations