use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class DebeziumOperandControllerTest method computeStatus.
@ParameterizedTest
@MethodSource
void computeStatus(String connectorState, List<Condition> connectorConditions, List<Condition> connectConditions,
        String expectedManagedConnectorState, String expectedReason) {
    ConnectorStatusSpec status = new ConnectorStatusSpec();

    DebeziumOperandSupport.computeStatus(
            status,
            new KafkaConnectBuilder()
                    .withStatus(new KafkaConnectStatusBuilder()
                            .addAllToConditions(connectConditions)
                            .build())
                    .build(),
            new KafkaConnectorBuilder()
                    .withStatus(new KafkaConnectorStatusBuilder()
                            .addAllToConditions(connectorConditions)
                            .addToConnectorStatus("connector",
                                    new org.bf2.cos.fleetshard.operator.debezium.model.KafkaConnectorStatusBuilder()
                                            .withState(connectorState)
                                            .build())
                            .build())
                    .build());

    assertThat(status.getPhase()).isEqualTo(expectedManagedConnectorState);
    assertThat(status.getConditions()).anySatisfy(condition -> assertThat(condition)
            .hasFieldOrPropertyWithValue("type", "Ready")
            .hasFieldOrPropertyWithValue("status", null == expectedReason ? "True" : "False")
            .hasFieldOrPropertyWithValue("reason", expectedReason));
}
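The @MethodSource annotation with no value resolves to a factory method of the same name as the test. A minimal sketch of such a provider is shown below; the connector states, conditions, expected phases and reasons are illustrative placeholders, not the project's actual test data.

import java.util.List;
import java.util.stream.Stream;
import org.junit.jupiter.params.provider.Arguments;

// Hypothetical provider matching the parameter order of computeStatus(...);
// every concrete value below is a placeholder, not taken from cos-fleetshard.
static Stream<Arguments> computeStatus() {
    return Stream.of(
            // connectorState, connectorConditions, connectConditions, expectedManagedConnectorState, expectedReason
            Arguments.of("RUNNING", List.of(), List.of(), "ready", null),
            Arguments.of("FAILED", List.of(), List.of(), "failed", "ConnectorFailed"));
}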
use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
the class CommonHandler method errorResponse.
static ResponseBuilder errorResponse(Throwable error, StatusType status, String errorMessage) {
    if (status.getFamily() == Family.SERVER_ERROR) {
        log.errorf(error, "%s %s", error.getClass(), error.getMessage());
    } else {
        log.warnf("%s %s", error.getClass(), error.getMessage());
    }

    final int statusCode = status.getStatusCode();
    ResponseBuilder response = Response.status(statusCode);
    Types.Error errorEntity = new Types.Error();
    errorEntity.setCode(statusCode);

    if (errorMessage != null) {
        errorEntity.setErrorMessage(errorMessage);
    } else if (status == Status.INTERNAL_SERVER_ERROR) {
        errorEntity.setErrorMessage(status.getReasonPhrase());
    } else {
        errorEntity.setErrorMessage(error.getMessage());
        errorEntity.setClassName(error.getClass().getSimpleName());
    }

    response.entity(errorEntity);
    return response;
}
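A typical caller builds the HTTP response from the returned ResponseBuilder inside a catch block. The resource method below is a hypothetical illustration only; the path, the topicService call and the exception handling are assumptions, not code from kafka-admin-api.

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Response;

// Hypothetical usage of errorResponse(...): translate a failure into an HTTP response.
@GET
@Path("/topics/{topicName}")
public Response describeTopic(@PathParam("topicName") String topicName) {
    try {
        return Response.ok(topicService.describe(topicName)).build(); // assumed service call
    } catch (Exception e) {
        return errorResponse(e, Status.INTERNAL_SERVER_ERROR, null).build();
    }
}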
use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class KafkaManager method doKafkaUpgradeStabilityCheck.
/**
* Scheduled job to execute the Kafka stability check
*
* @param managedKafka ManagedKafka instance
*/
void doKafkaUpgradeStabilityCheck(ManagedKafka managedKafka) {
    log.infof("[%s/%s] Kafka upgrade stability check",
            managedKafka.getMetadata().getNamespace(), managedKafka.getMetadata().getName());

    CanaryService canaryService = RestClientBuilder.newBuilder()
            .baseUri(URI.create("http://" + AbstractCanary.canaryName(managedKafka) + "."
                    + managedKafka.getMetadata().getNamespace() + ":8080"))
            .connectTimeout(10, TimeUnit.SECONDS)
            .readTimeout(30, TimeUnit.SECONDS)
            .build(CanaryService.class);

    try {
        Status status = canaryService.getStatus();
        log.infof("[%s/%s] Canary status: timeWindow %d - percentage %d",
                managedKafka.getMetadata().getNamespace(), managedKafka.getMetadata().getName(),
                status.getConsuming().getTimeWindow(), status.getConsuming().getPercentage());

        if (status.getConsuming().getPercentage() > consumingPercentageThreshold) {
            log.debugf("[%s/%s] Remove Kafka upgrade start/end annotations",
                    managedKafka.getMetadata().getNamespace(), managedKafka.getMetadata().getName());
            managedKafkaClient.inNamespace(managedKafka.getMetadata().getNamespace())
                    .withName(managedKafka.getMetadata().getName())
                    .edit(mk -> new ManagedKafkaBuilder(mk)
                            .editMetadata()
                                .removeFromAnnotations(KAFKA_UPGRADE_START_TIMESTAMP_ANNOTATION)
                                .removeFromAnnotations(KAFKA_UPGRADE_END_TIMESTAMP_ANNOTATION)
                            .endMetadata()
                            .build());
        } else {
            log.warnf("[%s/%s] Reported consuming percentage %d less than %d threshold",
                    managedKafka.getMetadata().getNamespace(), managedKafka.getMetadata().getName(),
                    status.getConsuming().getPercentage(), consumingPercentageThreshold);
            managedKafkaClient.inNamespace(managedKafka.getMetadata().getNamespace())
                    .withName(managedKafka.getMetadata().getName())
                    .edit(mk -> new ManagedKafkaBuilder(mk)
                            .editMetadata()
                                .removeFromAnnotations(KAFKA_UPGRADE_END_TIMESTAMP_ANNOTATION)
                            .endMetadata()
                            .build());
        }

        // trigger a reconcile on the ManagedKafka instance so it can check whether the next step
        // (the Kafka IBP upgrade) is needed, or whether another stability check has to run
        informerManager.resyncManagedKafka(managedKafka);
    } catch (Exception e) {
        // pass the exception first so the stack trace is logged
        log.errorf(e, "[%s/%s] Error while checking Kafka upgrade stability",
                managedKafka.getMetadata().getNamespace(), managedKafka.getMetadata().getName());
    }
}
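RestClientBuilder.build(CanaryService.class) needs a JAX-RS annotated MicroProfile REST client interface. The sketch below shows the minimal shape that the calls above assume; the /status path and the Status model exposing a consuming time window and percentage are inferred assumptions, not necessarily the project's exact contract.

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

// Minimal sketch of the canary client interface assumed by the code above.
@Path("/status")
public interface CanaryService {

    @GET
    @Produces(MediaType.APPLICATION_JSON)
    Status getStatus(); // Status#getConsuming() exposes getTimeWindow() and getPercentage()
}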
use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaAgentController method statusUpdateLoop.
@Timed(value = "controller.status.update", extraTags = { "resource", "ManagedKafkaAgent" }, description = "Time spent processing status updates")
@Counted(value = "controller.status.update", extraTags = { "resource", "ManagedKafkaAgent" }, description = "The number of status updates")
@Scheduled(every = "{agent.status.interval}", concurrentExecution = ConcurrentExecution.SKIP)
void statusUpdateLoop() {
    ManagedKafkaAgent resource = this.agentClient.getByName(
            this.agentClient.getNamespace(), ManagedKafkaAgentResourceClient.RESOURCE_NAME);
    if (resource != null) {
        // check and reinstate the observability secret if the observability config changed
        this.observabilityManager.createOrUpdateObservabilitySecret(resource.getSpec().getObservability(), resource);
        log.debugf("Tick to update Kafka agent Status in namespace %s", this.agentClient.getNamespace());
        resource.setStatus(buildStatus(resource));
        this.agentClient.replaceStatus(resource);
    }
}
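The loop delegates to buildStatus(resource) to assemble the published status. The sketch below is a hedged illustration of the Ready condition it could publish via ManagedKafkaCondition.Status; the ManagedKafkaAgentStatusBuilder field names are assumptions based on the CRD model, and the real method also reports capacity and version information.

// Hedged sketch, not the project's implementation: publish a Ready condition
// built from ManagedKafkaCondition.Status on the agent status.
private ManagedKafkaAgentStatus buildStatus(ManagedKafkaAgent resource) {
    ManagedKafkaCondition ready =
            ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, Status.True);
    return new ManagedKafkaAgentStatusBuilder()
            .withConditions(List.of(ready))
            .withUpdatedTimestamp(ConditionUtils.iso8601Now())
            .build();
}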
use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaController method updateManagedKafkaStatus.
/**
* Extracts from the overall status of the current KafkaInstance (Kafka, Canary and AdminServer)
* a corresponding list of ManagedKafkaCondition(s) to set on the ManagedKafka status
*
* @param managedKafka ManagedKafka instance
*/
private void updateManagedKafkaStatus(ManagedKafka managedKafka) {
    // add a status if not already available on the ManagedKafka resource
    ManagedKafkaStatus status = Objects.requireNonNullElse(managedKafka.getStatus(), new ManagedKafkaStatusBuilder().build());
    status.setUpdatedTimestamp(ConditionUtils.iso8601Now());
    managedKafka.setStatus(status);

    // add conditions if not already available
    List<ManagedKafkaCondition> managedKafkaConditions = managedKafka.getStatus().getConditions();
    if (managedKafkaConditions == null) {
        managedKafkaConditions = new ArrayList<>();
        status.setConditions(managedKafkaConditions);
    }
    Optional<ManagedKafkaCondition> optReady =
            ConditionUtils.findManagedKafkaCondition(managedKafkaConditions, ManagedKafkaCondition.Type.Ready);
    ManagedKafkaCondition ready = null;
    if (optReady.isPresent()) {
        ready = optReady.get();
    } else {
        ready = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, Status.Unknown);
        managedKafkaConditions.add(ready);
    }

    // an invalid ManagedKafka is not handled further, so its status reports an error condition
    OperandReadiness readiness = this.validity(managedKafka).orElse(kafkaInstance.getReadiness(managedKafka));
    ConditionUtils.updateConditionStatus(ready, readiness.getStatus(), readiness.getReason(), readiness.getMessage());

    // routes should always be set on the CR status, even if it's just an empty list
    status.setRoutes(List.of());
    int replicas = kafkaCluster.getReplicas(managedKafka);
    if (ingressControllerManagerInstance.isResolvable()) {
        IngressControllerManager ingressControllerManager = ingressControllerManagerInstance.get();
        List<ManagedKafkaRoute> routes = ingressControllerManager.getManagedKafkaRoutesFor(managedKafka);
        // expect a route for each broker + 1 for the bootstrap URL + 1 for the Admin API server
        int expectedNumRoutes = replicas + NUM_NON_BROKER_ROUTES;
        if (routes.size() >= expectedNumRoutes && routes.stream().noneMatch(r -> "".equals(r.getRouter()))) {
            status.setRoutes(routes);
        }
    }

    if (Status.True.equals(readiness.getStatus())) {
        status.setCapacity(new ManagedKafkaCapacityBuilder(managedKafka.getSpec().getCapacity())
                .withMaxDataRetentionSize(kafkaInstance.getKafkaCluster().calculateRetentionSize(managedKafka))
                .build());

        // the versions in the status are updated incrementally, copying from the spec only when each upgrade stage ends
        VersionsBuilder versionsBuilder = status.getVersions() != null
                ? new VersionsBuilder(status.getVersions())
                : new VersionsBuilder(managedKafka.getSpec().getVersions());
        if (!Reason.StrimziUpdating.equals(readiness.getReason()) && !this.strimziManager.hasStrimziChanged(managedKafka)) {
            versionsBuilder.withStrimzi(managedKafka.getSpec().getVersions().getStrimzi());
        }
        if (!Reason.KafkaUpdating.equals(readiness.getReason()) && !this.kafkaManager.hasKafkaVersionChanged(managedKafka)) {
            versionsBuilder.withKafka(managedKafka.getSpec().getVersions().getKafka());
        }
        if (!Reason.KafkaIbpUpdating.equals(readiness.getReason()) && !this.kafkaManager.hasKafkaIbpVersionChanged(managedKafka)) {
            String kafkaIbp = managedKafka.getSpec().getVersions().getKafkaIbp() != null
                    ? managedKafka.getSpec().getVersions().getKafkaIbp()
                    : AbstractKafkaCluster.getKafkaIbpVersion(managedKafka.getSpec().getVersions().getKafka());
            versionsBuilder.withKafkaIbp(kafkaIbp);
        }
        status.setVersions(versionsBuilder.build());
        status.setAdminServerURI(kafkaInstance.getAdminServer().uri(managedKafka));
        status.setServiceAccounts(managedKafka.getSpec().getServiceAccounts());
    }
}
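The validity(managedKafka) call above returns an Optional<OperandReadiness> that takes precedence over the operand readiness when the resource itself is invalid. The sketch below illustrates that idea only; the concrete check, the message and the OperandReadiness constructor shape are assumptions inferred from the usage here.

// Hedged sketch only: an invalid ManagedKafka is reported as an errored Ready condition
// instead of deriving readiness from the operands. The concrete check is illustrative.
private Optional<OperandReadiness> validity(ManagedKafka managedKafka) {
    if (managedKafka.getSpec().getVersions() == null
            || managedKafka.getSpec().getVersions().getKafka() == null) {
        return Optional.of(new OperandReadiness(Status.False, Reason.Error,
                "invalid ManagedKafka: no Kafka version specified"));
    }
    return Optional.empty();
}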