Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaCluster, method kafkaFrom.
/* test */
@Override
public Kafka kafkaFrom(ManagedKafka managedKafka, Kafka current) {
    // Start from the existing resource when one exists so fields we do not
    // explicitly set are carried over; otherwise build from scratch.
    KafkaBuilder base = current != null ? new KafkaBuilder(current) : new KafkaBuilder();

    int actualReplicas = getBrokerReplicas(managedKafka, current);
    int desiredReplicas = getBrokerReplicas(managedKafka, null);

    KafkaBuilder configured = base
            .editOrNewMetadata()
                .withName(kafkaClusterName(managedKafka))
                .withNamespace(kafkaClusterNamespace(managedKafka))
                .withLabels(buildKafkaLabels(managedKafka))
                .withAnnotations(buildKafkaAnnotations(managedKafka, current))
                .addToAnnotations(REPLICAS, String.valueOf(desiredReplicas))
            .endMetadata()
            .editOrNewSpec()
                .editOrNewKafka()
                    .withVersion(this.kafkaManager.currentKafkaVersion(managedKafka))
                    .withConfig(buildKafkaConfig(managedKafka, current))
                    .withReplicas(actualReplicas)
                    .withResources(buildKafkaResources(managedKafka))
                    .withJvmOptions(buildKafkaJvmOptions(managedKafka))
                    .withStorage(buildKafkaStorage(managedKafka, current))
                    .withListeners(buildListeners(managedKafka, actualReplicas))
                    .withRack(buildKafkaRack(managedKafka))
                    .withTemplate(buildKafkaTemplate(managedKafka))
                    .withMetricsConfig(buildKafkaMetricsConfig(managedKafka))
                    .withAuthorization(buildKafkaAuthorization(managedKafka))
                    .withImage(kafkaImage.orElse(null))
                    .withExternalLogging(buildKafkaExternalLogging(managedKafka))
                .endKafka()
                .editOrNewZookeeper()
                    .withReplicas(this.config.getZookeeper().getReplicas())
                    .withStorage((SingleVolumeStorage) buildZooKeeperStorage(current))
                    .withResources(buildZooKeeperResources(managedKafka))
                    .withJvmOptions(buildZooKeeperJvmOptions(managedKafka))
                    .withTemplate(buildZookeeperTemplate(managedKafka))
                    .withMetricsConfig(buildZooKeeperMetricsConfig(managedKafka))
                    .withImage(zookeeperImage.orElse(null))
                    .withExternalLogging(buildZookeeperExternalLogging(managedKafka))
                .endZookeeper()
                .withKafkaExporter(buildKafkaExporter(managedKafka))
            .endSpec();

    Kafka kafka = this.upgrade(managedKafka, configured);

    // setting the ManagedKafka as owner of the Kafka resource is needed
    // by the operator sdk to handle events on the Kafka resource properly
    OperandUtils.setAsOwner(managedKafka, kafka);
    return kafka;
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaCluster, method buildKafkaStorage.
/**
 * Builds the JBOD broker storage for the Kafka resource: a single persistent
 * claim volume sized from the adjusted max data retention. If the current
 * Kafka resource already has JBOD volumes, their settings are carried over via
 * handleExistingVolume; otherwise the configured storage class is applied.
 */
private Storage buildKafkaStorage(ManagedKafka managedKafka, Kafka current) {
    PersistentClaimStorageBuilder volumeBuilder = new PersistentClaimStorageBuilder()
            .withId(JBOD_VOLUME_ID)
            .withSize(getAdjustedMaxDataRetentionSize(managedKafka, current).getAmount())
            .withDeleteClaim(DELETE_CLAIM);

    Optional.ofNullable(current)
            .map(Kafka::getSpec)
            .map(KafkaSpec::getKafka)
            .map(KafkaClusterSpec::getStorage)
            .map(this::getExistingVolumesFromJbodStorage)
            .ifPresentOrElse(
                    existingVolumes -> existingVolumes.forEach(v -> handleExistingVolume(v, volumeBuilder)),
                    () -> volumeBuilder.withStorageClass(config.getKafka().getStorageClass()));

    return new JbodStorageBuilder().withVolumes(volumeBuilder.build()).build();
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaCluster, method addQuotaConfig.
/**
 * Adds the Strimzi static-quota plugin settings to the broker configuration:
 * per-broker ingress/egress throttling and soft/hard storage limits derived
 * from the adjusted max data retention size.
 */
private void addQuotaConfig(ManagedKafka managedKafka, Kafka current, Map<String, Object> config) {
    config.put("client.quota.callback.class", IO_STRIMZI_KAFKA_QUOTA_STATIC_QUOTA_CALLBACK);

    // Throttle at Ingress/Egress MB/sec per broker
    config.put(QUOTA_PRODUCE, String.valueOf(getIngressBytes(managedKafka, current)));
    config.put(QUOTA_FETCH, String.valueOf(getEgressBytes(managedKafka, current)));

    // Start throttling when disk is above requested size. Full stop when only storageMinMargin is free.
    Quantity maxDataRetentionSize = getAdjustedMaxDataRetentionSize(managedKafka, current);
    long retentionBytes = Quantity.getAmountInBytes(maxDataRetentionSize).longValue();
    long hardStorageLimit = retentionBytes - Quantity.getAmountInBytes(storageMinMargin).longValue();
    long softStorageLimit = retentionBytes - getStoragePadding(managedKafka, current);
    config.put("client.quota.callback.static.storage.soft", String.valueOf(softStorageLimit));
    config.put("client.quota.callback.static.storage.hard", String.valueOf(hardStorageLimit));

    // Check storage every storageCheckInterval seconds
    config.put("client.quota.callback.static.storage.check-interval", String.valueOf(storageCheckInterval));

    // Configure the quota plugin so that the canary is not subjected to the quota checks.
    Optional<ServiceAccount> canaryServiceAccount =
            managedKafka.getServiceAccount(ServiceAccount.ServiceAccountName.Canary);
    canaryServiceAccount.ifPresent(serviceAccount ->
            config.put("client.quota.callback.static.excluded.principal.name.list", serviceAccount.getPrincipal()));

    config.put("quota.window.num", "30");
    config.put("quota.window.size.seconds", "2");
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class IngressControllerManager, method getManagedKafkaRoutesFor.
/**
 * Returns the routes for the given ManagedKafka, sorted by name: the fixed
 * bootstrap and admin-server routes on the multi-zone ("kas") ingress
 * controller, plus one route per broker on its zone-specific controller.
 */
public List<ManagedKafkaRoute> getManagedKafkaRoutesFor(ManagedKafka mk) {
    String multiZoneRoute = getIngressControllerDomain("kas");
    String bootstrapDomain = mk.getSpec().getEndpoint().getBootstrapServerHost();

    Stream<ManagedKafkaRoute> fixedRoutes = Stream.of(
            new ManagedKafkaRoute("bootstrap", "", multiZoneRoute),
            new ManagedKafkaRoute("admin-server", "admin-server", multiZoneRoute));

    Stream<ManagedKafkaRoute> brokerRoutes = routesFor(mk)
            .filter(IS_BROKER_ROUTE)
            .map(route -> {
                // Broker routes live on the ingress controller of their own zone.
                String zoneDomain = getIngressControllerDomain("kas-" + getZoneForBrokerRoute(route));
                String routePrefix = route.getSpec().getHost().replaceFirst("-" + bootstrapDomain, "");
                return new ManagedKafkaRoute(routePrefix, routePrefix, zoneDomain);
            });

    return Stream.concat(fixedRoutes, brokerRoutes)
            .sorted(Comparator.comparing(ManagedKafkaRoute::getName))
            .collect(Collectors.toList());
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaManager, method doKafkaUpgradeStabilityCheck.
/**
 * Scheduled job to execute the Kafka stability check.
 *
 * Queries the canary's status endpoint; if the consuming percentage is above
 * the configured threshold the upgrade is considered stable and the upgrade
 * start/end annotations are removed, otherwise only the end annotation is
 * removed so the check can run again. Either way a reconcile is triggered.
 *
 * @param managedKafka ManagedKafka instance
 */
void doKafkaUpgradeStabilityCheck(ManagedKafka managedKafka) {
    String namespace = managedKafka.getMetadata().getNamespace();
    String name = managedKafka.getMetadata().getName();
    log.infof("[%s/%s] Kafka upgrade stability check", namespace, name);

    CanaryService canaryService = RestClientBuilder.newBuilder()
            .baseUri(URI.create("http://" + AbstractCanary.canaryName(managedKafka) + "." + namespace + ":8080"))
            .connectTimeout(10, TimeUnit.SECONDS)
            .readTimeout(30, TimeUnit.SECONDS)
            .build(CanaryService.class);
    try {
        Status status = canaryService.getStatus();
        log.infof("[%s/%s] Canary status: timeWindow %d - percentage %d",
                namespace, name, status.getConsuming().getTimeWindow(), status.getConsuming().getPercentage());
        if (status.getConsuming().getPercentage() > consumingPercentageThreshold) {
            // Stable: clear both upgrade markers so no further checks are scheduled.
            log.debugf("[%s/%s] Remove Kafka upgrade start/end annotations", namespace, name);
            managedKafkaClient.inNamespace(namespace).withName(name)
                    .edit(mk -> new ManagedKafkaBuilder(mk)
                            .editMetadata()
                                .removeFromAnnotations(KAFKA_UPGRADE_START_TIMESTAMP_ANNOTATION)
                                .removeFromAnnotations(KAFKA_UPGRADE_END_TIMESTAMP_ANNOTATION)
                            .endMetadata()
                            .build());
        } else {
            // Not stable yet: only drop the end marker so the stability check is retried.
            log.warnf("[%s/%s] Reported consuming percentage %d less than %d threshold",
                    namespace, name, status.getConsuming().getPercentage(), consumingPercentageThreshold);
            managedKafkaClient.inNamespace(namespace).withName(name)
                    .edit(mk -> new ManagedKafkaBuilder(mk)
                            .editMetadata()
                                .removeFromAnnotations(KAFKA_UPGRADE_END_TIMESTAMP_ANNOTATION)
                            .endMetadata()
                            .build());
        }
        // trigger a reconcile on the ManagedKafka instance to push checking if next step
        // Kafka IBP upgrade is needed or another stability check
        informerManager.resyncManagedKafka(managedKafka);
    } catch (Exception e) {
        // BUGFIX: the Throwable must be the FIRST argument to errorf; as a trailing
        // argument (beyond the format specifiers) it was silently dropped and the
        // stack trace never logged.
        log.errorf(e, "[%s/%s] Error while checking Kafka upgrade stability", namespace, name);
    }
}
Aggregations