Example use of org.bf2.operator.resources.v1alpha1.ManagedKafkaBuilder in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a).
Taken from the class KafkaManager, method addUpgradeTimeStampAnnotation:
/**
 * Stamps the ManagedKafka instance with a Kafka-upgrade timestamp (current UTC time).
 *
 * @param managedKafka ManagedKafka instance to annotate
 * @param annotation annotation key to add, marking the start or end of a Kafka upgrade
 */
private void addUpgradeTimeStampAnnotation(ManagedKafka managedKafka, String annotation) {
    String namespace = managedKafka.getMetadata().getNamespace();
    String name = managedKafka.getMetadata().getName();
    log.debugf("[%s/%s] Adding Kafka upgrade %s timestamp annotation", namespace, name, annotation);
    // Current UTC time rendered as an ISO-8601 instant (e.g. 2023-01-01T12:00:00Z).
    String timestamp = ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT);
    managedKafkaClient
            .inNamespace(namespace)
            .withName(name)
            .edit(mk -> new ManagedKafkaBuilder(mk)
                    .editMetadata()
                    .addToAnnotations(annotation, timestamp)
                    .endMetadata()
                    .build());
}
Another use of org.bf2.operator.resources.v1alpha1.ManagedKafkaBuilder in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a).
Taken from the class ManagedKafkaProvisioner, method deployCluster:
/**
 * TODO: if/when this will need to test bin packing, then we'll separate the profile setting from deployCluster
 *
 * Deploy a Kafka cluster using this provisioner.
 * @param name name of the ManagedKafka resource to create
 * @param managedKafkaCapacity requested capacity; maxPartitions (when set) drives broker replica count
 * @param profile instance configuration applied before deployment
 * @return the running deployment handle
 * @throws Exception if deployment fails
 */
public ManagedKafkaDeployment deployCluster(String name, ManagedKafkaCapacity managedKafkaCapacity, KafkaInstanceConfiguration profile) throws Exception {
    // Resolve the Strimzi version: explicit override, otherwise the latest installed one.
    String strimziVersion = PerformanceEnvironment.STRIMZI_VERSION;
    if (strimziVersion == null) {
        strimziVersion = strimziVersions.get(strimziVersions.size() - 1);
    }
    // Resolve the Kafka version: explicit override, otherwise the one paired with the Strimzi version.
    String kafkaVersion = PerformanceEnvironment.KAFKA_VERSION;
    if (kafkaVersion == null) {
        kafkaVersion = getKafkaVersion(strimziVersion);
    }
    // Validate the chosen Strimzi version against what the manager actually has installed.
    List<String> installedVersions = strimziManager.getVersions();
    if (!installedVersions.contains(strimziVersion)) {
        throw new IllegalStateException(String.format("Strimzi version %s is not in the set of installed versions %s", strimziVersion, installedVersions));
    }

    // Default of 3 brokers; scale up in multiples of 3 to cover the requested partition count.
    int replicas = 3;
    Integer maxPartitions = managedKafkaCapacity.getMaxPartitions();
    if (maxPartitions != null) {
        double capacityPerBrokerSet = profile.getKafka().getPartitionCapacity();
        replicas = (int) (3 * Math.ceil(maxPartitions / capacityPerBrokerSet));
    }
    applyProfile(profile, replicas);

    String namespace = Constants.KAFKA_NAMESPACE;
    String bootstrapHost = String.format("%s-kafka-bootstrap-%s.%s", name, namespace, domain);
    ManagedKafka managedKafka = new ManagedKafkaBuilder()
            .withNewMetadata()
                .withName(name)
                .withNamespace(namespace)
            .endMetadata()
            .withSpec(new ManagedKafkaSpecBuilder()
                    .withCapacity(managedKafkaCapacity)
                    .withNewEndpoint()
                        .withBootstrapServerHost(bootstrapHost)
                        .withNewTls()
                            .withCert(tlsConfig.getCert())
                            .withKey(tlsConfig.getKey())
                        .endTls()
                    .endEndpoint()
                    .withNewVersions()
                        .withKafka(kafkaVersion)
                        .withStrimzi(strimziVersion)
                    .endVersions()
                    .build())
            .build();

    // Track the cluster for later cleanup, then deploy and wait for it to start.
    clusters.add(managedKafka);
    LOGGER.info("Deploying {}", Serialization.asYaml(managedKafka));
    ManagedKafkaDeployment kafkaDeployment = deployCluster(namespace, managedKafka);
    kafkaDeployment.start();
    return kafkaDeployment;
}
End of aggregated usage examples.