use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaDeployment method start.
public void start() {
    ManagedKafkaResourceType type = new ManagedKafkaResourceType();
    Resource<ManagedKafka> resource = type.resource(cluster.kubeClient(), managedKafka);
    Predicate<ManagedKafka> readyCheck = type.readiness(cluster.kubeClient());
    // poll the ManagedKafka resource every second, for up to 10 minutes, until the readiness predicate passes
    readyFuture = TestUtils.asyncWaitFor("cluster ready", 1000, 600_000, () -> readyCheck.test(resource.get()));
}
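The ready check is stored as a future rather than awaited in place, so a test can deploy several clusters and only block once they are all created. A minimal sketch of how the future might be consumed, assuming readyFuture is a java.util.concurrent.Future; the waitUntilReady name is illustrative, not taken from the snippet above:

// Hypothetical consumer of the readiness future started in start().
public void waitUntilReady() throws InterruptedException, ExecutionException {
    // blocks until asyncWaitFor completes, i.e. until the readiness predicate has passed or the timeout fired
    readyFuture.get();
}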
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaProvisioner method deployCluster.
ManagedKafkaDeployment deployCluster(String namespace, ManagedKafka managedKafka) throws Exception {
    var configMapClient = cluster.kubeClient().client().configMaps().inNamespace(namespace);

    // set kafka and zookeeper metrics
    if (PerformanceEnvironment.ENABLE_METRICS) {
        ConfigMap kafkaMetrics = configMapClient.load(ManagedKafkaProvisioner.class.getClassLoader().getResource("kafka-metrics.yaml")).get();
        kafkaMetrics.getMetadata().setName(managedKafka.getMetadata().getName() + "-kafka-metrics");
        configMapClient.createOrReplace(kafkaMetrics);

        ConfigMap zookeeperMetrics = configMapClient.load(ManagedKafkaProvisioner.class.getClassLoader().getResource("zookeeper-metrics.yaml")).get();
        zookeeperMetrics.getMetadata().setName(managedKafka.getMetadata().getName() + "-zookeeper-metrics");
        configMapClient.createOrReplace(zookeeperMetrics);
    }

    // create the managed kafka
    var managedKafkaClient = cluster.kubeClient().client().resources(ManagedKafka.class);
    managedKafka = managedKafkaClient.inNamespace(namespace).createOrReplace(managedKafka);

    // wait for the operator to create the backing Strimzi Kafka resource
    var kafkaClient = cluster.kubeClient().client().resources(Kafka.class).inNamespace(namespace).withName(managedKafka.getMetadata().getName());
    org.bf2.test.TestUtils.waitFor("kafka resource", 1_000, 300_000, () -> kafkaClient.get() != null);

    // track the result
    Kafka kafka = kafkaClient.require();
    LOGGER.info("Created Kafka {}", Serialization.asYaml(kafka));

    return new ManagedKafkaDeployment(managedKafka, cluster);
}
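PerformanceEnvironment.ENABLE_METRICS gates the optional metrics ConfigMaps above. The class itself is not shown here; a minimal sketch of how such an environment-driven flag is typically resolved (the variable name and default are assumptions):

// Hypothetical flag resolution; the real PerformanceEnvironment class may differ.
public static final boolean ENABLE_METRICS =
        Boolean.parseBoolean(System.getenv().getOrDefault("ENABLE_METRICS", "false"));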
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaProvisioner method removeClusters.
/**
 * Removes Kafka clusters. When {@code all} is true, every ManagedKafka in the namespace is removed;
 * otherwise only the clusters created by this provisioner are removed.
 *
 * @param all whether to remove all ManagedKafka instances in the namespace
 * @throws IOException
 */
public void removeClusters(boolean all) throws IOException {
    var client = cluster.kubeClient().client().resources(ManagedKafka.class).inNamespace(Constants.KAFKA_NAMESPACE);
    List<ManagedKafka> kafkas = clusters;
    if (all) {
        kafkas = client.list().getItems();
    }
    // issue all deletes first, then wait for each one to finish
    for (ManagedKafka k : kafkas) {
        LOGGER.info("Removing cluster {}", k.getMetadata().getName());
        client.withName(k.getMetadata().getName()).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
    }
    for (ManagedKafka k : kafkas) {
        org.bf2.test.TestUtils.waitFor("await delete deployment", 1_000, 600_000, () -> client.withName(k.getMetadata().getName()).get() == null);
    }
    clusters.clear();
}
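A hedged usage sketch of removeClusters in a test teardown; the provisioner field and the JUnit 5 lifecycle wiring are illustrative assumptions:

// Illustrative teardown; the field name and lifecycle annotation are assumed.
@AfterAll
void teardown() throws IOException {
    // false: remove only the ManagedKafka instances this provisioner created, not everything in the namespace
    provisioner.removeClusters(false);
}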
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaProvisioner method deployCluster.
/**
 * TODO: if/when this will need to test bin packing, then we'll separate the profile setting from deployCluster
 *
 * Deploy a Kafka cluster using this provisioner.
 *
 * @param name the name of the ManagedKafka to create
 * @param managedKafkaCapacity the requested capacity, used to derive the broker replica count
 * @param profile the instance configuration applied before the cluster is deployed
 */
public ManagedKafkaDeployment deployCluster(String name, ManagedKafkaCapacity managedKafkaCapacity, KafkaInstanceConfiguration profile) throws Exception {
    // set and validate the strimzi version
    String strimziVersion = PerformanceEnvironment.STRIMZI_VERSION;
    if (strimziVersion == null) {
        strimziVersion = strimziVersions.get(strimziVersions.size() - 1);
    }
    String kafkaVersion = PerformanceEnvironment.KAFKA_VERSION;
    if (kafkaVersion == null) {
        kafkaVersion = getKafkaVersion(strimziVersion);
    }
    List<String> versions = strimziManager.getVersions();
    if (!versions.contains(strimziVersion)) {
        throw new IllegalStateException(String.format("Strimzi version %s is not in the set of installed versions %s", strimziVersion, versions));
    }

    // scale brokers in multiples of three based on the requested partition capacity
    int replicas = 3;
    if (managedKafkaCapacity.getMaxPartitions() != null) {
        replicas = (int) (3 * Math.ceil(managedKafkaCapacity.getMaxPartitions() / (double) profile.getKafka().getPartitionCapacity()));
    }
    applyProfile(profile, replicas);

    String namespace = Constants.KAFKA_NAMESPACE;

    ManagedKafka managedKafka = new ManagedKafkaBuilder()
            .withNewMetadata()
                .withName(name)
                .withNamespace(namespace)
            .endMetadata()
            .withSpec(new ManagedKafkaSpecBuilder()
                    .withCapacity(managedKafkaCapacity)
                    .withNewEndpoint()
                        .withBootstrapServerHost(String.format("%s-kafka-bootstrap-%s.%s", name, namespace, domain))
                        .withNewTls()
                            .withCert(tlsConfig.getCert())
                            .withKey(tlsConfig.getKey())
                        .endTls()
                    .endEndpoint()
                    .withNewVersions()
                        .withKafka(kafkaVersion)
                        .withStrimzi(strimziVersion)
                    .endVersions()
                    .build())
            .build();

    clusters.add(managedKafka);
    LOGGER.info("Deploying {}", Serialization.asYaml(managedKafka));

    ManagedKafkaDeployment kafkaDeployment = deployCluster(namespace, managedKafka);
    kafkaDeployment.start();
    return kafkaDeployment;
}
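A hedged example of a call site for deployCluster; the capacity values are arbitrary and the ManagedKafkaCapacityBuilder setters and KafkaInstanceConfiguration defaults are assumptions for illustration:

// Illustrative call; values and builder method names are assumptions.
ManagedKafkaCapacity capacity = new ManagedKafkaCapacityBuilder()
        .withMaxPartitions(1_000)
        .withIngressEgressThroughputPerSec(new Quantity("4Mi"))
        .build();
ManagedKafkaDeployment deployment = provisioner.deployCluster("perf-cluster", capacity, new KafkaInstanceConfiguration());
// deployCluster has already called start(), so the readiness wait is running in the background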
use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class KafkaClusterTest method testManagedKafkaToKafkaBrokerPerNode.
@Test
void testManagedKafkaToKafkaBrokerPerNode() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    config.getKafka().setOneInstancePerNode(true);
    config.getKafka().setColocateWithZookeeper(true);
    config.getExporter().setColocateWithZookeeper(true);
    try {
        ManagedKafka mk = exampleManagedKafka("60Gi");
        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);
        diffToExpected(kafka.getSpec().getKafka().getTemplate(), "/expected/broker-per-node-kafka.yml");
        diffToExpected(kafka.getSpec().getKafkaExporter().getTemplate(), "/expected/broker-per-node-exporter.yml");
        diffToExpected(kafka.getSpec().getZookeeper().getTemplate(), "/expected/broker-per-node-zookeeper.yml");
    } finally {
        // restore the shared configuration so other tests are not affected
        config.getKafka().setOneInstancePerNode(false);
        config.getKafka().setColocateWithZookeeper(false);
        config.getExporter().setColocateWithZookeeper(false);
    }
}
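diffToExpected is a helper from the surrounding test class and its implementation is not shown here. A minimal sketch of what such a comparison might look like, using Jackson to compare the serialized template against a checked-in YAML resource; the mapper setup and assertion are assumptions, not the project's actual helper:

// Hypothetical helper, not the project's implementation.
static void diffToExpected(Object actual, String expectedResource) throws IOException {
    ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
    JsonNode expected = mapper.readTree(KafkaClusterTest.class.getResourceAsStream(expectedResource));
    JsonNode actualNode = mapper.valueToTree(actual);
    // fail when the generated template drifts from the expected resource
    Assertions.assertEquals(expected, actualNode);
}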