Use of org.bf2.operator.operands.KafkaCluster in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class IngressControllerManagerTest, method testSummarize.
@Test
public void testSummarize() {
// Derive a Kafka resource from a dummy ManagedKafka instance.
ManagedKafka mk = ManagedKafka.getDummyInstance(1);
Kafka kafka = this.kafkaCluster.kafkaFrom(mk, null);
int brokerReplicas = kafka.getSpec().getKafka().getReplicas();
int instanceCount = 4;
// Summarize egress over identical copies; the default supplier must never be
// consulted when every instance carries a quota, so it throws if invoked.
LongSummaryStatistics egress = IngressControllerManager.summarize(
        Collections.nCopies(instanceCount, kafka),
        KafkaCluster::getFetchQuota,
        () -> {
            throw new AssertionError();
        });
// Expected per-instance egress: the capacity value rounded down to a
// multiple of the broker replica count (integer division then multiply).
long perInstanceEgress =
        Quantity.getAmountInBytes(mk.getSpec().getCapacity().getEgressPerSec()).longValue()
                / brokerReplicas * brokerReplicas;
assertEquals(perInstanceEgress, egress.getMax());
assertEquals(perInstanceEgress * instanceCount, egress.getSum());
}
Use of org.bf2.operator.operands.KafkaCluster in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class InstanceProfiler, method setup.
/**
 * Prepares a profiling run: loads any previous results (resume support),
 * connects to the Kafka and OMB clusters, installs the OMB workers, and
 * sizes their cpu/memory requests from the available worker-node resources.
 *
 * @throws Exception if reading/writing results, connecting to a cluster,
 *         or installing the provisioner/OMB fails
 */
private void setup() throws Exception {
// Load state from a previous (possibly partial) run so it can be resumed.
readResults();
if (profilingResult.name == null) {
// First run: derive a unique profile name from the current timestamp.
profilingResult.name = "profile-" + Environment.DATE_FORMAT.format(LocalDateTime.now());
}
logDir = new File("target", profilingResult.name);
Files.createDirectories(logDir.toPath());
// Connect to the Kafka-side cluster and record its worker instance type.
kafkaCluster = KubeClusterResource.connectToKubeCluster(PerformanceEnvironment.KAFKA_KUBECONFIG);
profilingResult.kafkaNodeType = kafkaCluster.getWorkerNodes().get(0).getMetadata().getLabels().get("node.kubernetes.io/instance-type");
kafkaProvisioner = ManagedKafkaProvisioner.create(kafkaCluster);
kafkaProvisioner.setup();
// Connect to the (possibly separate) OMB load-generation cluster and
// install the OMB workers, trusting the provisioner's TLS config.
omb = new OMB(KubeClusterResource.connectToKubeCluster(PerformanceEnvironment.OMB_KUBECONFIG));
omb.install(kafkaProvisioner.getTlsConfig());
// TODO: if there is an existing result, make sure it's the same test setup
profilingResult.ombNodeType = omb.getOmbCluster().getWorkerNodes().get(0).getMetadata().getLabels().get("node.kubernetes.io/instance-type");
profilingResult.ombWorkerNodes = omb.getOmbCluster().getWorkerNodes().size();
// Smallest free cpu/memory across OMB worker nodes bounds what each node can host.
AvailableResources resources = getMinAvailableResources(omb.getOmbCluster().getWorkerNodes().stream());
// use all available resources on the worker nodes with 2 workers per node
// if (resources.memoryBytes > 16*ONE_GB || resources.memoryBytes < 8*ONE_GB) {
// throw new IllegalStateException("Client instance types are expected to have 16 GB");
// }
// assume instead resources that will fit on 2xlarge or xlarge
resources.cpuMillis = Math.min(6400, resources.cpuMillis);
resources.memoryBytes = Math.min(12 * ONE_GB, resources.memoryBytes);
// Halve per worker because two OMB workers are scheduled per node (see above).
omb.setWorkerCpu(Quantity.parse(resources.cpuMillis / 2 + "m"));
omb.setWorkerContainerMemory(Quantity.parse(String.valueOf(resources.memoryBytes / 2)));
profilingResult.ombWorkerCpu = omb.getWorkerCpu();
profilingResult.ombWorkerMemory = omb.getWorkerContainerMemory();
LOGGER.info("OMB Workers will use {} cpu and {} memory requests", omb.getWorkerCpu(), omb.getWorkerContainerMemory());
if (profilingResult.completedStep == null) {
// Fresh run (no completed step recorded): install the provisioner and
// persist that SETUP finished so a later resume can skip it.
installedProvisioner = true;
kafkaProvisioner.install();
writeResults(Step.SETUP);
}
}
Use of org.bf2.operator.operands.KafkaCluster in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class InstanceProfiler, method deployIfNeeded.
/**
 * Ensures a ManagedKafka with the given name is running and ready.
 * If the instance already exists it is reused; otherwise all existing
 * clusters are removed and a fresh one is deployed from the current
 * profiling capacity/config. On return {@code instanceBootstrap} holds
 * the ready deployment's bootstrap details.
 *
 * @param name the ManagedKafka instance name in {@code Constants.KAFKA_NAMESPACE}
 * @throws Exception if deployment or the readiness wait fails
 */
private void deployIfNeeded(String name) throws Exception {
ManagedKafka mk = null;
Resource<ManagedKafka> mkResource = kafkaCluster.kubeClient().client().resources(ManagedKafka.class).inNamespace(Constants.KAFKA_NAMESPACE).withName(name);
try {
    mk = mkResource.get();
} catch (KubernetesClientException e) {
    // Best-effort lookup: any client failure (e.g. the CRD is not yet
    // installed) is treated as "instance absent" and we deploy fresh below.
    // Log it rather than swallowing silently so resume issues are diagnosable.
    LOGGER.debug("Could not fetch existing ManagedKafka, a new cluster will be deployed", e);
}
ManagedKafkaDeployment kd;
if (mk == null) {
    if (!installedProvisioner) {
        // TODO: come up with a better resume logic here - it currently has to recreate everything
        installedProvisioner = true;
        kafkaProvisioner.install();
    }
    // Start from a clean slate before deploying the new cluster.
    kafkaProvisioner.removeClusters(true);
    kd = kafkaProvisioner.deployCluster(name, profilingResult.capacity, profilingResult.config);
} else {
    // TODO validate config / capacity
    kd = new ManagedKafkaDeployment(mk, kafkaCluster);
    kd.start();
}
instanceBootstrap = kd.waitUntilReady();
}
Use of org.bf2.operator.operands.KafkaCluster in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class IngressControllerManager, method ingressControllersFrom.
private void ingressControllersFrom(Map<String, IngressController> ingressControllers, String clusterDomain, List<Kafka> kafkas, long connectionDemand) {
// Aggregate throughput demand across all Kafka instances, falling back to
// the configured defaults when an instance carries no explicit quota.
LongSummaryStatistics egress = summarize(kafkas, KafkaCluster::getFetchQuota, () -> config.getKafka().getEgressPerSec());
LongSummaryStatistics ingress = summarize(kafkas, KafkaCluster::getProduceQuota, () -> config.getKafka().getIngressPerSec());
// Build one zone-scoped ingress controller per entry.
ingressControllers.forEach((zone, ingressController) -> {
    String kasZone = "kas-" + zone;
    String domain = kasZone + "." + clusterDomain;
    int replicas = numReplicasForZone(zone, nodeInformer.getList(), ingress, egress, connectionDemand);
    // Routes opt in to this controller via a per-zone label.
    Map<String, String> routeMatchLabel = Map.of("managedkafka.bf2.org/" + kasZone, "true");
    LabelSelector routeSelector = new LabelSelector(null, routeMatchLabel);
    routeMatchLabels.putAll(routeMatchLabel);
    buildIngressController(kasZone, domain, ingressController, replicas, routeSelector, zone);
});
}
Use of org.bf2.operator.operands.KafkaCluster in the project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class AbstractKafkaCluster, method isReconciliationPaused.
/**
 * Reports whether the cached Kafka resource for the given ManagedKafka
 * carries a {@code ReconciliationPaused} condition with status {@code True}.
 */
public boolean isReconciliationPaused(ManagedKafka managedKafka) {
Kafka kafka = cachedKafka(managedKafka);
boolean paused = false;
if (kafka != null && kafka.getStatus() != null) {
    // Literal-first equals is null-safe for both condition fields.
    paused = hasKafkaCondition(kafka, c -> "ReconciliationPaused".equals(c.getType()) && "True".equals(c.getStatus()));
}
log.tracef("KafkaCluster isReconciliationPaused = %s", paused);
return paused;
}
Aggregations