Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method testManagedKafkaToKafka:
@Test
void testManagedKafkaToKafka() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        // round-trip through Jackson to deep-copy the shared configuration before mutating it
        ObjectMapper objectMapper = new ObjectMapper();
        KafkaInstanceConfiguration clone = objectMapper.readValue(objectMapper.writeValueAsString(config), KafkaInstanceConfiguration.class);
        clone.getKafka().setOneInstancePerNode(false);
        clone.getKafka().setColocateWithZookeeper(false);
        clone.getExporter().setColocateWithZookeeper(false);
        kafkaCluster.setKafkaConfiguration(clone);
        ManagedKafka mk = exampleManagedKafka("60Gi");
        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);
        diffToExpected(kafka, "/expected/strimzi.yml");
    } finally {
        // restore the original configuration so other tests are unaffected
        kafkaCluster.setKafkaConfiguration(config);
    }
}
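The clone above relies on a Jackson round trip (serialize to JSON, read back) to deep-copy the shared configuration before mutating it. A minimal, self-contained sketch of that idiom, using a hypothetical generic helper that is not part of kas-fleetshard:

import java.io.IOException;
import com.fasterxml.jackson.databind.ObjectMapper;

public final class JsonCloner {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Deep-copies any Jackson-serializable bean by writing it to JSON and
    // reading it back, so nested mutable state is copied as well.
    public static <T> T deepCopy(T source, Class<T> type) throws IOException {
        return MAPPER.readValue(MAPPER.writeValueAsString(source), type);
    }
}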
Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class ManagedKafkaProvisioner, method deployCluster:
/**
 * Deploy a Kafka cluster using this provisioner.
 *
 * TODO: if/when this needs to test bin packing, separate the profile setting from deployCluster
 *
 * @param profile the instance configuration to apply before deployment
 */
public ManagedKafkaDeployment deployCluster(String name, ManagedKafkaCapacity managedKafkaCapacity, KafkaInstanceConfiguration profile) throws Exception {
    // set and validate the Strimzi version
    String strimziVersion = PerformanceEnvironment.STRIMZI_VERSION;
    if (strimziVersion == null) {
        strimziVersion = strimziVersions.get(strimziVersions.size() - 1);
    }
    String kafkaVersion = PerformanceEnvironment.KAFKA_VERSION;
    if (kafkaVersion == null) {
        kafkaVersion = getKafkaVersion(strimziVersion);
    }
    List<String> versions = strimziManager.getVersions();
    if (!versions.contains(strimziVersion)) {
        throw new IllegalStateException(String.format("Strimzi version %s is not in the set of installed versions %s", strimziVersion, versions));
    }

    // brokers are added in multiples of three, one set per partition-capacity increment
    int replicas = 3;
    if (managedKafkaCapacity.getMaxPartitions() != null) {
        replicas = (int) (3 * Math.ceil(managedKafkaCapacity.getMaxPartitions() / (double) profile.getKafka().getPartitionCapacity()));
    }
    applyProfile(profile, replicas);

    String namespace = Constants.KAFKA_NAMESPACE;
    ManagedKafka managedKafka = new ManagedKafkaBuilder()
            .withNewMetadata()
                .withName(name)
                .withNamespace(namespace)
            .endMetadata()
            .withSpec(new ManagedKafkaSpecBuilder()
                    .withCapacity(managedKafkaCapacity)
                    .withNewEndpoint()
                        .withBootstrapServerHost(String.format("%s-kafka-bootstrap-%s.%s", name, namespace, domain))
                        .withNewTls()
                            .withCert(tlsConfig.getCert())
                            .withKey(tlsConfig.getKey())
                        .endTls()
                    .endEndpoint()
                    .withNewVersions()
                        .withKafka(kafkaVersion)
                        .withStrimzi(strimziVersion)
                    .endVersions()
                    .build())
            .build();

    clusters.add(managedKafka);
    LOGGER.info("Deploying {}", Serialization.asYaml(managedKafka));
    ManagedKafkaDeployment kafkaDeployment = deployCluster(namespace, managedKafka);
    kafkaDeployment.start();
    return kafkaDeployment;
}
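The replica calculation above grows the broker count in multiples of three, one set of three brokers per partition-capacity increment. A worked example, assuming a hypothetical partition capacity of 1,000 per three brokers:

// hypothetical values, for illustration only
int partitionCapacity = 1000;   // profile.getKafka().getPartitionCapacity()
int maxPartitions = 2500;       // managedKafkaCapacity.getMaxPartitions()
int replicas = (int) (3 * Math.ceil(maxPartitions / (double) partitionCapacity));
// Math.ceil(2500 / 1000.0) == 3.0, so replicas == 9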
Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method testManagedKafkaToKafkaBrokerPerNode:
@Test
void testManagedKafkaToKafkaBrokerPerNode() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    config.getKafka().setOneInstancePerNode(true);
    config.getKafka().setColocateWithZookeeper(true);
    config.getExporter().setColocateWithZookeeper(true);
    try {
        ManagedKafka mk = exampleManagedKafka("60Gi");
        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);
        diffToExpected(kafka.getSpec().getKafka().getTemplate(), "/expected/broker-per-node-kafka.yml");
        diffToExpected(kafka.getSpec().getKafkaExporter().getTemplate(), "/expected/broker-per-node-exporter.yml");
        diffToExpected(kafka.getSpec().getZookeeper().getTemplate(), "/expected/broker-per-node-zookeeper.yml");
    } finally {
        // undo the in-place mutations of the shared configuration
        config.getKafka().setOneInstancePerNode(false);
        config.getKafka().setColocateWithZookeeper(false);
        config.getExporter().setColocateWithZookeeper(false);
    }
}
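This test mutates the shared configuration in place and undoes the changes in the finally block; testManagedKafkaToKafka above instead round-trips the configuration through Jackson and swaps in the clone. A sketch of applying the same clone idiom here, assuming the same ObjectMapper setup as in that test:

ObjectMapper objectMapper = new ObjectMapper();
KafkaInstanceConfiguration clone = objectMapper.readValue(
        objectMapper.writeValueAsString(config), KafkaInstanceConfiguration.class);
clone.getKafka().setOneInstancePerNode(true);
clone.getKafka().setColocateWithZookeeper(true);
clone.getExporter().setColocateWithZookeeper(true);
kafkaCluster.setKafkaConfiguration(clone);
// restore with kafkaCluster.setKafkaConfiguration(config) in the finally block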
Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class InstanceProfiler, method sizeInstance:
protected void sizeInstance() throws Exception {
    Stream<Node> workerNodes = kafkaCluster.getWorkerNodes().stream();
    if (!collocateBrokerWithZookeeper) {
        kafkaProvisioner.validateClusterForBrokers(numberOfBrokers, false, workerNodes);
        workerNodes = kafkaCluster.getWorkerNodes().stream()
                .filter(n -> n.getSpec().getTaints().stream()
                        .anyMatch(t -> t.getKey().equals(ManagedKafkaProvisioner.KAFKA_BROKER_TAINT_KEY)));
    }

    // note: these numbers seem to change per release - 4.9 reports a different allocatable than 4.8
    AvailableResources resources = getMinAvailableResources(workerNodes);
    long cpuMillis = resources.cpuMillis;
    long memoryBytes = resources.memoryBytes;

    Properties p = new Properties();
    try (InputStream is = InstanceProfiler.class.getResourceAsStream("/application.properties")) {
        p.load(is);
    }
    KafkaInstanceConfiguration defaults = Serialization.jsonMapper().convertValue(p, KafkaInstanceConfiguration.class);

    // when collocating with ZooKeeper, reduce the available resources accordingly
    if (collocateBrokerWithZookeeper) {
        // earlier code making a guess at the page cache size has been removed - until we can more reliably
        // detect its effect, there's no point in making a trade-off between extra container memory and JVM memory
        // TODO: could choose a memory size where we can fit even multiples of zookeepers
        long zookeeperBytes = Quantity.getAmountInBytes(Quantity.parse(defaults.getZookeeper().getContainerMemory())).longValue();
        long zookeeperCpu = Quantity.getAmountInBytes(Quantity.parse(defaults.getZookeeper().getContainerCpu())).movePointRight(3).longValue();

        // collect the per-pod overhead of the canary, admin server, and exporter
        List<Long> additionalPodCpu = new ArrayList<>();
        List<Long> additionalPodMemory = new ArrayList<>();
        additionalPodCpu.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getCanary().getContainerCpu())).movePointRight(3).longValue());
        additionalPodMemory.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getCanary().getContainerMemory())).longValue());
        additionalPodCpu.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getAdminserver().getContainerCpu())).movePointRight(3).longValue());
        additionalPodMemory.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getAdminserver().getContainerMemory())).longValue());
        additionalPodCpu.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getExporter().getContainerCpu())).movePointRight(3).longValue());
        additionalPodMemory.add(Quantity.getAmountInBytes(Quantity.parse(defaults.getExporter().getContainerMemory())).longValue());
        LOGGER.info("Total overhead of additional pods {} memory, {} cpu",
                additionalPodMemory.stream().collect(Collectors.summingLong(Long::valueOf)),
                additionalPodCpu.stream().collect(Collectors.summingLong(Long::valueOf)));

        // actual needs are ~800Mi and 1075m/1575m cpu over 3 nodes, but the worst case is over two.
        // amountNeeded will estimate that in a more targeted way - but still simplified
        memoryBytes = resources.memoryBytes - density * (zookeeperBytes + amountNeeded(additionalPodMemory));
        cpuMillis = resources.cpuMillis - density * (zookeeperCpu + amountNeeded(additionalPodCpu));
        // TODO: account for possible ingress replica collocation
    }

    // leave headroom in case pods eventually need to be collocated, since we don't want to adjust the resources downward
    if (density == 1) {
        memoryBytes -= 2 * ONE_GB;
        cpuMillis -= 500;
    } else {
        // we can assume a much tighter resource utilization for density 2 - it can fluctuate between releases
        // or may require adjustments as other pods are added or pod resource adjustments are made
        memoryBytes -= 1 * ONE_GB;
        cpuMillis -= 200;
    }
    memoryBytes = memoryBytes / density;
    cpuMillis = cpuMillis / density;

    long maxVmBytes = Math.min(memoryBytes - getVMOverheadForContainer(memoryBytes), MAX_KAFKA_VM_SIZE);
    if (density > 1) {
        maxVmBytes -= 1 * ONE_GB;
    }

    if (!autoSize) {
        // log the calculated sizing, then fall back to the defaults from application.properties
        long defaultMemory = Quantity.getAmountInBytes(Quantity.parse(defaults.getKafka().getContainerMemory())).longValue();
        long defaultCpu = Quantity.getAmountInBytes(Quantity.parse(defaults.getKafka().getContainerCpu())).movePointRight(3).longValue();
        long defaultMaxVmBytes = Quantity.getAmountInBytes(Quantity.parse(defaults.getKafka().getJvmXms())).longValue();
        LOGGER.info("Calculated kafka sizing {} container memory, {} container cpu, and {} vm memory", memoryBytes, cpuMillis, maxVmBytes);
        memoryBytes = defaultMemory;
        cpuMillis = defaultCpu;
        maxVmBytes = defaultMaxVmBytes;
    }

    KafkaInstanceConfiguration toUse = new KafkaInstanceConfiguration();
    toUse.getKafka().setEnableQuota(false);
    AdopterProfile.openListenersAndAccess(toUse);
    toUse.getKafka().setContainerCpu(cpuMillis + "m");
    toUse.getKafka().setJvmXms(String.valueOf(maxVmBytes));
    toUse.getKafka().setContainerMemory(String.valueOf(memoryBytes));
    profilingResult.config = toUse;
    profilingResult.config.getKafka().setColocateWithZookeeper(collocateBrokerWithZookeeper);
    profilingResult.config.getKafka().setMaxConnections(Integer.MAX_VALUE);
    profilingResult.config.getKafka().setConnectionAttemptsPerSec(Integer.MAX_VALUE);
    profilingResult.config.getKafka().setMessageMaxBytes(11534336);
    profilingResult.config.getKafka().setStorageClass(storage.name().toLowerCase());
    profilingResult.config.getZookeeper().setVolumeSize(storage.zookeeperSize);

    // once we make the determination, create the instance
    // not used, as quota is turned off
    profilingResult.capacity = kafkaProvisioner.defaultCapacity(40_000_000);
    profilingResult.capacity.setMaxDataRetentionSize(Quantity.parse((GIGS * numberOfBrokers / 3) + "Gi"));
    profilingResult.capacity.setMaxPartitions(defaults.getKafka().getPartitionCapacity() * numberOfBrokers / 3);

    Kafka kafka = profilingResult.config.getKafka();
    LOGGER.info("Running with kafka sizing {} container memory, {} container cpu, and {} vm memory",
            kafka.getContainerMemory(), kafka.getContainerCpu(), kafka.getJvmXms());

    // if running on m5.4xlarge or greater and you want to constrain resources like m5.2xlarge (fully dedicated):
    // profilingResult.config.getKafka().setContainerMemory("29013426176");
    // profilingResult.config.getKafka().setContainerCpu("6500m");
    // to constrain resources like m5.xlarge (fully dedicated):
    // profilingResult.config.getKafka().setContainerMemory("12453740544");
    // profilingResult.config.getKafka().setContainerCpu("2500m");
}
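sizeInstance() repeatedly converts Kubernetes quantity strings into plain numbers. Quantity.getAmountInBytes normalizes a fabric8 Quantity to a BigDecimal in base units, so CPU quantities expressed in millicores need movePointRight(3) to recover the millicore count. A small illustration of those conversions, assuming the same fabric8 Quantity class used above:

import io.fabric8.kubernetes.api.model.Quantity;

public class QuantityDemo {
    public static void main(String[] args) {
        // memory: "1Gi" normalizes to bytes
        long memoryBytes = Quantity.getAmountInBytes(Quantity.parse("1Gi")).longValue();
        System.out.println(memoryBytes);  // 1073741824

        // cpu: "500m" normalizes to 0.5 cores; shift three decimal places for millicores
        long cpuMillis = Quantity.getAmountInBytes(Quantity.parse("500m")).movePointRight(3).longValue();
        System.out.println(cpuMillis);    // 500
    }
}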