Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class ManagedKafkaValueProdMinimumTest, method doTestValueProdMinimum:
private void doTestValueProdMinimum(ManagedKafkaCapacity capacity, int workerProducerRate, int numClients,
        String zkContainerMemory, String zkJavaMemory, String kafkaContainerMemory, String kafkaJavaMemory,
        String kfCpu, int topics, int partitionsPerTopic, String key, String testName) throws Exception {
    int numWorkers = numClients / 10;
    int messageSize = 1024;
    ensureClientClusterCapacityForWorkers(omb.getOmbCluster(), numWorkers, WORKER_SIZE, CPU_SIZE);
    workers = omb.deployWorkers(numWorkers);
    LOGGER.info("Test config: {}", key);
    KafkaInstanceConfiguration profile = AdopterProfile.buildProfile(zkContainerMemory, zkJavaMemory, "1000m",
            kafkaContainerMemory, kafkaJavaMemory, kfCpu);
    String bootstrapHosts = kafkaProvisioner.deployCluster("cluster1", capacity, profile).waitUntilReady();
    OMBDriver driver = new OMBDriver()
            .setReplicationFactor(3)
            .setTopicConfig("min.insync.replicas=2\n")
            .setCommonConfigWithBootstrapUrl(bootstrapHosts)
            .setProducerConfig("acks=all\n")
            .setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
    int producerConsumer = numClients / topics / 2;
    OMBWorkloadResult result = omb.runWorkload(instanceDir, driver, workers, new OMBWorkload()
            .setName(key)
            .setTopics(topics)
            .setPartitionsPerTopic(partitionsPerTopic)
            .setMessageSize(messageSize)
            .setPayloadFile("src/test/resources/payload/payload-1Kb.data")
            .setSubscriptionsPerTopic(1)
            .setConsumerPerSubscription(producerConsumer)
            .setProducersPerTopic(producerConsumer)
            .setProducerRate(workerProducerRate)
            .setConsumerBacklogSizeGB(0));
    LOGGER.info("{} : results {}", key, result.getResultFile());
    // double threshold = 0.9 * targetRate;
    // List<Double> lowProduceRates = result.getTestResult().publishRate.stream().filter(rate -> rate < threshold).collect(Collectors.toList());
    // List<Double> lowConsumeRates = result.getTestResult().consumeRate.stream().filter(rate -> rate < threshold).collect(Collectors.toList());
    // LOGGER.info("{}: low produce : {} low consume: {}", key, lowProduceRates, lowConsumeRates);
    // assertTrue(lowProduceRates.isEmpty(), "Unexpectedly low produce rate(s): " + lowProduceRates);
    // assertTrue(lowConsumeRates.isEmpty(), "Unexpectedly low consume rate(s): " + lowConsumeRates);
}
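The client-sizing arithmetic above splits the requested client count across OMB workers, topics, and producer/consumer roles; a worked sketch with illustrative (assumed) inputs:

    // Illustrative inputs only; the real tests pass these in as parameters.
    int numClients = 40;
    int topics = 4;
    int numWorkers = numClients / 10;               // 4 OMB worker pods deployed
    int producerConsumer = numClients / topics / 2; // 5 producers and 5 consumers per topic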
Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class KafkaClusterTest, method testManagedKafkaToKafkaWithSizeChanges:
@Test
void testManagedKafkaToKafkaWithSizeChanges() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        ObjectMapper objectMapper = new ObjectMapper();
        KafkaInstanceConfiguration clone = objectMapper.readValue(objectMapper.writeValueAsString(config),
                KafkaInstanceConfiguration.class);
        clone.getKafka().setOneInstancePerNode(false);
        clone.getKafka().setColocateWithZookeeper(false);
        clone.getExporter().setColocateWithZookeeper(false);
        kafkaCluster.setKafkaConfiguration(clone);
        Kafka kafka = kafkaCluster.kafkaFrom(exampleManagedKafka("60Gi"), null);
        Kafka reduced = kafkaCluster.kafkaFrom(exampleManagedKafka("40Gi"), kafka);
        // should not change to a smaller size
        diffToExpected(reduced, "/expected/strimzi.yml");
        Kafka larger = kafkaCluster.kafkaFrom(exampleManagedKafka("80Gi"), kafka);
        // should change to a larger size
        diffToExpected(larger, "/expected/strimzi.yml",
                "[{\"op\":\"replace\",\"path\":\"/spec/kafka/config/client.quota.callback.static.storage.soft\",\"value\":\"28633115306\"},"
                + "{\"op\":\"replace\",\"path\":\"/spec/kafka/config/client.quota.callback.static.storage.hard\",\"value\":\"28675058306\"},"
                + "{\"op\":\"replace\",\"path\":\"/spec/kafka/storage/volumes/0/size\",\"value\":\"39412476546\"}]");
    } finally {
        kafkaCluster.setKafkaConfiguration(config);
    }
}
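The ObjectMapper write/read round trip in this test is a generic way to deep-clone a Jackson-serializable bean before mutating it. A standalone sketch of the same idiom (the deepClone helper name is ours, not from the project):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.io.IOException;

    static KafkaInstanceConfiguration deepClone(KafkaInstanceConfiguration source) throws IOException {
        // Serializing to JSON and reading it back yields an independent copy,
        // so test mutations never leak into the shared configuration object.
        ObjectMapper objectMapper = new ObjectMapper();
        return objectMapper.readValue(objectMapper.writeValueAsString(source), KafkaInstanceConfiguration.class);
    }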
Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class AdopterProfile, method buildProfile:
public static KafkaInstanceConfiguration buildProfile(String zookeeperContainerMemory, String zookeeperJavaMemory,
        String zookeeperCpu, String kafkaContainerMemory, String kafkaJavaMemory, String kafkaCpu) {
    KafkaInstanceConfiguration config = new KafkaInstanceConfiguration();
    config.getKafka().setMaxConnections(Integer.MAX_VALUE);
    config.getKafka().setConnectionAttemptsPerSec(Integer.MAX_VALUE);
    config.getKafka().setOneInstancePerNode(true);
    config.getKafka().setColocateWithZookeeper(BROKER_COLLOCATED_WITH_ZOOKEEPER);
    config.getExporter().setColocateWithZookeeper(BROKER_COLLOCATED_WITH_ZOOKEEPER);
    config.getKafka().setContainerMemory(kafkaContainerMemory);
    config.getKafka().setContainerCpu(kafkaCpu);
    config.getKafka().setJvmXms(kafkaJavaMemory);
    config.getKafka().setEnableQuota(false);
    config.getZookeeper().setContainerCpu(zookeeperCpu);
    config.getZookeeper().setContainerMemory(zookeeperContainerMemory);
    config.getZookeeper().setJvmXms(zookeeperJavaMemory);
    openListenersAndAccess(config);
    return config;
}
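A call to buildProfile might look like the following; the resource values are illustrative assumptions, shaped like the ones doTestValueProdMinimum passes through above:

    // Parameter order: zookeeperContainerMemory, zookeeperJavaMemory, zookeeperCpu,
    //                  kafkaContainerMemory, kafkaJavaMemory, kafkaCpu
    KafkaInstanceConfiguration profile = AdopterProfile.buildProfile(
            "1Gi", "512m", "1000m",   // ZooKeeper: container memory, JVM Xms, CPU (assumed values)
            "8Gi", "4G", "4000m");    // Kafka: container memory, JVM Xms, CPU (assumed values)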
Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class KafkaClusterTest, method testScalingAndReplicationFactor:
@Test
void testScalingAndReplicationFactor() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        KafkaInstanceConfiguration clone = Serialization.clone(config);
        clone.getKafka().setScalingAndReplicationFactor(1);
        kafkaCluster.setKafkaConfiguration(clone);
        ManagedKafka mk = exampleManagedKafka("60Gi");
        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);
        diffToExpected(kafka.getSpec().getKafka().getConfig(), "/expected/scaling-one.yml");
    } finally {
        kafkaCluster.setKafkaConfiguration(config);
    }
}
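Each KafkaClusterTest method here wraps its changes in the same clone-mutate-restore pattern, so the shared kafkaCluster fixture is left untouched for later tests; condensed, using only calls from the tests above:

    KafkaInstanceConfiguration original = kafkaCluster.getKafkaConfiguration();
    try {
        KafkaInstanceConfiguration clone = Serialization.clone(original);
        // ...mutate the clone, then install it for the duration of the test...
        kafkaCluster.setKafkaConfiguration(clone);
        // ...build a Kafka via kafkaCluster.kafkaFrom(...) and assert on it...
    } finally {
        kafkaCluster.setKafkaConfiguration(original); // always restore the defaults
    }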
Use of org.bf2.operator.operands.KafkaInstanceConfiguration in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
Class KafkaClusterTest, method testManagedKafkaToKafkaWithCustomConfiguration:
@Test
void testManagedKafkaToKafkaWithCustomConfiguration() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        KafkaInstanceConfiguration clone = Serialization.clone(config);
        clone.getKafka().setConnectionAttemptsPerSec(300);
        clone.getKafka().setContainerMemory("2Gi");
        clone.getKafka().setJvmXx("foo bar, foo2 bar2");
        clone.getZookeeper().setReplicas(5);
        clone.getZookeeper().setContainerMemory("11Gi");
        clone.getZookeeper().setJvmXx("zkfoo zkbar, zkfoo2 zkbar2");
        clone.getKafka().setOneInstancePerNode(false);
        clone.getKafka().setColocateWithZookeeper(false);
        clone.getExporter().setColocateWithZookeeper(false);
        kafkaCluster.setKafkaConfiguration(clone);
        ManagedKafka mk = exampleManagedKafka("60Gi");
        mk.getSpec().getCapacity().setMaxPartitions(2 * clone.getKafka().getPartitionCapacity());
        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);
        diffToExpected(kafka, "/expected/custom-config-strimzi.yml");
    } finally {
        kafkaCluster.setKafkaConfiguration(config);
    }
}
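Across all of these usages the pattern is the same: the nested Kafka, ZooKeeper, and exporter sub-configurations are reached through getters and tuned with plain setters before the configuration is handed to the operand. A minimal standalone sketch (setters taken from the snippets above; the values are illustrative):

    KafkaInstanceConfiguration config = new KafkaInstanceConfiguration();
    config.getKafka().setContainerMemory("2Gi");          // broker container memory
    config.getKafka().setOneInstancePerNode(false);       // allow several brokers per node
    config.getZookeeper().setReplicas(3);                 // ZooKeeper ensemble size
    config.getExporter().setColocateWithZookeeper(false); // schedule the exporter separately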