Search in sources :

Example 1 with ManagedKafkaCapacity

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ManagedKafkaProvisioner, method defaultCapacity:

public ManagedKafkaCapacity defaultCapacity(long ingressEgressThroughput) {
    // Express the requested ingress/egress throughput (bytes/sec) as a k8s Quantity.
    Quantity throughputPerSec = Quantity.parse(String.valueOf(ingressEgressThroughput));
    // TODO: this value is roughly 3x the old value from KafkaConfigurations
    // should probably default to Value Prod instead
    Quantity maxRetention = Quantity.parse("600Gi");
    return new ManagedKafkaCapacityBuilder()
            .withIngressEgressThroughputPerSec(throughputPerSec)
            .withMaxDataRetentionSize(maxRetention)
            .build();
}
Also used : ManagedKafkaCapacityBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacityBuilder)

Example 2 with ManagedKafkaCapacity

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class DrainCleanerTest, method testKafkaAvailabilityDuringClusterUpgrade:

@Test
@Tag(TestTags.PERF)
void testKafkaAvailabilityDuringClusterUpgrade(TestInfo info) throws Exception {
    // Verifies Kafka availability while cluster nodes are drained, by running an
    // OMB workload concurrently with a simulated rolling node drain.
    // Workload sizing: ~40 MiB/s throughput, 12 workers, 10 topics x 205 partitions,
    // 1 KiB messages, 40k msgs/sec per worker producer.
    long throughput = 41943040;
    int numWorkers = 12;
    int topics = 10;
    int messageSize = 1024;
    int partitionsPerTopic = 205;
    int workerProducerRate = 40000;
    // Ensure the client (OMB) cluster has capacity before deploying workers.
    ensureClientClusterCapacityForWorkers(omb.getOmbCluster(), numWorkers, WORKER_SIZE, CPU_SIZE);
    workers = omb.deployWorkers(numWorkers);
    ManagedKafkaCapacity capacity = kafkaProvisioner.defaultCapacity(throughput);
    ManagedKafkaDeployment deployCluster = kafkaProvisioner.deployCluster("cluster1", capacity, AdopterProfile.VALUE_PROD);
    // Blocks until the Kafka instance is ready, then returns its bootstrap address.
    String bootstrapHosts = deployCluster.waitUntilReady();
    // Two pooled threads: one for the delayed node drain, one for the OMB workload.
    final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);
    Future<Integer> nodeDrain;
    Future<File> resultDone;
    try {
        // Start draining 2 minutes in, so the workload is already running.
        nodeDrain = scheduler.schedule(() -> {
            // this thread simulates the OpenShift cluster upgrade
            LOGGER.info("PERFORMING SCHEDULED NODES DRAIN");
            kafkaProvisioner.getKubernetesCluster().kubeClient().getClusterWorkers().forEach(node -> {
                // Drain one node at a time, wait until all pods in the Kafka
                // namespace are ready again, then make the node schedulable.
                TestUtils.drainNode(kafkaProvisioner.getKubernetesCluster(), node.getMetadata().getName());
                TestUtils.waitUntilAllPodsReady(kafkaProvisioner.getKubernetesCluster(), deployCluster.getManagedKafka().getMetadata().getNamespace());
                TestUtils.setNodeSchedule(kafkaProvisioner.getKubernetesCluster(), node.getMetadata().getName(), true);
            });
            return 0;
        }, 2, TimeUnit.MINUTES);
        // The workload runs in parallel with the drain; min.insync.replicas=2 with
        // RF=3 and acks=all is what keeps produces available during the drain.
        resultDone = scheduler.submit(() -> {
            OMBDriver driver = new OMBDriver().setReplicationFactor(3).setTopicConfig("min.insync.replicas=2\n").setCommonConfigWithBootstrapUrl(bootstrapHosts).setProducerConfig("acks=all\n").setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
            // Half the workers produce, half consume.
            int producerConsumer = numWorkers / 2;
            LOGGER.info("PERFORMING OMB WORKLOAD");
            OMBWorkloadResult result = omb.runWorkload(instanceDir, driver, workers, new OMBWorkload().setName(info.getDisplayName()).setTopics(topics).setPartitionsPerTopic(partitionsPerTopic).setMessageSize(messageSize).setPayloadFile("src/test/resources/payload/payload-1Kb.data").setSubscriptionsPerTopic(1).setConsumerPerSubscription(producerConsumer).setProducersPerTopic(producerConsumer).setProducerRate(workerProducerRate).setConsumerBacklogSizeGB(0).setTestDurationMinutes(15));
            LOGGER.info("{}: results {}", info.getDisplayName(), result.getResultFile());
            LOGGER.info("COMPLETED OMB WORKLOAD");
            return result.getResultFile();
        });
        // Both futures get a 25-minute budget; get() rethrows any task failure
        // so a drain or workload error fails the test.
        Integer podTaskRetVal = nodeDrain.get(25, TimeUnit.MINUTES);
        LOGGER.info("Node drain task return value: {}", podTaskRetVal.toString());
        File resultFile = resultDone.get(25, TimeUnit.MINUTES);
        LOGGER.info("Result file: {}", resultFile);
    } finally {
        scheduler.shutdown();
        scheduler.awaitTermination(1, TimeUnit.MINUTES);
    }
}
Also used : Quantity(io.fabric8.kubernetes.api.model.Quantity) KubeClusterResource(org.bf2.performance.framework.KubeClusterResource) TestTags(org.bf2.performance.framework.TestTags) BeforeEach(org.junit.jupiter.api.BeforeEach) ManagedKafkaCapacity(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity) TestInfo(org.junit.jupiter.api.TestInfo) File(java.io.File) Executors(java.util.concurrent.Executors) AfterAll(org.junit.jupiter.api.AfterAll) Test(org.junit.jupiter.api.Test) TimeUnit(java.util.concurrent.TimeUnit) AfterEach(org.junit.jupiter.api.AfterEach) List(java.util.List) Future(java.util.concurrent.Future) Logger(org.apache.logging.log4j.Logger) BeforeAll(org.junit.jupiter.api.BeforeAll) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Tag(org.junit.jupiter.api.Tag) LogManager(org.apache.logging.log4j.LogManager) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ManagedKafkaCapacity(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity) File(java.io.File) Test(org.junit.jupiter.api.Test) Tag(org.junit.jupiter.api.Tag)

Example 3 with ManagedKafkaCapacity

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaInstanceScalingSmallTest, method testConnections:

@ParameterizedTest(name = "tesConnections_m{0}_r{1}_p{2}_c{3}")
@CsvSource({ "100, 50, 8, 8, 4Gi, 750m", "100, 50, 1, 15, 4Gi, 750m", "100, 50, 15, 1, 4Gi, 750m" })
void testConnections(int maxConnections, int connectionCreationRate, int numProducers, int numConsumers, String ombWorkerMem, String ombWorkerCpu, TestInfo info) throws Exception {
    // Exercises the connection limits (total connections and connection creation
    // rate) of a small "value prod" instance under varying producer/consumer mixes.
    int messageSize = 1024;
    int targetRate = 2_000;
    int workersPerInstance = 2;
    // Capacity sized at 2x the raw produce byte rate (rate * message size).
    ManagedKafkaCapacity capacity = kafkaProvisioner.defaultCapacity((long) targetRate * messageSize * 2);
    capacity.setMaxConnectionAttemptsPerSec(connectionCreationRate);
    capacity.setTotalMaxConnections(maxConnections);
    ManagedKafkaDeployment kafkaDeployment = kafkaProvisioner.deployCluster("cluster1", capacity, AdopterProfile.SMALL_VALUE_PROD);
    omb.setWorkerContainerMemory(Quantity.parse(ombWorkerMem));
    omb.setWorkerCpu(Quantity.parse(ombWorkerCpu));
    workers = omb.deployWorkers(workersPerInstance);
    // Map each deployment to its dedicated slice of OMB workers. Only one
    // deployment is created here, but the structure mirrors the multi-instance
    // variant of this test.
    Map<ManagedKafkaDeployment, List<String>> workerMapping = new HashMap<>();
    Iterator<String> workerIt = workers.iterator();
    Map<ManagedKafkaDeployment, String> instanceBootstrap = new HashMap<>();
    List<String> ws = new ArrayList<>();
    for (int w = 0; w < workersPerInstance; w++) {
        ws.add(workerIt.next());
    }
    workerMapping.put(kafkaDeployment, ws);
    // waitUntilReady blocks until the instance is up and yields its bootstrap URL.
    instanceBootstrap.put(kafkaDeployment, kafkaDeployment.waitUntilReady());
    ExecutorService executorService = Executors.newFixedThreadPool(1);
    // Tracks the longest (test + warmup) duration across workloads, in minutes,
    // to size the Future.get timeout below.
    AtomicInteger timeout = new AtomicInteger();
    List<TestResult> testResults = new ArrayList<>();
    try {
        List<Future<OMBWorkloadResult>> results = new ArrayList<>();
        for (Map.Entry<ManagedKafkaDeployment, String> entry : instanceBootstrap.entrySet()) {
            // Per-instance results directory, named after the ManagedKafka resource.
            File ombDir = new File(instanceDir, entry.getKey().getManagedKafka().getMetadata().getName());
            Files.createDirectories(ombDir.toPath());
            OMBDriver driver = new OMBDriver().setReplicationFactor(3).setTopicConfig("min.insync.replicas=2\n").setCommonConfigWithBootstrapUrl(entry.getValue()).setProducerConfig("acks=all\n").setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
            OMBWorkload workload = new OMBWorkload().setName(String.format("Kafka Cluster: %s", entry.getKey().getManagedKafka().getMetadata().getName())).setTopics(1).setPartitionsPerTopic(99).setMessageSize(messageSize).setPayloadFile("src/test/resources/payload/payload-1Kb.data").setSubscriptionsPerTopic(numConsumers).setConsumerPerSubscription(1).setProducersPerTopic(numProducers).setProducerRate(targetRate).setConsumerBacklogSizeGB(0);
            timeout.set(Math.max(workload.getTestDurationMinutes() + workload.getWarmupDurationMinutes(), timeout.get()));
            results.add(executorService.submit(() -> {
                OMBWorkloadResult result = omb.runWorkload(ombDir, driver, workerMapping.get(entry.getKey()), workload);
                LOGGER.info("Result stored in {}", result.getResultFile().getAbsolutePath());
                return result;
            }));
        }
        // Allow 2x the longest workload duration before giving up on a result.
        for (Future<OMBWorkloadResult> result : results) {
            testResults.add(result.get(timeout.get() * 2L, TimeUnit.MINUTES).getTestResult());
        }
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(1, TimeUnit.MINUTES);
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TestResult(io.openmessaging.benchmark.TestResult) ManagedKafkaCapacity(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) File(java.io.File) CsvSource(org.junit.jupiter.params.provider.CsvSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 4 with ManagedKafkaCapacity

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaInstanceScalingSmallTest, method testValueProdCiCd:

@Test
void testValueProdCiCd(TestInfo info) throws Exception {
    // CI/CD smoke-level perf test: deploy a single "value prod" Kafka instance,
    // run a small OMB workload against it, and persist the filtered results as
    // JSON for later comparison between pipeline runs.
    int noOfWorkers = 2;
    int messageSize = 1024;
    int targetRate = 40_000;
    // Gather required info to spin up a kafka cluster and deploy kafka cluster.
    // create template to deploy the cluster; capacity is sized from the target
    // produce byte rate (rate * message size).
    ManagedKafkaCapacity capacity = kafkaProvisioner.defaultCapacity((long) targetRate * messageSize);
    ManagedKafkaDeployment kd = kafkaProvisioner.deployCluster("cicdcluster", capacity, AdopterProfile.VALUE_PROD);
    String instanceBootstrap = kd.waitUntilReady();
    // create omb workers
    workers = omb.deployWorkers(noOfWorkers);
    List<String> instanceWorkers = new ArrayList<>(workers);
    // Use noOfWorkers rather than a hardcoded 2 so the assertion tracks the config.
    assertEquals(noOfWorkers, instanceWorkers.size(), String.format("failed to create %s omb workers", noOfWorkers));
    // create a directory for the test case.
    File instanceDir = new File(testDir, "testCiCd");
    assertTrue(instanceDir.mkdir(), String.format("failed to create directory %s", instanceDir.getName()));
    // create driver.
    File ombDir = new File(instanceDir, "testresults");
    Files.createDirectories(ombDir.toPath());
    // Fixed bug: the consumer property was misspelled "auto.offset.rest"; Kafka
    // silently ignores unknown keys, so the intended earliest-offset behavior was
    // never applied. Corrected to "auto.offset.reset" (matches the other tests).
    OMBDriver driver = new OMBDriver().setReplicationFactor(1).setTopicConfig("min.insync.replicas=1\n").setCommonConfigWithBootstrapUrl(instanceBootstrap).setProducerConfig("acks=all\n").setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
    // construct the workload; message size follows the declared messageSize.
    // NOTE(review): the producer rate (4_000) is deliberately far below targetRate
    // (40_000), which only sizes the instance capacity — confirm this is intended.
    OMBWorkload ombWorkload = new OMBWorkload().setName("CiCdPerfTest").setTopics(1).setPartitionsPerTopic(10).setMessageSize(messageSize).setPayloadFile("src/test/resources/payload/payload-1Kb.data").setSubscriptionsPerTopic(1).setConsumerPerSubscription(1).setProducersPerTopic(1).setProducerRate(4_000).setConsumerBacklogSizeGB(0);
    // run the workload
    OMBWorkloadResult result = omb.runWorkload(ombDir, driver, instanceWorkers, ombWorkload);
    // store the filtered json data into a file
    TestUtils.createJsonObject(testDir, result.getTestResult());
}
Also used : ArrayList(java.util.ArrayList) File(java.io.File) ManagedKafkaCapacity(org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 5 with ManagedKafkaCapacity

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ManagedKafkaValueProdMinimumTest, method doTestValueProdMinimum:

// Shared driver for the "value prod minimum" scenarios: deploys a cluster built
// from the given capacity and resource settings, then runs an OMB workload sized
// by numClients/topics and logs where the results were written.
// Parameters: capacity – instance capacity spec; workerProducerRate – per-worker
// produce rate; numClients – total clients (10 clients per OMB worker);
// zk*/kafka* – container/JVM memory settings; kfCpu – Kafka CPU request;
// topics/partitionsPerTopic – workload shape; key – label used for logging and
// the workload name; testName – currently unused here.
private void doTestValueProdMinimum(ManagedKafkaCapacity capacity, int workerProducerRate, int numClients, String zkContainerMemory, String zkJavaMemory, String kafkaContainerMemory, String kafkaJavaMemory, String kfCpu, int topics, int partitionsPerTopic, String key, String testName) throws Exception {
    // One OMB worker per 10 clients.
    int numWorkers = numClients / 10;
    int messageSize = 1024;
    ensureClientClusterCapacityForWorkers(omb.getOmbCluster(), numWorkers, WORKER_SIZE, CPU_SIZE);
    workers = omb.deployWorkers(numWorkers);
    LOGGER.info("Test config: {}", key);
    // Build a custom instance profile from the supplied memory/CPU settings
    // (ZooKeeper CPU is fixed at 1000m).
    KafkaInstanceConfiguration profile = AdopterProfile.buildProfile(zkContainerMemory, zkJavaMemory, "1000m", kafkaContainerMemory, kafkaJavaMemory, kfCpu);
    String bootstrapHosts = kafkaProvisioner.deployCluster("cluster1", capacity, profile).waitUntilReady();
    OMBDriver driver = new OMBDriver().setReplicationFactor(3).setTopicConfig("min.insync.replicas=2\n").setCommonConfigWithBootstrapUrl(bootstrapHosts).setProducerConfig("acks=all\n").setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
    // Split clients evenly between producers and consumers, per topic.
    int producerConsumer = numClients / topics / 2;
    OMBWorkloadResult result = omb.runWorkload(instanceDir, driver, workers, new OMBWorkload().setName(key).setTopics(topics).setPartitionsPerTopic(partitionsPerTopic).setMessageSize(messageSize).setPayloadFile("src/test/resources/payload/payload-1Kb.data").setSubscriptionsPerTopic(1).setConsumerPerSubscription(producerConsumer).setProducersPerTopic(producerConsumer).setProducerRate(workerProducerRate).setConsumerBacklogSizeGB(0));
    LOGGER.info("{} : results {}", key, result.getResultFile());
// Rate-threshold assertions are currently disabled; the test only logs results.
// double threshold = 0.9 * targetRate;
// List<Double> lowProduceRates = result.getTestResult().publishRate.stream().filter(rate -> rate < threshold).collect(Collectors.toList());
// List<Double> lowConsumeRates = result.getTestResult().consumeRate.stream().filter(rate -> rate < threshold).collect(Collectors.toList());
// LOGGER.info("{}: low produce : {} low consume: {}", key, lowProduceRates, lowConsumeRates);
// assertTrue(lowProduceRates.isEmpty(), "Unexpectedly low produce rate(s): " + lowProduceRates);
// assertTrue(lowConsumeRates.isEmpty(), "Unexpectedly low consume rate(s): " + lowConsumeRates);
}
Also used : KafkaInstanceConfiguration(org.bf2.operator.operands.KafkaInstanceConfiguration)

Aggregations

ManagedKafkaCapacity (org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity)4 File (java.io.File)3 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)3 ArrayList (java.util.ArrayList)2 List (java.util.List)2 Future (java.util.concurrent.Future)2 Tag (org.junit.jupiter.api.Tag)2 Test (org.junit.jupiter.api.Test)2 CsvSource (org.junit.jupiter.params.provider.CsvSource)2 Quantity (io.fabric8.kubernetes.api.model.Quantity)1 TestResult (io.openmessaging.benchmark.TestResult)1 HashMap (java.util.HashMap)1 Map (java.util.Map)1 ExecutorService (java.util.concurrent.ExecutorService)1 Executors (java.util.concurrent.Executors)1 ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService)1 TimeUnit (java.util.concurrent.TimeUnit)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 LogManager (org.apache.logging.log4j.LogManager)1 Logger (org.apache.logging.log4j.Logger)1