Example 26 with Kafka

Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaManager, method currentKafkaLogMessageFormatVersion:

/**
 * Returns the current Kafka log message format version for the Kafka instance.
 * It comes directly from the Kafka custom resource, or from the ManagedKafka spec on first creation.
 *
 * @param managedKafka ManagedKafka instance
 * @return current Kafka log message format version for the Kafka instance
 */
public String currentKafkaLogMessageFormatVersion(ManagedKafka managedKafka) {
    Kafka kafka = cachedKafka(managedKafka);
    String kafkaLogMessageFormatVersion;
    String current;
    // prefer an explicit log.message.format.version override in the Kafka custom resource,
    // falling back to the broker version
    if (kafka != null) {
        Object logMessageFormat = kafka.getSpec().getKafka().getConfig().get("log.message.format.version");
        current = logMessageFormat != null ? logMessageFormat.toString() : kafka.getSpec().getKafka().getVersion();
    } else {
        // on first Kafka resource creation, take the log message format version from the ManagedKafka resource spec
        current = managedKafka.getSpec().getVersions().getKafka();
    }
    kafkaLogMessageFormatVersion = AbstractKafkaCluster.getKafkaLogMessageFormatVersion(current);
    log.debugf("[%s/%s] currentKafkaLogMessageFormatVersion = %s", managedKafka.getMetadata().getNamespace(), managedKafka.getMetadata().getName(), kafkaLogMessageFormatVersion);
    return kafkaLogMessageFormatVersion;
}
Also used: Kafka (io.strimzi.api.kafka.model.Kafka), ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka)
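
The helper AbstractKafkaCluster.getKafkaLogMessageFormatVersion reduces a full broker version such as 3.2.0 to the major.minor form that log.message.format.version expects. A minimal standalone sketch of that truncation, assuming this is all the helper does; VersionSketch and majorMinor are hypothetical names, not kas-fleetshard API:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VersionSketch {

    // leading "major.minor" of a dotted version string, e.g. "3.2.0" -> "3.2"
    private static final Pattern MAJOR_MINOR = Pattern.compile("^(\\d+\\.\\d+)");

    static String majorMinor(String version) {
        Matcher m = MAJOR_MINOR.matcher(version);
        // fall back to the input when it does not look like a dotted version
        return m.find() ? m.group(1) : version;
    }

    public static void main(String[] args) {
        System.out.println(majorMinor("3.2.0")); // 3.2
        System.out.println(majorMinor("3.2"));   // 3.2
    }
}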

Example 27 with Kafka

Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaManager, method currentKafkaIbpVersion:

/**
 * Returns the current Kafka inter-broker protocol (IBP) version for the Kafka instance.
 * It comes directly from the Kafka custom resource, or from the ManagedKafka spec on first creation.
 *
 * @param managedKafka ManagedKafka instance
 * @return current Kafka inter-broker protocol version for the Kafka instance
 */
public String currentKafkaIbpVersion(ManagedKafka managedKafka) {
    Kafka kafka = cachedKafka(managedKafka);
    String kafkaIbpVersion;
    // prefer an explicit inter.broker.protocol.version override in the Kafka custom resource,
    // falling back to the broker version
    if (kafka != null) {
        Object interBrokerProtocol = kafka.getSpec().getKafka().getConfig().get("inter.broker.protocol.version");
        kafkaIbpVersion = interBrokerProtocol != null
                ? AbstractKafkaCluster.getKafkaIbpVersion(interBrokerProtocol.toString())
                : AbstractKafkaCluster.getKafkaIbpVersion(kafka.getSpec().getKafka().getVersion());
    } else {
        // on first Kafka resource creation, take the IBP version from the ManagedKafka resource spec
        kafkaIbpVersion = managedKafka.getSpec().getVersions().getKafkaIbp();
        // older ManagedKafka instances may not have the IBP field specified
        if (kafkaIbpVersion == null) {
            kafkaIbpVersion = AbstractKafkaCluster.getKafkaIbpVersion(managedKafka.getSpec().getVersions().getKafka());
        }
    }
    log.debugf("[%s/%s] currentKafkaIbpVersion = %s", managedKafka.getMetadata().getNamespace(), managedKafka.getMetadata().getName(), kafkaIbpVersion);
    return kafkaIbpVersion;
}
Also used: Kafka (io.strimzi.api.kafka.model.Kafka), ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka)
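
Taken together, the lookup falls through three levels: an explicit inter.broker.protocol.version entry, the Kafka CR's broker version, then the ManagedKafka spec. A hedged condensation of that precedence as a standalone method; IbpSketch, resolveIbp, and majorMinor are hypothetical names, not kas-fleetshard API:

import java.util.Map;

public class IbpSketch {

    // hypothetical condensation of the precedence used above
    static String resolveIbp(Map<String, Object> kafkaConfig, String kafkaVersion,
            String managedKafkaIbp, String managedKafkaVersion) {
        if (kafkaConfig != null) {
            Object override = kafkaConfig.get("inter.broker.protocol.version");
            if (override != null) {
                return majorMinor(override.toString());
            }
            return majorMinor(kafkaVersion);
        }
        // no cached Kafka CR: first creation, use the ManagedKafka spec
        return managedKafkaIbp != null ? managedKafkaIbp : majorMinor(managedKafkaVersion);
    }

    static String majorMinor(String version) {
        int second = version.indexOf('.', version.indexOf('.') + 1);
        return second == -1 ? version : version.substring(0, second);
    }

    public static void main(String[] args) {
        System.out.println(resolveIbp(Map.of(), "3.2.0", null, null)); // 3.2
        System.out.println(resolveIbp(null, null, null, "3.2.0"));     // 3.2
    }
}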

Example 28 with Kafka

Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ManagedKafkaProvisioner, method install:

/**
 * Install this Kafka provisioner. This can be called once per test class or per test method.
 */
public void install() throws Exception {
    // delete/create the namespaces to be used
    Map<String, String> nsAnnotations = new HashMap<>();
    if (PerformanceEnvironment.KAFKA_COLLECT_LOG) {
        nsAnnotations.put(Constants.ORG_BF2_KAFKA_PERFORMANCE_COLLECTPODLOG, "true");
    }
    cluster.waitForDeleteNamespace(StrimziOperatorManager.OPERATOR_NS);
    FleetShardOperatorManager.deleteFleetShard(cluster.kubeClient()).get(2, TimeUnit.MINUTES);
    cluster.createNamespace(Constants.KAFKA_NAMESPACE, nsAnnotations, Map.of());
    List<Node> workers = cluster.getWorkerNodes();
    boolean smallNodes = workers.stream().anyMatch(n -> TestUtils.getMaxAvailableResources(n).cpuMillis < 3000);
    if (smallNodes) {
        MixedOperation<Deployment, DeploymentList, RollableScalableResource<Deployment>> deployments = cluster.kubeClient().client().apps().deployments();
        this.informer = deployments.inAnyNamespace().inform(new ResourceEventHandler<Deployment>() {

            @Override
            public void onUpdate(Deployment oldObj, Deployment newObj) {
                onAdd(newObj);
            }

            @Override
            public void onDelete(Deployment obj, boolean deletedFinalStateUnknown) {
            }

            @Override
            public void onAdd(Deployment obj) {
                if (!obj.getMetadata().getNamespace().equals(StrimziOperatorManager.OPERATOR_NS) && !obj.getMetadata().getNamespace().equals(FleetShardOperatorManager.OPERATOR_NS)) {
                    return;
                }
                // patch any deployment that requests a lot of cpu, and make sure it's on the perf infra
                deployments.inNamespace(obj.getMetadata().getNamespace()).withName(obj.getMetadata().getName()).edit(new TypedVisitor<ResourceRequirementsBuilder>() {

                    @Override
                    public void visit(ResourceRequirementsBuilder element) {
                        Quantity cpu = null;
                        if (element.getRequests() != null) {
                            cpu = element.getRequests().get("cpu");
                        }
                        if (cpu == null && element.getLimits() != null) {
                            cpu = element.getLimits().get("cpu");
                        }
                        if (cpu != null && Quantity.getAmountInBytes(cpu).compareTo(BigDecimal.valueOf(1)) > 0) {
                            element.addToRequests("cpu", Quantity.parse("1"));
                        }
                    }
                });
            }
        });
    }
    // installs the Strimzi Operator using the OLM bundle
    CompletableFuture<Void> strimziFuture = strimziManager.deployStrimziOperator();
    cluster.connectNamespaceToMonitoringStack(StrimziOperatorManager.OPERATOR_NS);
    // installs a cluster wide fleetshard operator
    // not looking at the returned futures - it's assumed that we'll eventually wait on the managed kafka deployment
    CompletableFuture<Void> future = FleetShardOperatorManager.deployFleetShardOperator(cluster.kubeClient());
    CompletableFuture.allOf(future, strimziFuture).get(2, TimeUnit.MINUTES);
    var agentResource = this.cluster.kubeClient().client().resource(new ManagedKafkaAgentBuilder()
            .withNewMetadata()
                .withName(ManagedKafkaAgentResourceClient.RESOURCE_NAME)
                .withNamespace(FleetShardOperatorManager.OPERATOR_NS)
            .endMetadata()
            .withSpec(new ManagedKafkaAgentSpecBuilder()
                    .withNewObservability()
                        .withAccessToken("")
                        .withChannel("")
                        .withRepository("")
                        .withTag("")
                    .endObservability()
                    .build())
            .build());
    agentResource.createOrReplace();
    // FleetShardOperatorManager.deployFleetShardSync(cluster.kubeClient());
    cluster.connectNamespaceToMonitoringStack(FleetShardOperatorManager.OPERATOR_NS);
    strimziVersions = SyncApiClient
            .getSortedAvailableStrimziVersions(() -> agentResource.fromServer().get().getStatus())
            .collect(Collectors.toList());
}
Also used: HashMap (java.util.HashMap), Node (io.fabric8.kubernetes.api.model.Node), ResourceRequirementsBuilder (io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder), ManagedKafkaAgentSpecBuilder (org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentSpecBuilder), Deployment (io.fabric8.kubernetes.api.model.apps.Deployment), Quantity (io.fabric8.kubernetes.api.model.Quantity), ManagedKafkaAgentBuilder (org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentBuilder), ResourceEventHandler (io.fabric8.kubernetes.client.informers.ResourceEventHandler), DeploymentList (io.fabric8.kubernetes.api.model.apps.DeploymentList), RollableScalableResource (io.fabric8.kubernetes.client.dsl.RollableScalableResource)
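
The visitor above compares CPU quantities numerically: fabric8's Quantity.getAmountInBytes normalizes "1500m" to 1.5 and "2" to 2, so any request above one full core gets capped to "1". A small standalone check of that comparison; the class name is hypothetical:

import io.fabric8.kubernetes.api.model.Quantity;
import java.math.BigDecimal;

public class CpuCapSketch {
    public static void main(String[] args) {
        BigDecimal oneCore = BigDecimal.ONE;
        for (String raw : new String[] { "500m", "1", "1500m", "2" }) {
            Quantity cpu = Quantity.parse(raw);
            // mirrors the visitor's condition: cap only when the request exceeds one core
            boolean capped = Quantity.getAmountInBytes(cpu).compareTo(oneCore) > 0;
            System.out.printf("%s -> capped=%b%n", raw, capped);
        }
    }
}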

Example 29 with Kafka

Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaInstanceScalingSmallTest, method testConnections:

@ParameterizedTest(name = "testConnections_m{0}_r{1}_p{2}_c{3}")
@CsvSource({ "100, 50, 8, 8, 4Gi, 750m", "100, 50, 1, 15, 4Gi, 750m", "100, 50, 15, 1, 4Gi, 750m" })
void testConnections(int maxConnections, int connectionCreationRate, int numProducers, int numConsumers, String ombWorkerMem, String ombWorkerCpu, TestInfo info) throws Exception {
    int messageSize = 1024;
    int targetRate = 2_000;
    int workersPerInstance = 2;
    ManagedKafkaCapacity capacity = kafkaProvisioner.defaultCapacity((long) targetRate * messageSize * 2);
    capacity.setMaxConnectionAttemptsPerSec(connectionCreationRate);
    capacity.setTotalMaxConnections(maxConnections);
    ManagedKafkaDeployment kafkaDeployment = kafkaProvisioner.deployCluster("cluster1", capacity, AdopterProfile.SMALL_VALUE_PROD);
    omb.setWorkerContainerMemory(Quantity.parse(ombWorkerMem));
    omb.setWorkerCpu(Quantity.parse(ombWorkerCpu));
    workers = omb.deployWorkers(workersPerInstance);
    Map<ManagedKafkaDeployment, List<String>> workerMapping = new HashMap<>();
    Iterator<String> workerIt = workers.iterator();
    Map<ManagedKafkaDeployment, String> instanceBootstrap = new HashMap<>();
    List<String> ws = new ArrayList<>();
    for (int w = 0; w < workersPerInstance; w++) {
        ws.add(workerIt.next());
    }
    workerMapping.put(kafkaDeployment, ws);
    instanceBootstrap.put(kafkaDeployment, kafkaDeployment.waitUntilReady());
    ExecutorService executorService = Executors.newFixedThreadPool(1);
    AtomicInteger timeout = new AtomicInteger();
    List<TestResult> testResults = new ArrayList<>();
    try {
        List<Future<OMBWorkloadResult>> results = new ArrayList<>();
        for (Map.Entry<ManagedKafkaDeployment, String> entry : instanceBootstrap.entrySet()) {
            File ombDir = new File(instanceDir, entry.getKey().getManagedKafka().getMetadata().getName());
            Files.createDirectories(ombDir.toPath());
            OMBDriver driver = new OMBDriver()
                    .setReplicationFactor(3)
                    .setTopicConfig("min.insync.replicas=2\n")
                    .setCommonConfigWithBootstrapUrl(entry.getValue())
                    .setProducerConfig("acks=all\n")
                    .setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
            OMBWorkload workload = new OMBWorkload()
                    .setName(String.format("Kafka Cluster: %s", entry.getKey().getManagedKafka().getMetadata().getName()))
                    .setTopics(1)
                    .setPartitionsPerTopic(99)
                    .setMessageSize(messageSize)
                    .setPayloadFile("src/test/resources/payload/payload-1Kb.data")
                    .setSubscriptionsPerTopic(numConsumers)
                    .setConsumerPerSubscription(1)
                    .setProducersPerTopic(numProducers)
                    .setProducerRate(targetRate)
                    .setConsumerBacklogSizeGB(0);
            timeout.set(Math.max(workload.getTestDurationMinutes() + workload.getWarmupDurationMinutes(), timeout.get()));
            results.add(executorService.submit(() -> {
                OMBWorkloadResult result = omb.runWorkload(ombDir, driver, workerMapping.get(entry.getKey()), workload);
                LOGGER.info("Result stored in {}", result.getResultFile().getAbsolutePath());
                return result;
            }));
        }
        for (Future<OMBWorkloadResult> result : results) {
            testResults.add(result.get(timeout.get() * 2L, TimeUnit.MINUTES).getTestResult());
        }
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(1, TimeUnit.MINUTES);
    }
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), TestResult (io.openmessaging.benchmark.TestResult), ManagedKafkaCapacity (org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), List (java.util.List), Map (java.util.Map), File (java.io.File), CsvSource (org.junit.jupiter.params.provider.CsvSource), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
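
The loop above hands each Kafka instance a fixed-size slice of the deployed OMB workers. A minimal sketch of that partitioning in isolation; WorkerPartitionSketch and partition are hypothetical names, not test-suite API:

import java.util.ArrayList;
import java.util.List;

public class WorkerPartitionSketch {

    // split the worker list into consecutive groups of perInstance entries
    static List<List<String>> partition(List<String> workers, int perInstance) {
        List<List<String>> groups = new ArrayList<>();
        for (int i = 0; i + perInstance <= workers.size(); i += perInstance) {
            groups.add(new ArrayList<>(workers.subList(i, i + perInstance)));
        }
        return groups;
    }

    public static void main(String[] args) {
        List<String> workers = List.of("w0", "w1", "w2", "w3");
        // two workers per instance -> [[w0, w1], [w2, w3]]
        System.out.println(partition(workers, 2));
    }
}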

Example 30 with Kafka

Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaInstanceScalingSmallTest, method testValueProdCiCd:

@Test
void testValueProdCiCd(TestInfo info) throws Exception {
    int noOfWorkers = 2;
    int messageSize = 1024;
    int targetRate = 40_000;
    // size the capacity and deploy the Kafka cluster from the template
    ManagedKafkaCapacity capacity = kafkaProvisioner.defaultCapacity((long) targetRate * messageSize);
    ManagedKafkaDeployment kd = kafkaProvisioner.deployCluster("cicdcluster", capacity, AdopterProfile.VALUE_PROD);
    String instanceBootstrap = kd.waitUntilReady();
    // create omb workers
    workers = omb.deployWorkers(noOfWorkers);
    List<String> instanceWorkers = new ArrayList<>(workers);
    assertEquals(noOfWorkers, instanceWorkers.size(), String.format("failed to create %s omb workers", noOfWorkers));
    // create a directory for the test case.
    File instanceDir = new File(testDir, "testCiCd");
    assertTrue(instanceDir.mkdir(), String.format("failed to create directory %s", instanceDir.getName()));
    // create driver.
    File ombDir = new File(instanceDir, "testresults");
    Files.createDirectories(ombDir.toPath());
    OMBDriver driver = new OMBDriver()
            .setReplicationFactor(1)
            .setTopicConfig("min.insync.replicas=1\n")
            .setCommonConfigWithBootstrapUrl(instanceBootstrap)
            .setProducerConfig("acks=all\n")
            .setConsumerConfig("auto.offset.reset=earliest\nenable.auto.commit=false\n");
    // construct the workload
    OMBWorkload ombWorkload = new OMBWorkload()
            .setName("CiCdPerfTest")
            .setTopics(1)
            .setPartitionsPerTopic(10)
            .setMessageSize(messageSize)
            .setPayloadFile("src/test/resources/payload/payload-1Kb.data")
            .setSubscriptionsPerTopic(1)
            .setConsumerPerSubscription(1)
            .setProducersPerTopic(1)
            .setProducerRate(4_000)
            .setConsumerBacklogSizeGB(0);
    // run the workload
    OMBWorkloadResult result = omb.runWorkload(ombDir, driver, instanceWorkers, ombWorkload);
    // store the filtered json data into a file
    TestUtils.createJsonObject(testDir, result.getTestResult());
}
Also used: ArrayList (java.util.ArrayList), File (java.io.File), ManagedKafkaCapacity (org.bf2.operator.resources.v1alpha1.ManagedKafkaCapacity), Test (org.junit.jupiter.api.Test), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
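
The defaultCapacity argument above sizes ingress from the target throughput: 40,000 msg/s at 1 KiB each is roughly 39 MiB/s. A quick standalone check of that arithmetic; the class name is hypothetical:

public class CapacitySketch {
    public static void main(String[] args) {
        long targetRate = 40_000;  // messages per second
        long messageSize = 1024;   // bytes per message
        long bytesPerSec = targetRate * messageSize;
        // prints ~39.1 MiB/s
        System.out.printf("%.1f MiB/s%n", bytesPerSec / (1024.0 * 1024.0));
    }
}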

Aggregations

ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka): 45
Kafka (io.strimzi.api.kafka.model.Kafka): 31
Test (org.junit.jupiter.api.Test): 24
List (java.util.List): 19
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 19
QuarkusTest (io.quarkus.test.junit.QuarkusTest): 18
Map (java.util.Map): 18
ArrayList (java.util.ArrayList): 17
Inject (javax.inject.Inject): 13
Quantity (io.fabric8.kubernetes.api.model.Quantity): 12
Optional (java.util.Optional): 11
Collections (java.util.Collections): 10
Collectors (java.util.stream.Collectors): 10
HashMap (java.util.HashMap): 9
Objects (java.util.Objects): 9
StrimziManager (org.bf2.operator.managers.StrimziManager): 9
Logger (org.jboss.logging.Logger): 9
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 8
IOException (java.io.IOException): 8
ManagedKafkaUtils.exampleManagedKafka (org.bf2.operator.utils.ManagedKafkaUtils.exampleManagedKafka): 8