Search in sources :

Example 36 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class MockControlPlane, method loop().

@Scheduled(every = "{poll.interval}")
void loop() {
    // Simulates control-plane activity on every poll tick: seeds an initial
    // batch of ManagedKafka instances, then randomly marks one for deletion
    // and/or adds one, staying under the maxKafkas cap.
    if (!this.runSimulation) {
        return;
    }
    // Default Random() seeding avoids identical seeds when ticks fall within
    // the same millisecond (the old System.currentTimeMillis() seed could).
    Random random = new Random();
    log.info("control plane:: Running Simulation");
    // feed the start of clusters
    if (this.kafkas.isEmpty()) {
        // nextInt(bound) is already in [0, bound) — no Math.abs needed
        int max = random.nextInt(maxKafkas);
        for (int i = 0; i < max; i++) {
            ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
            log.infof("control plane::marking %s for addition", k.getId());
            this.kafkas.put(k.getId(), k);
        }
    }
    // delete a instance by random
    if (this.kafkas.size() > 1 && random.nextBoolean()) {
        int idx = random.nextInt(this.kafkas.size());
        int i = 0;
        for (ManagedKafka k : kafkas.values()) {
            // walk to the randomly chosen position, then mark it
            if (i++ == idx) {
                markForDeletion(k.getId());
                break;
            }
        }
    }
    // selectively add
    if (this.kafkas.size() < maxKafkas && random.nextBoolean()) {
        ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
        log.infof("control plane:: creating a new cluster %s ", k.getId());
        this.kafkas.put(k.getId(), k);
    }
    log.info("--------------------------------------------------");
    for (ManagedKafka mk : this.kafkas.values()) {
        log.infof("ManagedKafka: %s, delete requested: %s", mk.getId(), mk.getSpec().isDeleted());
    }
    log.info("--------------------------------------------------");
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Random(java.util.Random) Scheduled(io.quarkus.scheduler.Scheduled)

Example 37 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class MockControlPlane, method updateKafkaClustersStatus().

@Override
public void updateKafkaClustersStatus(@PathParam(value = "id") String id, Map<String, ManagedKafkaStatus> statusMap) {
    // Records per-cluster status reported by the fleetshard sync; a cluster
    // that is both spec-deleted and status-deleted is purged entirely.
    log.infof("control plane:: updateKafkaClustersStatus <- Received from cluster %s, %s", id, statusMap);
    // clean up the deleted
    statusMap.forEach((clusterId, status) -> {
        log.infof("control plane:: Status of %s received", clusterId);
        ManagedKafka managedKafka = this.kafkas.get(clusterId);
        if (managedKafka == null) {
            return; // status for an unknown cluster — nothing to record
        }
        if (managedKafka.getSpec().isDeleted() && isDeleted(status)) {
            log.infof("control plane:: Removing cluster %s as it is deleted", managedKafka.getId());
            this.kafkas.remove(clusterId);
            this.kafkaStatus.remove(clusterId);
        } else {
            this.kafkaStatus.put(clusterId, status);
        }
    });
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)

Example 38 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class LogCollector, method saveClusterState().

/**
 * Dumps cluster diagnostics (node descriptions, events, operator/strimzi
 * resources and logs) into {@code logpath}, one file per command output.
 *
 * @param logpath directory the log files are written into (must exist)
 * @throws IOException if any output file cannot be written
 */
private static void saveClusterState(Path logpath) throws IOException {
    KubeClient kube = KubeClient.getInstance();
    writeCmdOutput(kube, logpath, "describe-cluster-nodes.log", "describe", "nodes");
    writeCmdOutput(kube, logpath, "all-events.log", "get", "events", "--all-namespaces");
    writeCmdOutput(kube, logpath, "pvs.log", "describe", "pv");
    writeCmdOutput(kube, logpath, "operator-routes.yml", "get", "routes", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml");
    writeCmdOutput(kube, logpath, "operator-services.yml", "get", "service", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml");
    writeCmdOutput(kube, logpath, "kas-fleetshard-operator-pods.yml", "get", "pod", "-l", "app=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath, "strimzi-kafka-pods.yml", "get", "pod", "-l", "app.kubernetes.io/managed-by=strimzi-cluster-operator", "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath, "managedkafkas.yml", "get", "managedkafka", "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath, "kafkas.yml", "get", "kafka", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath, "pods-managed-by-operator.yml", "get", "pods", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath, "operator-namespace-events.yml", "get", "events", "-n", FleetShardOperatorManager.OPERATOR_NS);
    writeCmdOutput(kube, logpath, "operator.log", "logs", "deployment/" + FleetShardOperatorManager.OPERATOR_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS);
    writeCmdOutput(kube, logpath, "sync.log", "logs", "deployment/" + FleetShardOperatorManager.SYNC_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS);
    // strimzi operator pods: best-effort — a missing pod must not abort the whole dump
    StrimziOperatorManager.getStrimziOperatorPods().forEach(pod -> {
        try {
            writeCmdOutput(kube, logpath, pod.getMetadata().getName() + ".log",
                    "logs", pod.getMetadata().getName(), "--tail", "-1", "-n", pod.getMetadata().getNamespace());
        } catch (Exception e) {
            // include the exception so the failure cause is not lost
            LOGGER.warn("Cannot get logs from pod {} in namespace {}", pod.getMetadata().getName(), pod.getMetadata().getNamespace(), e);
        }
    });
}

/** Runs a CLI command against the cluster and writes its stdout to {@code logpath/fileName}. */
private static void writeCmdOutput(KubeClient kube, Path logpath, String fileName, String... args) throws IOException {
    Files.writeString(logpath.resolve(fileName), kube.cmdClient().exec(false, false, args).out());
}
Also used : KubeClient(org.bf2.test.k8s.KubeClient) IOException(java.io.IOException)

Example 39 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class Canary, method buildEnvVar().

/**
 * Builds the canary container's environment, combining static settings,
 * optional ConfigMap-backed overrides, and SASL credentials when a canary
 * service account exists.
 *
 * @param managedKafka the ManagedKafka the canary targets
 * @param current the currently deployed canary Deployment, or null on first deploy
 * @return the full env-var list for the canary container
 */
private List<EnvVar> buildEnvVar(ManagedKafka managedKafka, Deployment current) {
    List<EnvVar> envVars = new ArrayList<>(10);
    String bootstrap = getBootstrapURL(managedKafka);
    envVars.add(new EnvVarBuilder().withName("KAFKA_BOOTSTRAP_SERVERS").withValue(bootstrap).build());
    envVars.add(new EnvVarBuilder().withName("RECONCILE_INTERVAL_MS").withValue("5000").build());
    envVars.add(new EnvVarBuilder().withName("EXPECTED_CLUSTER_SIZE").withValue(String.valueOf(kafkaCluster.getReplicas(managedKafka))).build());
    String kafkaVersion = managedKafka.getSpec().getVersions().getKafka();
    // takes the current Kafka version if the canary already exists. During Kafka upgrades it doesn't have to change, as any other clients.
    if (current != null) {
        // flatMap keeps this null-safe: no NoSuchElementException if the
        // "canary" container or its KAFKA_VERSION env var is missing
        Optional<String> currentVersion = current.getSpec().getTemplate().getSpec().getContainers().stream()
                .filter(container -> "canary".equals(container.getName()))
                .findFirst()
                .flatMap(container -> container.getEnv().stream()
                        .filter(ev -> "KAFKA_VERSION".equals(ev.getName()))
                        .findFirst())
                .map(EnvVar::getValue);
        if (currentVersion.isPresent()) {
            kafkaVersion = currentVersion.get();
        }
    }
    envVars.add(new EnvVarBuilder().withName("KAFKA_VERSION").withValue(kafkaVersion).build());
    envVars.add(new EnvVarBuilder().withName("TZ").withValue("UTC").build());
    envVars.add(new EnvVarBuilder().withName("TLS_ENABLED").withValue("true").build());
    envVars.add(new EnvVarBuilder().withName("TLS_CA_CERT").withValue("/tmp/tls-ca-cert/ca.crt").build());
    // Deprecated
    EnvVarSource saramaLogEnabled = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef().withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("sarama.log.enabled").withOptional(Boolean.TRUE).endConfigMapKeyRef().build();
    EnvVarSource verbosityLogLevel = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef().withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("verbosity.log.level").withOptional(Boolean.TRUE).endConfigMapKeyRef().build();
    EnvVarSource goDebug = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef().withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("go.debug").withOptional(Boolean.TRUE).endConfigMapKeyRef().build();
    envVars.add(new EnvVarBuilder().withName("SARAMA_LOG_ENABLED").withValueFrom(saramaLogEnabled).build());
    envVars.add(new EnvVarBuilder().withName("VERBOSITY_LOG_LEVEL").withValueFrom(verbosityLogLevel).build());
    envVars.add(new EnvVarBuilder().withName("GODEBUG").withValueFrom(goDebug).build());
    envVars.add(new EnvVarBuilder().withName("TOPIC").withValue(config.getCanary().getTopic()).build());
    envVars.add(new EnvVarBuilder().withName("TOPIC_CONFIG").withValue("retention.ms=600000;segment.bytes=16384").build());
    envVars.add(new EnvVarBuilder().withName("CLIENT_ID").withValue(config.getCanary().getClientId()).build());
    envVars.add(new EnvVarBuilder().withName("CONSUMER_GROUP_ID").withValue(config.getCanary().getConsumerGroupId()).build());
    envVars.add(new EnvVarBuilder().withName("PRODUCER_LATENCY_BUCKETS").withValue(producerLatencyBuckets).build());
    envVars.add(new EnvVarBuilder().withName("ENDTOEND_LATENCY_BUCKETS").withValue(endToEndLatencyBuckets).build());
    envVars.add(new EnvVarBuilder().withName("CONNECTION_CHECK_LATENCY_BUCKETS").withValue(connectionCheckLatencyBuckets).build());
    envVars.add(new EnvVarBuilder().withName("DYNAMIC_CONFIG_FILE").withValue(CANARY_DYNAMIC_CONFIG_JSON.toString()).build());
    if (SecuritySecretManager.isCanaryServiceAccountPresent(managedKafka)) {
        envVars.add(new EnvVarBuilder().withName("SASL_MECHANISM").withValue("PLAIN").build());
        addEnvVarFromSecret(envVars, "SASL_USER", SecuritySecretManager.canarySaslSecretName(managedKafka), SecuritySecretManager.SASL_PRINCIPAL);
        addEnvVarFromSecret(envVars, "SASL_PASSWORD", SecuritySecretManager.canarySaslSecretName(managedKafka), SecuritySecretManager.SASL_PASSWORD);
    }
    envVars.add(new EnvVarBuilder().withName("STATUS_TIME_WINDOW_MS").withValue(String.valueOf(statusTimeWindowMs)).build());
    return envVars;
}
Also used : Quantity(io.fabric8.kubernetes.api.model.Quantity) VolumeMount(io.fabric8.kubernetes.api.model.VolumeMount) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) ImagePullSecretManager(org.bf2.operator.managers.ImagePullSecretManager) Container(io.fabric8.kubernetes.api.model.Container) IntOrString(io.fabric8.kubernetes.api.model.IntOrString) ResourceRequirementsBuilder(io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder) ServicePort(io.fabric8.kubernetes.api.model.ServicePort) ServicePortBuilder(io.fabric8.kubernetes.api.model.ServicePortBuilder) ArrayList(java.util.ArrayList) Inject(javax.inject.Inject) DefaultBean(io.quarkus.arc.DefaultBean) ResourceRequirements(io.fabric8.kubernetes.api.model.ResourceRequirements) VolumeMountBuilder(io.fabric8.kubernetes.api.model.VolumeMountBuilder) EnvVarSourceBuilder(io.fabric8.kubernetes.api.model.EnvVarSourceBuilder) SecuritySecretManager(org.bf2.operator.managers.SecuritySecretManager) Map(java.util.Map) ContainerBuilder(io.fabric8.kubernetes.api.model.ContainerBuilder) Service(io.fabric8.kubernetes.api.model.Service) Path(java.nio.file.Path) ServiceBuilder(io.fabric8.kubernetes.api.model.ServiceBuilder) Instance(javax.enterprise.inject.Instance) Volume(io.fabric8.kubernetes.api.model.Volume) Probe(io.fabric8.kubernetes.api.model.Probe) OperandOverrideManager(org.bf2.operator.managers.OperandOverrideManager) OperandUtils(org.bf2.common.OperandUtils) IngressControllerManager(org.bf2.operator.managers.IngressControllerManager) EnvVarBuilder(io.fabric8.kubernetes.api.model.EnvVarBuilder) VolumeBuilder(io.fabric8.kubernetes.api.model.VolumeBuilder) DeploymentBuilder(io.fabric8.kubernetes.api.model.apps.DeploymentBuilder) ContainerPort(io.fabric8.kubernetes.api.model.ContainerPort) Startup(io.quarkus.runtime.Startup) List(java.util.List) EnvVarSource(io.fabric8.kubernetes.api.model.EnvVarSource) HTTPGetActionBuilder(io.fabric8.kubernetes.api.model.HTTPGetActionBuilder) Optional(java.util.Optional) 
ApplicationScoped(javax.enterprise.context.ApplicationScoped) ConfigProperty(org.eclipse.microprofile.config.inject.ConfigProperty) ContainerPortBuilder(io.fabric8.kubernetes.api.model.ContainerPortBuilder) ProbeBuilder(io.fabric8.kubernetes.api.model.ProbeBuilder) Deployment(io.fabric8.kubernetes.api.model.apps.Deployment) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Collections(java.util.Collections) EnvVarSourceBuilder(io.fabric8.kubernetes.api.model.EnvVarSourceBuilder) ArrayList(java.util.ArrayList) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IntOrString(io.fabric8.kubernetes.api.model.IntOrString) EnvVarBuilder(io.fabric8.kubernetes.api.model.EnvVarBuilder) EnvVarSource(io.fabric8.kubernetes.api.model.EnvVarSource)

Example 40 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class KafkaCluster, method buildKafkaConfig().

/**
 * Assembles the broker-level Kafka configuration for a ManagedKafka:
 * replication settings, protocol/log-format versions, TLS, OAuth reauth,
 * the custom topic/authorizer policies, and (optionally) quota settings.
 *
 * @param managedKafka the ManagedKafka being reconciled
 * @param current the currently deployed Kafka CR, or null if none exists yet
 * @return the config map to place under {@code spec.kafka.config}
 */
private Map<String, Object> buildKafkaConfig(ManagedKafka managedKafka, Kafka current) {
    Map<String, Object> props = new HashMap<>();
    int replicationFactor = this.config.getKafka().getScalingAndReplicationFactor();
    int minIsr = Math.min(replicationFactor, 2);
    // replication / durability settings
    props.put("offsets.topic.replication.factor", replicationFactor);
    props.put("transaction.state.log.min.isr", minIsr);
    props.put("transaction.state.log.replication.factor", replicationFactor);
    props.put("auto.create.topics.enable", "false");
    props.put("min.insync.replicas", minIsr);
    props.put("default.replication.factor", replicationFactor);
    // versions managed by the KafkaManager so upgrades are rolled out in order
    props.put("log.message.format.version", this.kafkaManager.currentKafkaLogMessageFormatVersion(managedKafka));
    props.put("inter.broker.protocol.version", this.kafkaManager.currentKafkaIbpVersion(managedKafka));
    // TLS settings
    props.put("ssl.enabled.protocols", "TLSv1.3,TLSv1.2");
    props.put("ssl.protocol", "TLS");
    // session lifetime: OAuth override when set (clamped at >= 0), else the configured default
    ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
    var maximumSessionLifetime = oauth != null ? oauth.getMaximumSessionLifetime() : null;
    long maxReauthMs = maximumSessionLifetime != null
            ? Math.max(maximumSessionLifetime, 0)
            : this.config.getKafka().getMaximumSessionLifetimeDefault();
    props.put("connections.max.reauth.ms", maxReauthMs);
    if (managedKafka.getSpec().getVersions().compareStrimziVersionTo(Versions.STRIMZI_CLUSTER_OPERATOR_V0_23_0_4) >= 0) {
        // extension to manage the create topic to ensure valid Replication Factor and ISR
        props.put("create.topic.policy.class.name", "io.bf2.kafka.topic.ManagedKafkaCreateTopicPolicy");
    }
    // forcing the preferred leader election as soon as possible
    // NOTE: mostly useful for canary when Kafka brokers roll, partitions move but a preferred leader is not elected
    // this could be removed,  when we contribute to Sarama to have the support for Elect Leader API
    props.put("leader.imbalance.per.broker.percentage", 0);
    props.put(MESSAGE_MAX_BYTES, this.config.getKafka().getMessageMaxBytes());
    // configure quota plugin
    if (this.config.getKafka().isEnableQuota()) {
        addQuotaConfig(managedKafka, current, props);
    }
    // custom authorizer configuration
    addKafkaAuthorizerConfig(managedKafka, props);
    if (managedKafka.getSpec().getCapacity().getMaxPartitions() != null) {
        props.put(MAX_PARTITIONS, managedKafka.getSpec().getCapacity().getMaxPartitions());
    }
    props.put("strimzi.authorization.custom-authorizer.partition-counter.timeout-seconds", 10);
    props.put("strimzi.authorization.custom-authorizer.partition-counter.schedule-interval-seconds", 15);
    props.put("strimzi.authorization.custom-authorizer.partition-counter.private-topic-prefix", this.config.kafka.acl.privatePrefix);
    props.put("strimzi.authorization.custom-authorizer.adminclient-listener.name", "controlplane-9090");
    props.put("strimzi.authorization.custom-authorizer.adminclient-listener.port", 9090);
    props.put("strimzi.authorization.custom-authorizer.adminclient-listener.protocol", "SSL");
    return props;
}
Also used : HashMap(java.util.HashMap) ManagedKafkaAuthenticationOAuth(org.bf2.operator.resources.v1alpha1.ManagedKafkaAuthenticationOAuth) TopologySpreadConstraint(io.fabric8.kubernetes.api.model.TopologySpreadConstraint)

Aggregations

ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka)67 Kafka (io.strimzi.api.kafka.model.Kafka)30 Test (org.junit.jupiter.api.Test)24 QuarkusTest (io.quarkus.test.junit.QuarkusTest)23 List (java.util.List)16 Map (java.util.Map)15 Inject (javax.inject.Inject)15 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)15 Objects (java.util.Objects)14 Quantity (io.fabric8.kubernetes.api.model.Quantity)11 Optional (java.util.Optional)11 Collectors (java.util.stream.Collectors)10 ApplicationScoped (javax.enterprise.context.ApplicationScoped)9 StrimziManager (org.bf2.operator.managers.StrimziManager)9 Logger (org.jboss.logging.Logger)9 KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient)8 ArrayList (java.util.ArrayList)8 Reason (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason)8 Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status)8 ManagedKafkaUtils.exampleManagedKafka (org.bf2.operator.utils.ManagedKafkaUtils.exampleManagedKafka)8