Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class MockControlPlane, method loop.
@Scheduled(every = "{poll.interval}")
void loop() {
    // only run the simulation when needed
    if (!this.runSimulation) {
        return;
    }

    Random random = new Random(System.currentTimeMillis());
    log.info("control plane:: Running Simulation");

    // seed the initial set of clusters
    if (this.kafkas.size() == 0) {
        int max = Math.abs(random.nextInt(maxKafkas));
        for (int i = 0; i < max; i++) {
            ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
            log.infof("control plane::marking %s for addition", k.getId());
            this.kafkas.put(k.getId(), k);
        }
    }

    // delete an instance at random
    if (this.kafkas.size() > 1 && random.nextBoolean()) {
        int idx = Math.abs(random.nextInt(this.kafkas.size()));
        int i = 0;
        for (ManagedKafka k : kafkas.values()) {
            if (i++ < idx) {
                continue;
            } else {
                markForDeletion(k.getId());
                break;
            }
        }
    }

    // selectively add a new cluster
    if (this.kafkas.size() < maxKafkas && random.nextBoolean()) {
        ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
        log.infof("control plane:: creating a new cluster %s ", k.getId());
        this.kafkas.put(k.getId(), k);
    }

    log.info("--------------------------------------------------");
    for (ManagedKafka mk : this.kafkas.values()) {
        log.infof("ManagedKafka: %s, delete requested: %s", mk.getId(), mk.getSpec().isDeleted());
    }
    log.info("--------------------------------------------------");
}
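The loop delegates to a markForDeletion helper that is not shown in this snippet. A minimal sketch of what it plausibly does, assuming ManagedKafkaSpec exposes a setDeleted setter to match the isDeleted getter used above (a hypothetical reconstruction, not the project's verified code):

    // Hypothetical sketch: flag the instance's spec as deleted so a later
    // status round-trip (see updateKafkaClustersStatus below) can remove it.
    private void markForDeletion(String id) {
        ManagedKafka mk = this.kafkas.get(id);
        if (mk != null) {
            log.infof("control plane:: marking %s for deletion", id);
            mk.getSpec().setDeleted(true);
        }
    }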
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class MockControlPlane, method updateKafkaClustersStatus.
@Override
public void updateKafkaClustersStatus(@PathParam(value = "id") String id, Map<String, ManagedKafkaStatus> statusMap) {
    log.infof("control plane:: updateKafkaClustersStatus <- Received from cluster %s, %s", id, statusMap);

    // clean up clusters whose deletion has completed
    statusMap.forEach((k, v) -> {
        log.infof("control plane:: Status of %s received", k);
        ManagedKafka mk = this.kafkas.get(k);
        if (mk != null) {
            if (mk.getSpec().isDeleted() && isDeleted(v)) {
                log.infof("control plane:: Removing cluster %s as it is deleted", mk.getId());
                this.kafkas.remove(k);
                this.kafkaStatus.remove(k);
            } else {
                this.kafkaStatus.put(k, v);
            }
        }
    });
}
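The isDeleted(v) check is another helper outside the snippet. A plausible sketch, assuming ManagedKafkaStatus carries a list of conditions with type and reason fields as elsewhere in the operator's v1alpha1 API (hypothetical; the real helper may differ):

    // Hypothetical sketch: treat a reported status as "deleted" when it
    // carries a Ready condition whose reason is Deleted.
    private boolean isDeleted(ManagedKafkaStatus status) {
        return status != null
                && status.getConditions() != null
                && status.getConditions().stream()
                        .anyMatch(c -> "Ready".equals(c.getType()) && "Deleted".equals(c.getReason()));
    }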
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class LogCollector, method saveClusterState.
private static void saveClusterState(Path logpath) throws IOException {
    KubeClient kube = KubeClient.getInstance();
    Files.writeString(logpath.resolve("describe-cluster-nodes.log"),
            kube.cmdClient().exec(false, false, "describe", "nodes").out());
    Files.writeString(logpath.resolve("all-events.log"),
            kube.cmdClient().exec(false, false, "get", "events", "--all-namespaces").out());
    Files.writeString(logpath.resolve("pvs.log"),
            kube.cmdClient().exec(false, false, "describe", "pv").out());
    Files.writeString(logpath.resolve("operator-routes.yml"),
            kube.cmdClient().exec(false, false, "get", "routes", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml").out());
    Files.writeString(logpath.resolve("operator-services.yml"),
            kube.cmdClient().exec(false, false, "get", "service", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml").out());
    Files.writeString(logpath.resolve("kas-fleetshard-operator-pods.yml"),
            kube.cmdClient().exec(false, false, "get", "pod", "-l", "app=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("strimzi-kafka-pods.yml"),
            kube.cmdClient().exec(false, false, "get", "pod", "-l", "app.kubernetes.io/managed-by=strimzi-cluster-operator", "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("managedkafkas.yml"),
            kube.cmdClient().exec(false, false, "get", "managedkafka", "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("kafkas.yml"),
            kube.cmdClient().exec(false, false, "get", "kafka", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("pods-managed-by-operator.yml"),
            kube.cmdClient().exec(false, false, "get", "pods", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("operator-namespace-events.yml"),
            kube.cmdClient().exec(false, false, "get", "events", "-n", FleetShardOperatorManager.OPERATOR_NS).out());
    Files.writeString(logpath.resolve("operator.log"),
            kube.cmdClient().exec(false, false, "logs", "deployment/" + FleetShardOperatorManager.OPERATOR_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS).out());
    Files.writeString(logpath.resolve("sync.log"),
            kube.cmdClient().exec(false, false, "logs", "deployment/" + FleetShardOperatorManager.SYNC_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS).out());
    StrimziOperatorManager.getStrimziOperatorPods().forEach(pod -> {
        try {
            Files.writeString(logpath.resolve(pod.getMetadata().getName() + ".log"),
                    kube.cmdClient().exec(false, false, "logs", pod.getMetadata().getName(), "--tail", "-1", "-n", pod.getMetadata().getNamespace()).out());
        } catch (Exception e) {
            LOGGER.warn("Cannot get logs from pod {} in namespace {}", pod.getMetadata().getName(), pod.getMetadata().getNamespace());
        }
    });
}
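Every capture above repeats the same exec-then-write pattern. A small helper one could extract to cut the duplication (a sketch; writeLog is a hypothetical name, not part of LogCollector):

    // Hypothetical helper: run one kubectl/oc command and dump its stdout to a file.
    private static void writeLog(Path logpath, String fileName, String... cmdArgs) throws IOException {
        Files.writeString(logpath.resolve(fileName),
                KubeClient.getInstance().cmdClient().exec(false, false, cmdArgs).out());
    }

With it, each capture collapses to a single call, e.g. writeLog(logpath, "pvs.log", "describe", "pv");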
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class Canary, method buildEnvVar.
private List<EnvVar> buildEnvVar(ManagedKafka managedKafka, Deployment current) {
    List<EnvVar> envVars = new ArrayList<>(10);

    String bootstrap = getBootstrapURL(managedKafka);
    envVars.add(new EnvVarBuilder().withName("KAFKA_BOOTSTRAP_SERVERS").withValue(bootstrap).build());
    envVars.add(new EnvVarBuilder().withName("RECONCILE_INTERVAL_MS").withValue("5000").build());
    envVars.add(new EnvVarBuilder().withName("EXPECTED_CLUSTER_SIZE").withValue(String.valueOf(kafkaCluster.getReplicas(managedKafka))).build());

    String kafkaVersion = managedKafka.getSpec().getVersions().getKafka();
    // keep the current Kafka version if the canary already exists; during a Kafka
    // upgrade it doesn't have to change, just like any other client
    if (current != null) {
        Optional<EnvVar> kafkaVersionEnvVar = current.getSpec().getTemplate().getSpec().getContainers().stream()
                .filter(container -> "canary".equals(container.getName()))
                .findFirst()
                .get()
                .getEnv().stream()
                .filter(ev -> "KAFKA_VERSION".equals(ev.getName()))
                .findFirst();
        if (kafkaVersionEnvVar.isPresent()) {
            kafkaVersion = kafkaVersionEnvVar.get().getValue();
        }
    }
    envVars.add(new EnvVarBuilder().withName("KAFKA_VERSION").withValue(kafkaVersion).build());

    envVars.add(new EnvVarBuilder().withName("TZ").withValue("UTC").build());
    envVars.add(new EnvVarBuilder().withName("TLS_ENABLED").withValue("true").build());
    envVars.add(new EnvVarBuilder().withName("TLS_CA_CERT").withValue("/tmp/tls-ca-cert/ca.crt").build());

    // Deprecated
    EnvVarSource saramaLogEnabled = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef()
            .withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("sarama.log.enabled").withOptional(Boolean.TRUE)
            .endConfigMapKeyRef().build();
    EnvVarSource verbosityLogLevel = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef()
            .withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("verbosity.log.level").withOptional(Boolean.TRUE)
            .endConfigMapKeyRef().build();
    EnvVarSource goDebug = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef()
            .withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("go.debug").withOptional(Boolean.TRUE)
            .endConfigMapKeyRef().build();
    envVars.add(new EnvVarBuilder().withName("SARAMA_LOG_ENABLED").withValueFrom(saramaLogEnabled).build());
    envVars.add(new EnvVarBuilder().withName("VERBOSITY_LOG_LEVEL").withValueFrom(verbosityLogLevel).build());
    envVars.add(new EnvVarBuilder().withName("GODEBUG").withValueFrom(goDebug).build());

    envVars.add(new EnvVarBuilder().withName("TOPIC").withValue(config.getCanary().getTopic()).build());
    envVars.add(new EnvVarBuilder().withName("TOPIC_CONFIG").withValue("retention.ms=600000;segment.bytes=16384").build());
    envVars.add(new EnvVarBuilder().withName("CLIENT_ID").withValue(config.getCanary().getClientId()).build());
    envVars.add(new EnvVarBuilder().withName("CONSUMER_GROUP_ID").withValue(config.getCanary().getConsumerGroupId()).build());
    envVars.add(new EnvVarBuilder().withName("PRODUCER_LATENCY_BUCKETS").withValue(producerLatencyBuckets).build());
    envVars.add(new EnvVarBuilder().withName("ENDTOEND_LATENCY_BUCKETS").withValue(endToEndLatencyBuckets).build());
    envVars.add(new EnvVarBuilder().withName("CONNECTION_CHECK_LATENCY_BUCKETS").withValue(connectionCheckLatencyBuckets).build());
    envVars.add(new EnvVarBuilder().withName("DYNAMIC_CONFIG_FILE").withValue(CANARY_DYNAMIC_CONFIG_JSON.toString()).build());

    if (SecuritySecretManager.isCanaryServiceAccountPresent(managedKafka)) {
        envVars.add(new EnvVarBuilder().withName("SASL_MECHANISM").withValue("PLAIN").build());
        addEnvVarFromSecret(envVars, "SASL_USER", SecuritySecretManager.canarySaslSecretName(managedKafka), SecuritySecretManager.SASL_PRINCIPAL);
        addEnvVarFromSecret(envVars, "SASL_PASSWORD", SecuritySecretManager.canarySaslSecretName(managedKafka), SecuritySecretManager.SASL_PASSWORD);
    }

    envVars.add(new EnvVarBuilder().withName("STATUS_TIME_WINDOW_MS").withValue(String.valueOf(statusTimeWindowMs)).build());

    return envVars;
}
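addEnvVarFromSecret is called above but not shown. A sketch of what it likely builds, mirroring the fabric8 builder pattern used for the ConfigMap-backed variables, with a secretKeyRef instead (hypothetical reconstruction):

    // Hypothetical sketch: wire an env var to a key in a Secret via a secretKeyRef.
    private void addEnvVarFromSecret(List<EnvVar> envVars, String envName, String secretName, String secretKey) {
        envVars.add(new EnvVarBuilder()
                .withName(envName)
                .withValueFrom(new EnvVarSourceBuilder()
                        .editOrNewSecretKeyRef()
                            .withName(secretName)
                            .withKey(secretKey)
                        .endSecretKeyRef()
                        .build())
                .build());
    }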
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaCluster, method buildKafkaConfig.
private Map<String, Object> buildKafkaConfig(ManagedKafka managedKafka, Kafka current) {
    Map<String, Object> config = new HashMap<>();
    int scalingAndReplicationFactor = this.config.getKafka().getScalingAndReplicationFactor();
    config.put("offsets.topic.replication.factor", scalingAndReplicationFactor);
    config.put("transaction.state.log.min.isr", Math.min(scalingAndReplicationFactor, 2));
    config.put("transaction.state.log.replication.factor", scalingAndReplicationFactor);
    config.put("auto.create.topics.enable", "false");
    config.put("min.insync.replicas", Math.min(scalingAndReplicationFactor, 2));
    config.put("default.replication.factor", scalingAndReplicationFactor);
    config.put("log.message.format.version", this.kafkaManager.currentKafkaLogMessageFormatVersion(managedKafka));
    config.put("inter.broker.protocol.version", this.kafkaManager.currentKafkaIbpVersion(managedKafka));
    config.put("ssl.enabled.protocols", "TLSv1.3,TLSv1.2");
    config.put("ssl.protocol", "TLS");

    ManagedKafkaAuthenticationOAuth oauth = managedKafka.getSpec().getOauth();
    var maximumSessionLifetime = oauth != null ? oauth.getMaximumSessionLifetime() : null;
    long maxReauthMs = maximumSessionLifetime != null
            ? Math.max(maximumSessionLifetime, 0)
            : this.config.getKafka().getMaximumSessionLifetimeDefault();
    config.put("connections.max.reauth.ms", maxReauthMs);

    if (managedKafka.getSpec().getVersions().compareStrimziVersionTo(Versions.STRIMZI_CLUSTER_OPERATOR_V0_23_0_4) >= 0) {
        // policy extension that validates the replication factor and ISR on topic creation
        config.put("create.topic.policy.class.name", "io.bf2.kafka.topic.ManagedKafkaCreateTopicPolicy");
    }

    // force preferred leader election as soon as possible
    // NOTE: mostly useful for the canary when Kafka brokers roll; partitions move, but a preferred leader is not re-elected.
    // This could be removed once we contribute Elect Leaders API support to Sarama.
    config.put("leader.imbalance.per.broker.percentage", 0);

    config.put(MESSAGE_MAX_BYTES, this.config.getKafka().getMessageMaxBytes());

    // configure the quota plugin
    if (this.config.getKafka().isEnableQuota()) {
        addQuotaConfig(managedKafka, current, config);
    }

    // custom authorizer configuration
    addKafkaAuthorizerConfig(managedKafka, config);

    if (managedKafka.getSpec().getCapacity().getMaxPartitions() != null) {
        config.put(MAX_PARTITIONS, managedKafka.getSpec().getCapacity().getMaxPartitions());
    }

    config.put("strimzi.authorization.custom-authorizer.partition-counter.timeout-seconds", 10);
    config.put("strimzi.authorization.custom-authorizer.partition-counter.schedule-interval-seconds", 15);
    config.put("strimzi.authorization.custom-authorizer.partition-counter.private-topic-prefix", this.config.kafka.acl.privatePrefix);
    config.put("strimzi.authorization.custom-authorizer.adminclient-listener.name", "controlplane-9090");
    config.put("strimzi.authorization.custom-authorizer.adminclient-listener.port", 9090);
    config.put("strimzi.authorization.custom-authorizer.adminclient-listener.protocol", "SSL");

    return config;
}
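For context, a map built this way ends up as the broker config of Strimzi's Kafka custom resource. A minimal sketch of that hand-off using the fabric8-generated Strimzi builders (assumed surrounding code, not part of buildKafkaConfig itself):

    // Hypothetical sketch: plug the generated config into a Strimzi Kafka resource.
    Kafka kafka = new KafkaBuilder()
            .editOrNewSpec()
                .editOrNewKafka()
                    .withConfig(buildKafkaConfig(managedKafka, current))
                .endKafka()
            .endSpec()
            .build();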