Use of io.strimzi.api.kafka.model.KafkaClusterSpec in project strimzi by strimzi.
The class KafkaST, method testCustomAndUpdatedValues. This system test deploys a Kafka cluster with custom probe settings, broker and ZooKeeper configuration, and container environment variables, then updates those values and verifies that the resulting rolling update applies them:
@ParallelNamespaceTest
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
void testCustomAndUpdatedValues(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));

    LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
    envVarGeneral.put("TEST_ENV_1", "test.env.one");
    envVarGeneral.put("TEST_ENV_2", "test.env.two");
    LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
    envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
    envVarUpdated.put("TEST_ENV_3", "test.env.three");

    // Kafka broker config
    Map<String, Object> kafkaConfig = new HashMap<>();
    kafkaConfig.put("offsets.topic.replication.factor", "1");
    kafkaConfig.put("transaction.state.log.replication.factor", "1");
    kafkaConfig.put("default.replication.factor", "1");
    Map<String, Object> updatedKafkaConfig = new HashMap<>();
    updatedKafkaConfig.put("offsets.topic.replication.factor", "2");
    updatedKafkaConfig.put("transaction.state.log.replication.factor", "2");
    updatedKafkaConfig.put("default.replication.factor", "2");

    // ZooKeeper config
    Map<String, Object> zookeeperConfig = new HashMap<>();
    zookeeperConfig.put("tickTime", "2000");
    zookeeperConfig.put("initLimit", "5");
    zookeeperConfig.put("syncLimit", "2");
    zookeeperConfig.put("autopurge.purgeInterval", "1");
    Map<String, Object> updatedZookeeperConfig = new HashMap<>();
    updatedZookeeperConfig.put("tickTime", "2500");
    updatedZookeeperConfig.put("initLimit", "3");
    updatedZookeeperConfig.put("syncLimit", "5");

    final int initialDelaySeconds = 30;
    final int timeoutSeconds = 10;
    final int updatedInitialDelaySeconds = 31;
    final int updatedTimeoutSeconds = 11;
    final int periodSeconds = 10;
    final int successThreshold = 1;
    final int failureThreshold = 3;
    final int updatedPeriodSeconds = 5;
    final int updatedFailureThreshold = 1;

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 2)
        .editSpec()
            .editKafka()
                .withNewReadinessProbe()
                    .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                    .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                .endReadinessProbe()
                .withNewLivenessProbe()
                    .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                    .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                .endLivenessProbe()
                .withConfig(kafkaConfig)
                .withNewTemplate()
                    .withNewKafkaContainer()
                        .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
                    .endKafkaContainer()
                .endTemplate()
            .endKafka()
            .editZookeeper()
                .withReplicas(2)
                .withNewReadinessProbe()
                    .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                .endReadinessProbe()
                .withNewLivenessProbe()
                    .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                .endLivenessProbe()
                .withConfig(zookeeperConfig)
                .withNewTemplate()
                    .withNewZookeeperContainer()
                        .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
                    .endZookeeperContainer()
                .endTemplate()
            .endZookeeper()
            .editEntityOperator()
                .withNewTemplate()
                    .withNewTopicOperatorContainer()
                        .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
                    .endTopicOperatorContainer()
                    .withNewUserOperatorContainer()
                        .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
                    .endUserOperatorContainer()
                    .withNewTlsSidecarContainer()
                        .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
                    .endTlsSidecarContainer()
                .endTemplate()
                .editUserOperator()
                    .withNewReadinessProbe()
                        .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                        .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                    .endReadinessProbe()
                    .withNewLivenessProbe()
                        .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                        .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                    .endLivenessProbe()
                .endUserOperator()
                .editTopicOperator()
                    .withNewReadinessProbe()
                        .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                        .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                    .endReadinessProbe()
                    .withNewLivenessProbe()
                        .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                        .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                    .endLivenessProbe()
                .endTopicOperator()
                .withNewTlsSidecar()
                    .withNewReadinessProbe()
                        .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                        .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                    .endReadinessProbe()
                    .withNewLivenessProbe()
                        .withInitialDelaySeconds(initialDelaySeconds).withTimeoutSeconds(timeoutSeconds)
                        .withPeriodSeconds(periodSeconds).withSuccessThreshold(successThreshold).withFailureThreshold(failureThreshold)
                    .endLivenessProbe()
                .endTlsSidecar()
            .endEntityOperator()
        .endSpec()
        .build());

    final Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    final Map<String, String> zkSnapshot = PodUtils.podSnapshot(namespaceName, zkSelector);
    final Map<String, String> eoPod = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));

    LOGGER.info("Verify values before update");
    checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold);
    checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), kafkaConfig, clusterName);
    checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarGeneral);

    String kafkaConfiguration = kubeClient().getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
    assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=1"));
    assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=1"));
    assertThat(kafkaConfiguration, containsString("default.replication.factor=1"));

    String kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
    assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=1"));
    assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=1"));
    assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=1"));

    LOGGER.info("Testing Zookeepers");
    checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold);
    checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", zookeeperConfig);
    checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarGeneral);

    LOGGER.info("Checking configuration of TO and UO");
    checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarGeneral);
    checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarGeneral);
    checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarGeneral);

    LOGGER.info("Updating configuration of Kafka cluster");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        KafkaClusterSpec kafkaClusterSpec = k.getSpec().getKafka();
        kafkaClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        kafkaClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        kafkaClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        kafkaClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        kafkaClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
        kafkaClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
        kafkaClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
        kafkaClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
        kafkaClusterSpec.setConfig(updatedKafkaConfig);
        kafkaClusterSpec.getTemplate().getKafkaContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));

        ZookeeperClusterSpec zookeeperClusterSpec = k.getSpec().getZookeeper();
        zookeeperClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        zookeeperClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        zookeeperClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        zookeeperClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        zookeeperClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
        zookeeperClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
        zookeeperClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
        zookeeperClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
        zookeeperClusterSpec.setConfig(updatedZookeeperConfig);
        zookeeperClusterSpec.getTemplate().getZookeeperContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));

        // Configuring TO and UO to use new values for InitialDelaySeconds and TimeoutSeconds
        EntityOperatorSpec entityOperatorSpec = k.getSpec().getEntityOperator();
        entityOperatorSpec.getTopicOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        entityOperatorSpec.getTopicOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        entityOperatorSpec.getTopicOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        entityOperatorSpec.getTopicOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        entityOperatorSpec.getTopicOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
        entityOperatorSpec.getTopicOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
        entityOperatorSpec.getTopicOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
        entityOperatorSpec.getTopicOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
        entityOperatorSpec.getUserOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        entityOperatorSpec.getUserOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        entityOperatorSpec.getUserOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        entityOperatorSpec.getUserOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        entityOperatorSpec.getUserOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
        entityOperatorSpec.getUserOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
        entityOperatorSpec.getUserOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
        entityOperatorSpec.getUserOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
        entityOperatorSpec.getTlsSidecar().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        entityOperatorSpec.getTlsSidecar().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        entityOperatorSpec.getTlsSidecar().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        entityOperatorSpec.getTlsSidecar().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        entityOperatorSpec.getTlsSidecar().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
        entityOperatorSpec.getTlsSidecar().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
        entityOperatorSpec.getTlsSidecar().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
        entityOperatorSpec.getTlsSidecar().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
        entityOperatorSpec.getTemplate().getTopicOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
        entityOperatorSpec.getTemplate().getUserOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
        entityOperatorSpec.getTemplate().getTlsSidecarContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
    }, namespaceName);

    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, zkSelector, 2, zkSnapshot);
    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 2, kafkaSnapshot);
    DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPod);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    LOGGER.info("Verify values after update");
    checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
    checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), updatedKafkaConfig, clusterName);
    checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarUpdated);

    kafkaConfiguration = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
    assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=2"));
    assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=2"));
    assertThat(kafkaConfiguration, containsString("default.replication.factor=2"));

    kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
    assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=2"));
    assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=2"));
    assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=2"));

    LOGGER.info("Testing Zookeepers");
    checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
    checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", updatedZookeeperConfig);
    checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarUpdated);

    LOGGER.info("Getting entity operator to check configuration of TO and UO");
    checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarUpdated);
    checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarUpdated);
    checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarUpdated);
}
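The helper StUtils.createContainerEnvVarsFromMap used throughout this test is not shown here. A minimal sketch of what such a helper can look like with the fabric8 Kubernetes model, assuming it simply maps each entry to a container EnvVar while preserving the LinkedHashMap insertion order (the actual Strimzi implementation may differ):

import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.EnvVarBuilder;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Hypothetical re-implementation of StUtils.createContainerEnvVarsFromMap:
// turns each map entry into a container EnvVar.
public static List<EnvVar> createContainerEnvVarsFromMap(Map<String, String> envVars) {
    return envVars.entrySet().stream()
            .map(entry -> new EnvVarBuilder()
                    .withName(entry.getKey())
                    .withValue(entry.getValue())
                    .build())
            .collect(Collectors.toList());
}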
Use of io.strimzi.api.kafka.model.KafkaClusterSpec in project strimzi by strimzi.
The class KafkaExporter, method fromCrd. Here KafkaClusterSpec is consulted only as a fallback: when no exporter image is configured, the image is derived from the Kafka cluster's own image lookup:
public static KafkaExporter fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
    KafkaExporter kafkaExporter = new KafkaExporter(reconciliation, kafkaAssembly);
    KafkaExporterSpec spec = kafkaAssembly.getSpec().getKafkaExporter();
    if (spec != null) {
        kafkaExporter.isDeployed = true;
        kafkaExporter.setResources(spec.getResources());
        if (spec.getReadinessProbe() != null) {
            kafkaExporter.setReadinessProbe(spec.getReadinessProbe());
        }
        if (spec.getLivenessProbe() != null) {
            kafkaExporter.setLivenessProbe(spec.getLivenessProbe());
        }
        kafkaExporter.setGroupRegex(spec.getGroupRegex());
        kafkaExporter.setTopicRegex(spec.getTopicRegex());
        String image = spec.getImage();
        if (image == null) {
            KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka();
            image = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_EXPORTER_IMAGE, versions.kafkaImage(kafkaClusterSpec.getImage(), versions.defaultVersion().version()));
        }
        kafkaExporter.setImage(image);
        kafkaExporter.setLogging(spec.getLogging());
        kafkaExporter.setSaramaLoggingEnabled(spec.getEnableSaramaLogging());
        if (spec.getTemplate() != null) {
            KafkaExporterTemplate template = spec.getTemplate();
            if (template.getDeployment() != null && template.getDeployment().getMetadata() != null) {
                kafkaExporter.templateDeploymentLabels = template.getDeployment().getMetadata().getLabels();
                kafkaExporter.templateDeploymentAnnotations = template.getDeployment().getMetadata().getAnnotations();
            }
            if (template.getContainer() != null && template.getContainer().getEnv() != null) {
                kafkaExporter.templateContainerEnvVars = template.getContainer().getEnv();
            }
            if (template.getContainer() != null && template.getContainer().getSecurityContext() != null) {
                kafkaExporter.templateContainerSecurityContext = template.getContainer().getSecurityContext();
            }
            if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
                kafkaExporter.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
                kafkaExporter.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
            }
            ModelUtils.parsePodTemplate(kafkaExporter, template.getPod());
        }
        kafkaExporter.setVersion(versions.supportedVersion(kafkaAssembly.getSpec().getKafka().getVersion()).version());
        kafkaExporter.setOwnerReference(kafkaAssembly);
    } else {
        kafkaExporter.isDeployed = false;
    }
    kafkaExporter.templatePodLabels = Util.mergeLabelsOrAnnotations(kafkaExporter.templatePodLabels, DEFAULT_POD_LABELS);
    return kafkaExporter;
}
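fromCrd only marks the exporter as deployed when spec.kafkaExporter is non-null. A minimal sketch of a Kafka resource that opts in to the exporter through the Strimzi builder API (the regex values are illustrative; kafkaAssembly is an existing Kafka resource as in the method above):

// Enable the Kafka Exporter by adding a kafkaExporter section; without it,
// fromCrd sets isDeployed = false and no exporter Deployment is generated.
Kafka kafkaWithExporter = new KafkaBuilder(kafkaAssembly)
        .editSpec()
            .withNewKafkaExporter()
                .withGroupRegex(".*")   // export metrics for all consumer groups
                .withTopicRegex(".*")   // export metrics for all topics
            .endKafkaExporter()
        .endSpec()
        .build();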
Use of io.strimzi.api.kafka.model.KafkaClusterSpec in project strimzi by strimzi.
The class KafkaCluster, method fromCrd. This factory validates the desired Kafka specification and builds the broker model, covering version, probes, storage, listeners, authorization, and template overrides:
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());
    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());
    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);
    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));
    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }
    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }
    result.setRack(kafkaClusterSpec.getRack());
    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.setInitImage(initImage);
    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);
    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (kafkaClusterSpec.getJvmOptions() != null) {
        result.setJavaSystemProperties(kafkaClusterSpec.getJvmOptions().getJavaSystemProperties());
    }
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());
    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }
    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);
    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);
    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);
        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: "
                    + "changing the deleteClaim flag, "
                    + "adding volumes to Jbod storage or removing volumes from Jbod storage, "
                    + "changing overrides to nodes which do not exist yet, "
                    + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a "
                    + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage",
                    "The desired Kafka storage configuration contains changes which are not allowed. As a "
                    + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.");
            result.addWarningCondition(warning);
            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());
    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.setListeners(listeners);
    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.setAuthorization(kafkaClusterSpec.getAuthorization());
    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();
        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }
        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());
        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }
        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }
        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }
        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }
        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }
        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }
        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }
        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }
        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }
        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }
        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }
        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }
        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }
    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);
    return result;
}
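The validateIntConfigProperty helper is not shown above. A plausible sketch, assuming it rejects replication-factor style options whose integer value exceeds the configured broker count; the real implementation and error message may differ:

// Hypothetical sketch: an integer broker option such as
// default.replication.factor cannot exceed spec.kafka.replicas.
static void validateIntConfigProperty(String propertyName, KafkaClusterSpec kafkaClusterSpec) {
    Object value = kafkaClusterSpec.getConfig() == null ? null : kafkaClusterSpec.getConfig().get(propertyName);
    if (value != null && Integer.parseInt(value.toString()) > kafkaClusterSpec.getReplicas()) {
        throw new InvalidResourceException("Kafka configuration option '" + propertyName
                + "' should be set to " + kafkaClusterSpec.getReplicas()
                + " or less because 'spec.kafka.replicas' is " + kafkaClusterSpec.getReplicas());
    }
}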
Use of io.strimzi.api.kafka.model.KafkaClusterSpec in project strimzi-kafka-operator by strimzi.
The class EntityOperator, method fromCrd. KafkaClusterSpec again serves as an image fallback, this time for the TLS sidecar when no sidecar image is configured:
/**
 * Create an Entity Operator from the given desired resource.
 *
 * @param reconciliation The reconciliation
 * @param kafkaAssembly desired Kafka resource whose cluster configuration contains the Entity Operator specification
 * @param versions The versions.
 * @return Entity Operator instance, null if not configured in the custom resource
 */
public static EntityOperator fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
    EntityOperator result = null;
    EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator();
    if (entityOperatorSpec != null) {
        result = new EntityOperator(reconciliation, kafkaAssembly);
        result.setOwnerReference(kafkaAssembly);
        EntityTopicOperator topicOperator = EntityTopicOperator.fromCrd(reconciliation, kafkaAssembly);
        EntityUserOperator userOperator = EntityUserOperator.fromCrd(reconciliation, kafkaAssembly);
        TlsSidecar tlsSidecar = entityOperatorSpec.getTlsSidecar();
        if (entityOperatorSpec.getTemplate() != null) {
            EntityOperatorTemplate template = entityOperatorSpec.getTemplate();
            if (template.getDeployment() != null && template.getDeployment().getMetadata() != null) {
                result.templateDeploymentLabels = template.getDeployment().getMetadata().getLabels();
                result.templateDeploymentAnnotations = template.getDeployment().getMetadata().getAnnotations();
            }
            ModelUtils.parsePodTemplate(result, template.getPod());
            if (template.getTopicOperatorContainer() != null && template.getTopicOperatorContainer().getEnv() != null) {
                topicOperator.setContainerEnvVars(template.getTopicOperatorContainer().getEnv());
            }
            if (template.getTopicOperatorContainer() != null && template.getTopicOperatorContainer().getSecurityContext() != null) {
                topicOperator.setContainerSecurityContext(template.getTopicOperatorContainer().getSecurityContext());
            }
            if (template.getUserOperatorContainer() != null && template.getUserOperatorContainer().getEnv() != null) {
                userOperator.setContainerEnvVars(template.getUserOperatorContainer().getEnv());
            }
            if (template.getUserOperatorContainer() != null && template.getUserOperatorContainer().getSecurityContext() != null) {
                userOperator.setContainerSecurityContext(template.getUserOperatorContainer().getSecurityContext());
            }
            if (template.getTlsSidecarContainer() != null && template.getTlsSidecarContainer().getEnv() != null) {
                result.templateTlsSidecarContainerEnvVars = template.getTlsSidecarContainer().getEnv();
            }
            if (template.getTlsSidecarContainer() != null && template.getTlsSidecarContainer().getSecurityContext() != null) {
                result.templateTlsSidecarContainerSecurityContext = template.getTlsSidecarContainer().getSecurityContext();
            }
            if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
                result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
                result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
            }
        }
        result.setTlsSidecar(tlsSidecar);
        result.setTopicOperator(topicOperator);
        result.setUserOperator(userOperator);
        result.setDeployed(result.getTopicOperator() != null || result.getUserOperator() != null);
        String tlsSideCarImage = tlsSidecar != null ? tlsSidecar.getImage() : null;
        if (tlsSideCarImage == null) {
            KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka();
            tlsSideCarImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_TLS_SIDECAR_ENTITY_OPERATOR_IMAGE, versions.kafkaImage(kafkaClusterSpec.getImage(), versions.defaultVersion().version()));
        }
        result.tlsSidecarImage = tlsSideCarImage;
        result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);
    }
    return result;
}
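Because fromCrd returns null when spec.entityOperator is absent, and the deployed flag is false when neither operator is configured, callers have to guard both cases. An illustrative null-safe call site (a sketch, not the exact operator code):

// Only generate Entity Operator resources when the section exists and at
// least one of the two operators is configured.
EntityOperator entityOperator = EntityOperator.fromCrd(reconciliation, kafkaAssembly, versions);
if (entityOperator != null && (entityOperator.getTopicOperator() != null || entityOperator.getUserOperator() != null)) {
    // Reconcile the Entity Operator Deployment, ServiceAccount, certificates, etc.
}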
Use of io.strimzi.api.kafka.model.KafkaClusterSpec in project strimzi-kafka-operator by strimzi.
The class KafkaCluster, method fromCrd.
The method body is identical, line for line, to the KafkaCluster.fromCrd listing from the strimzi project shown above, so it is not repeated here.