Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
The class KafkaCluster, method fromCrd.
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:JavaNCSS" })
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
    KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();

    KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);

    // This also validates that the Kafka version is supported
    result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion());

    result.setOwnerReference(kafkaAssembly);
    result.setReplicas(kafkaClusterSpec.getReplicas());

    validateIntConfigProperty("default.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("offsets.topic.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.replication.factor", kafkaClusterSpec);
    validateIntConfigProperty("transaction.state.log.min.isr", kafkaClusterSpec);

    result.setImage(versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion()));

    if (kafkaClusterSpec.getReadinessProbe() != null) {
        result.setReadinessProbe(kafkaClusterSpec.getReadinessProbe());
    }

    if (kafkaClusterSpec.getLivenessProbe() != null) {
        result.setLivenessProbe(kafkaClusterSpec.getLivenessProbe());
    }

    result.setRack(kafkaClusterSpec.getRack());

    String initImage = kafkaClusterSpec.getBrokerRackInitImage();
    if (initImage == null) {
        initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest");
    }
    result.setInitImage(initImage);

    Logging logging = kafkaClusterSpec.getLogging();
    result.setLogging(logging == null ? new InlineLogging() : logging);

    result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (kafkaClusterSpec.getJvmOptions() != null) {
        result.setJavaSystemProperties(kafkaClusterSpec.getJvmOptions().getJavaSystemProperties());
    }
    result.setJvmOptions(kafkaClusterSpec.getJvmOptions());

    if (kafkaClusterSpec.getJmxOptions() != null) {
        result.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureKafkaJmxOptions(kafkaClusterSpec.getJmxOptions().getAuthentication(), result);
    }

    // Handle Kafka broker configuration
    KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
    configureCruiseControlMetrics(kafkaAssembly, result, configuration);
    validateConfiguration(reconciliation, kafkaAssembly, result.kafkaVersion, configuration);
    result.setConfiguration(configuration);

    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(result, kafkaClusterSpec);
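    // Note (added commentary, not in the original source): when oldStorage is passed in, the
    // desired storage from the custom resource is diffed against the storage used by the existing
    // cluster. If the diff contains changes that are not supported (for example a change of
    // storage type), the old storage is kept and a "KafkaStorage" warning condition is attached
    // to the model instead of applying the new configuration.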
    if (oldStorage != null) {
        Storage newStorage = kafkaClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);

        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());

        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: "
                    + "changing the deleteClaim flag, "
                    + "adding volumes to Jbod storage or removing volumes from Jbod storage, "
                    + "changing overrides to nodes which do not exist yet, "
                    + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a "
                    + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());

            Condition warning = StatusUtils.buildWarningCondition("KafkaStorage",
                    "The desired Kafka storage configuration contains changes which are not allowed. As a "
                            + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                            + "about the detected changes.");
            result.addWarningCondition(warning);

            result.setStorage(oldStorage);
        } else {
            result.setStorage(newStorage);
        }
    } else {
        result.setStorage(kafkaClusterSpec.getStorage());
    }
    result.setResources(kafkaClusterSpec.getResources());

    // Configure listeners
    if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().isEmpty()) {
        LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
        throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
    }
    List<GenericKafkaListener> listeners = kafkaClusterSpec.getListeners();
    ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
    result.setListeners(listeners);

    // Set authorization
    if (kafkaClusterSpec.getAuthorization() instanceof KafkaAuthorizationKeycloak) {
        if (!ListenersUtils.hasListenerWithOAuth(listeners)) {
            throw new InvalidResourceException("You cannot configure Keycloak Authorization without any listener with OAuth based authentication");
        } else {
            KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
            if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
                LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
                throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
            }
        }
    }
    result.setAuthorization(kafkaClusterSpec.getAuthorization());

    if (kafkaClusterSpec.getTemplate() != null) {
        KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();

        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                result.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }

            if (template.getStatefulset().getMetadata() != null) {
                result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }

        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            result.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            result.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodTemplate(result, template.getPod());
        ModelUtils.parseInternalServiceTemplate(result, template.getBootstrapService());
        ModelUtils.parseInternalHeadlessServiceTemplate(result, template.getBrokersService());

        if (template.getExternalBootstrapService() != null) {
            if (template.getExternalBootstrapService().getMetadata() != null) {
                result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
                result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
            }
        }

        if (template.getPerPodService() != null) {
            if (template.getPerPodService().getMetadata() != null) {
                result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
                result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
            }
        }

        if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
            result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
            result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
        }

        if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
            result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
            result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
        }

        if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
            result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
            result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
        }

        if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
            result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
            result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
        }

        if (template.getClusterRoleBinding() != null && template.getClusterRoleBinding().getMetadata() != null) {
            result.templateClusterRoleBindingLabels = template.getClusterRoleBinding().getMetadata().getLabels();
            result.templateClusterRoleBindingAnnotations = template.getClusterRoleBinding().getMetadata().getAnnotations();
        }

        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            result.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), result.templateStatefulSetLabels);
            result.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }

        if (template.getKafkaContainer() != null && template.getKafkaContainer().getEnv() != null) {
            result.templateKafkaContainerEnvVars = template.getKafkaContainer().getEnv();
        }

        if (template.getInitContainer() != null && template.getInitContainer().getEnv() != null) {
            result.templateInitContainerEnvVars = template.getInitContainer().getEnv();
        }

        if (template.getKafkaContainer() != null && template.getKafkaContainer().getSecurityContext() != null) {
            result.templateKafkaContainerSecurityContext = template.getKafkaContainer().getSecurityContext();
        }

        if (template.getInitContainer() != null && template.getInitContainer().getSecurityContext() != null) {
            result.templateInitContainerSecurityContext = template.getInitContainer().getSecurityContext();
        }

        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            result.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            result.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }

        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            result.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            result.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
    }

    result.templatePodLabels = Util.mergeLabelsOrAnnotations(result.templatePodLabels, DEFAULT_POD_LABELS);

    return result;
}
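A minimal usage sketch, not taken from the repository, of how fromCrd might be driven. It reuses the builders that appear in the tests below; VERSIONS stands in for a KafkaVersion.Lookup of the supported Kafka versions and is an assumption of this sketch.
// Hypothetical Kafka custom resource, mirroring the test fixtures further down
Kafka kafka = new KafkaBuilder()
        .withNewMetadata().withName("my-cluster").withNamespace("my-namespace").endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("plain").withPort(9092).withTls(false)
                        .withType(KafkaListenerType.INTERNAL).build())
                .withNewEphemeralStorage().endEphemeralStorage()
            .endKafka()
        .endSpec()
        .build();
// The old storage and replica count describe the existing cluster; if the custom resource asked
// for an incompatible storage change, the model would keep this storage and add a warning condition.
KafkaCluster cluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS,
        new EphemeralStorageBuilder().build(), 3);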
Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
The class AbstractModelTest, method testCreatePersistentVolumeClaims.
@ParallelTest
public void testCreatePersistentVolumeClaims() {
    Kafka kafka = new KafkaBuilder()
            .withNewMetadata()
                .withName("my-cluster")
                .withNamespace("my-namespace")
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withListeners(new GenericKafkaListenerBuilder()
                            .withName("plain")
                            .withPort(9092)
                            .withTls(false)
                            .withType(KafkaListenerType.INTERNAL)
                            .build())
                    .withReplicas(2)
                    .withNewEphemeralStorage().endEphemeralStorage()
                .endKafka()
            .endSpec()
            .build();

    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup());
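    // Note (added commentary, not in the original source): generatePersistentVolumeClaims creates
    // one claim per broker and per persistent volume. For JBOD storage the claims are named
    // data-<volumeId>-<cluster>-kafka-<broker>, for a single persistent claim they are named
    // data-<cluster>-kafka-<broker>, and ephemeral storage produces no claims at all, which is
    // what the assertions below verify with two replicas.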
    // JBOD Storage
    Storage storage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("10Gi").build())
            .build();

    List<PersistentVolumeClaim> pvcs = kc.generatePersistentVolumeClaims(storage);

    assertThat(pvcs.size(), is(4));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-0-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-0-my-cluster-kafka-1"));
    assertThat(pvcs.get(2).getMetadata().getName(), is("data-1-my-cluster-kafka-0"));
    assertThat(pvcs.get(3).getMetadata().getName(), is("data-1-my-cluster-kafka-1"));

    // JBOD with Ephemeral storage
    storage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
            new EphemeralStorageBuilder().withId(1).build())
            .build();

    pvcs = kc.generatePersistentVolumeClaims(storage);

    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-0-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-0-my-cluster-kafka-1"));

    // Persistent Claim storage
    storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build();

    pvcs = kc.generatePersistentVolumeClaims(storage);

    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));

    // Persistent Claim with ID storage
    storage = new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build();

    pvcs = kc.generatePersistentVolumeClaims(storage);

    assertThat(pvcs.size(), is(2));
    assertThat(pvcs.get(0).getMetadata().getName(), is("data-my-cluster-kafka-0"));
    assertThat(pvcs.get(1).getMetadata().getName(), is("data-my-cluster-kafka-1"));

    // Ephemeral Storage
    storage = new EphemeralStorageBuilder().build();

    pvcs = kc.generatePersistentVolumeClaims(storage);

    assertThat(pvcs.size(), is(0));

    // JBOD Storage without ID
    final Storage finalStorage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("20Gi").build())
            .build();
    InvalidResourceException ex = Assertions.assertThrows(InvalidResourceException.class,
            () -> kc.generatePersistentVolumeClaims(finalStorage));
    assertThat(ex.getMessage(), is("The 'id' property is required for volumes in JBOD storage."));
}
Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
The class KafkaClusterTest, method testStorageReverting.
@ParallelTest
public void testStorageReverting() {
    Storage jbod = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage ephemeral = new EphemeralStorageBuilder().build();
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();
    // Test storage changes and how they are reverted
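    // Note (added commentary, not in the original source): in each case below the custom resource
    // asks for a storage type that differs from the storage the cluster already uses. Because such
    // changes are not allowed, fromCrd keeps the old storage that was passed in and records a
    // single warning condition with reason "KafkaStorage".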
    Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
            .editSpec()
                .editKafka()
                    .withStorage(jbod)
                .endKafka()
            .endSpec()
            .build();
    KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, ephemeral, replicas);

    // Storage is reverted
    assertThat(kc.getStorage(), is(ephemeral));

    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));

    kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
            .editSpec()
                .editKafka()
                    .withStorage(jbod)
                .endKafka()
            .endSpec()
            .build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, persistent, replicas);

    // Storage is reverted
    assertThat(kc.getStorage(), is(persistent));

    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));

    kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
            .editSpec()
                .editKafka()
                    .withStorage(ephemeral)
                .endKafka()
            .endSpec()
            .build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, jbod, replicas);

    // Storage is reverted
    assertThat(kc.getStorage(), is(jbod));

    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));

    kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap()))
            .editSpec()
                .editKafka()
                    .withStorage(persistent)
                .endKafka()
            .endSpec()
            .build();
    kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, jbod, replicas);

    // Storage is reverted
    assertThat(kc.getStorage(), is(jbod));

    // Warning status condition is set
    assertThat(kc.getWarningConditions().size(), is(1));
    assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage"));
}
Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi-kafka-operator by strimzi.
The class StorageDiffTest, method testCrossDiff.
@ParallelTest
public void testCrossDiff() {
    Storage jbod = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage ephemeral = new EphemeralStorageBuilder().build();
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();
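    // Note (added commentary, not in the original source): every pair below compares storage of a
    // different type (JBOD vs. ephemeral, persistent vs. ephemeral, JBOD vs. persistent), so each
    // diff is expected to report a type change and to be non-empty, while no volumes are considered
    // added or removed.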
    StorageDiff diffJbodEphemeral = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, ephemeral, 3, 3);
    StorageDiff diffPersistentEphemeral = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, ephemeral, 3, 3);
    StorageDiff diffJbodPersistent = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, persistent, 3, 3);

    assertThat(diffJbodEphemeral.changesType(), is(true));
    assertThat(diffPersistentEphemeral.changesType(), is(true));
    assertThat(diffJbodPersistent.changesType(), is(true));

    assertThat(diffJbodEphemeral.isEmpty(), is(false));
    assertThat(diffPersistentEphemeral.isEmpty(), is(false));
    assertThat(diffJbodPersistent.isEmpty(), is(false));

    assertThat(diffJbodEphemeral.isVolumesAddedOrRemoved(), is(false));
    assertThat(diffPersistentEphemeral.isVolumesAddedOrRemoved(), is(false));
    assertThat(diffJbodPersistent.isVolumesAddedOrRemoved(), is(false));
}