Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
From the class KafkaBrokerConfigurationBuilderTest, method testEphemeralStorageLogDirs.
@ParallelTest
public void testEphemeralStorageLogDirs() {
    Storage storage = new EphemeralStorageBuilder()
            .withSizeLimit("5Gi")
            .build();

    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
            .withLogDirs(VolumeUtils.createVolumeMounts(storage, "/var/lib/kafka", false))
            .build();

    assertThat(configuration, isEquivalent("log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}"));
}
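For contrast, here is a sketch of the JBOD variant of the same check, reusing the builder classes from the snippets on this page. The method name and the expected per-volume log directories (data-0, data-1) are assumptions extrapolated from the single-volume output above, not copied from the Strimzi test suite.

@ParallelTest
public void testJbodStorageLogDirsSketch() {
    // Two persistent volumes; with JBOD, each volume is expected to contribute its own log directory
    Storage storage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withId(1).withSize("100Gi").build())
            .build();

    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
            .withLogDirs(VolumeUtils.createVolumeMounts(storage, "/var/lib/kafka", false))
            .build();

    // Assumed output: one log dir per JBOD volume, keyed by volume id
    assertThat(configuration, isEquivalent("log.dirs=/var/lib/kafka/data-0/kafka-log${STRIMZI_BROKER_ID},/var/lib/kafka/data-1/kafka-log${STRIMZI_BROKER_ID}"));
}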
Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
From the class KafkaAssemblyOperatorMockTest, method testReconcileUpdatesKafkaPersistentVolumes.
@ParameterizedTest
@MethodSource("data")
public void testReconcileUpdatesKafkaPersistentVolumes(Params params, VertxTestContext context) {
    init(params);
    assumeTrue(kafkaStorage instanceof PersistentClaimStorage,
            "Parameterized Test only runs for Params with Kafka Persistent storage");

    String originalStorageClass = Storage.storageClass(kafkaStorage);

    Checkpoint async = context.checkpoint();
    initialReconcile(context)
            .onComplete(context.succeeding(v -> context.verify(() -> {
                assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalStorageClass);

                // Try to update the storage class
                String changedClass = originalStorageClass + "2";
                Kafka patchedPersistenceKafka = new KafkaBuilder(cluster)
                        .editSpec()
                            .editKafka()
                                .withNewPersistentClaimStorage()
                                    .withStorageClass(changedClass)
                                    .withSize("123")
                                .endPersistentClaimStorage()
                            .endKafka()
                        .endSpec()
                        .build();
                kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(patchedPersistenceKafka);
                LOGGER.info("Updating with changed storage class");
            })))
            .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                // Check the storage class was not changed
                assertStorageClass(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalStorageClass);
                async.flag();
            })));
}
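The assertion that the storage class stays unchanged is enforced by the storage diffing in the model layer, shown in the ZookeeperCluster.fromCrd snippet further down this page. A minimal sketch of that mechanism in isolation; the replica counts are illustrative:

// A storage-class change between reconciliations is not an allowed change,
// so the diff is non-empty and the operator keeps the old configuration.
Storage oldStorage = new PersistentClaimStorageBuilder()
        .withStorageClass("foo").withSize("123").build();
Storage newStorage = new PersistentClaimStorageBuilder()
        .withStorageClass("foo2").withSize("123").build();

StorageDiff diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, oldStorage, newStorage, 3, 3);
assertThat(diff.isEmpty(), is(false));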
Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
From the class KafkaAssemblyOperatorMockTest, method data.
public static Iterable<KafkaAssemblyOperatorMockTest.Params> data() {
    int[] replicas = {1, 3};
    int[] storageOptions = {0, 1, 2};

    Storage[] kafkaStorageConfigs = {
        new EphemeralStorage(),
        new PersistentClaimStorageBuilder()
                .withSize("123")
                .withStorageClass("foo")
                .withDeleteClaim(true)
                .build(),
        new PersistentClaimStorageBuilder()
                .withSize("123")
                .withStorageClass("foo")
                .withDeleteClaim(false)
                .build()
    };

    SingleVolumeStorage[] zkStorageConfigs = {
        new EphemeralStorage(),
        new PersistentClaimStorageBuilder()
                .withSize("123")
                .withStorageClass("foo")
                .withDeleteClaim(true)
                .build(),
        new PersistentClaimStorageBuilder()
                .withSize("123")
                .withStorageClass("foo")
                .withDeleteClaim(false)
                .build()
    };
    ResourceRequirements[] resources = {
        new ResourceRequirementsBuilder()
                .addToLimits("cpu", new Quantity("5000m"))
                .addToLimits("memory", new Quantity("5000m"))
                .addToRequests("cpu", new Quantity("5000m"))
                .addToRequests("memory", new Quantity("5000m"))
                .build()
    };
    List<KafkaAssemblyOperatorMockTest.Params> result = new ArrayList<>();
    for (int replicaCount : replicas) {
        for (int storage : storageOptions) {
            for (ResourceRequirements resource : resources) {
                result.add(new KafkaAssemblyOperatorMockTest.Params(
                        replicaCount, zkStorageConfigs[storage],
                        replicaCount, kafkaStorageConfigs[storage],
                        resource));
            }
        }
    }
    return result;
}
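The shared storage index keeps the ZooKeeper and Kafka storage configurations of the same kind paired, so the matrix expands to 2 replica counts x 3 storage options x 1 resource profile. A quick sketch of that expectation:

// Sketch: materialize the Iterable and check the 2 x 3 x 1 = 6 expected combinations
List<KafkaAssemblyOperatorMockTest.Params> params = new ArrayList<>();
data().forEach(params::add);
assertThat(params.size(), is(6));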
Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
From the class ModelUtilsTest, method testStorageSerializationAndDeserialization.
@ParallelTest
public void testStorageSerializationAndDeserialization() {
    Storage jbod = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(),
            new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build())
            .build();
    Storage ephemeral = new EphemeralStorageBuilder().build();
    Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build();

    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(jbod)), is(jbod));
    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(ephemeral)), is(ephemeral));
    assertThat(ModelUtils.decodeStorageFromJson(ModelUtils.encodeStorageToJson(persistent)), is(persistent));
}
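This round-trip is what lets the operator persist a storage configuration as a string (for example, in a resource annotation) and recover an equal Storage object later. A minimal usage sketch, assuming only the two ModelUtils methods exercised above; the JSON shape in the comment is an assumption:

Storage original = new PersistentClaimStorageBuilder()
        .withStorageClass("gp2-ssd")
        .withSize("100Gi")
        .withDeleteClaim(false)
        .build();

// Assumed shape: something like {"type":"persistent-claim","size":"100Gi",...}
String json = ModelUtils.encodeStorageToJson(original);

// Decoding yields an equal Storage instance, as the test above asserts
Storage restored = ModelUtils.decodeStorageFromJson(json);
assertThat(restored, is(original));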
Use of io.strimzi.api.kafka.model.storage.Storage in project strimzi by strimzi.
From the class ZookeeperCluster, method fromCrd.
@SuppressWarnings({ "checkstyle:MethodLength", "checkstyle:CyclomaticComplexity" })
public static ZookeeperCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
    ZookeeperCluster zk = new ZookeeperCluster(reconciliation, kafkaAssembly);
    zk.setOwnerReference(kafkaAssembly);

    ZookeeperClusterSpec zookeeperClusterSpec = kafkaAssembly.getSpec().getZookeeper();
    int replicas = zookeeperClusterSpec.getReplicas();
    if (replicas <= 0) {
        replicas = ZookeeperClusterSpec.DEFAULT_REPLICAS;
    }
    if (replicas == 1 && zookeeperClusterSpec.getStorage() != null && "ephemeral".equals(zookeeperClusterSpec.getStorage().getType())) {
        LOGGER.warnCr(reconciliation, "A ZooKeeper cluster with a single replica and ephemeral storage will be in a defective state after any restart or rolling update. It is recommended that a minimum of three replicas are used.");
    }
    zk.setReplicas(replicas);

    // Get the ZooKeeper version information from either the CRD or from the default setting
    KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka();
    String version = versions.supportedVersion(kafkaClusterSpec != null ? kafkaClusterSpec.getVersion() : null).zookeeperVersion();
    zk.setVersion(version);

    String image = zookeeperClusterSpec.getImage();
    if (image == null) {
        image = versions.kafkaImage(kafkaClusterSpec != null ? kafkaClusterSpec.getImage() : null,
                kafkaClusterSpec != null ? kafkaClusterSpec.getVersion() : null);
    }
    zk.setImage(image);

    if (zookeeperClusterSpec.getReadinessProbe() != null) {
        zk.setReadinessProbe(zookeeperClusterSpec.getReadinessProbe());
    }
    if (zookeeperClusterSpec.getLivenessProbe() != null) {
        zk.setLivenessProbe(zookeeperClusterSpec.getLivenessProbe());
    }

    Logging logging = zookeeperClusterSpec.getLogging();
    zk.setLogging(logging == null ? new InlineLogging() : logging);

    zk.setGcLoggingEnabled(zookeeperClusterSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : zookeeperClusterSpec.getJvmOptions().isGcLoggingEnabled());
    if (zookeeperClusterSpec.getJvmOptions() != null) {
        zk.setJavaSystemProperties(zookeeperClusterSpec.getJvmOptions().getJavaSystemProperties());
    }

    // Parse different types of metrics configurations
    ModelUtils.parseMetrics(zk, zookeeperClusterSpec);

    // Validate the desired storage and revert to the old configuration if it contains disallowed changes
    if (oldStorage != null) {
        Storage newStorage = zookeeperClusterSpec.getStorage();
        AbstractModel.validatePersistentStorage(newStorage);

        StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, zookeeperClusterSpec.getReplicas());
        if (!diff.isEmpty()) {
            LOGGER.warnCr(reconciliation, "Only the following changes to ZooKeeper storage are allowed: "
                    + "changing the deleteClaim flag, "
                    + "changing overrides to nodes which do not exist yet "
                    + "and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
            LOGGER.warnCr(reconciliation, "The desired ZooKeeper storage configuration in the custom resource {}/{} contains changes which are not allowed. As "
                    + "a result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());

            Condition warning = StatusUtils.buildWarningCondition("ZooKeeperStorage",
                    "The desired ZooKeeper storage configuration contains changes which are not allowed. As a "
                    + "result, all storage changes will be ignored. Use DEBUG level logging for more information "
                    + "about the detected changes.");
            zk.addWarningCondition(warning);

            zk.setStorage(oldStorage);
        } else {
            zk.setStorage(newStorage);
        }
    } else {
        zk.setStorage(zookeeperClusterSpec.getStorage());
    }

    zk.setConfiguration(new ZookeeperConfiguration(reconciliation, zookeeperClusterSpec.getConfig().entrySet()));
    zk.setResources(zookeeperClusterSpec.getResources());
    zk.setJvmOptions(zookeeperClusterSpec.getJvmOptions());

    if (zookeeperClusterSpec.getJmxOptions() != null) {
        zk.setJmxEnabled(Boolean.TRUE);
        AuthenticationUtils.configureZookeeperJmxOptions(zookeeperClusterSpec.getJmxOptions().getAuthentication(), zk);
    }

    // Apply the StatefulSet, PodSet, pod, service, PVC, container, service account and JMX secret templates
    if (zookeeperClusterSpec.getTemplate() != null) {
        ZookeeperClusterTemplate template = zookeeperClusterSpec.getTemplate();

        if (template.getStatefulset() != null) {
            if (template.getStatefulset().getPodManagementPolicy() != null) {
                zk.templatePodManagementPolicy = template.getStatefulset().getPodManagementPolicy();
            }
            if (template.getStatefulset().getMetadata() != null) {
                zk.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
                zk.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
            }
        }

        if (template.getPodSet() != null && template.getPodSet().getMetadata() != null) {
            zk.templatePodSetLabels = template.getPodSet().getMetadata().getLabels();
            zk.templatePodSetAnnotations = template.getPodSet().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodTemplate(zk, template.getPod());
        ModelUtils.parseInternalServiceTemplate(zk, template.getClientService());
        ModelUtils.parseInternalHeadlessServiceTemplate(zk, template.getNodesService());

        if (template.getPersistentVolumeClaim() != null && template.getPersistentVolumeClaim().getMetadata() != null) {
            zk.templatePersistentVolumeClaimLabels = Util.mergeLabelsOrAnnotations(template.getPersistentVolumeClaim().getMetadata().getLabels(), zk.templateStatefulSetLabels);
            zk.templatePersistentVolumeClaimAnnotations = template.getPersistentVolumeClaim().getMetadata().getAnnotations();
        }

        if (template.getZookeeperContainer() != null && template.getZookeeperContainer().getEnv() != null) {
            zk.templateZookeeperContainerEnvVars = template.getZookeeperContainer().getEnv();
        }
        if (template.getZookeeperContainer() != null && template.getZookeeperContainer().getSecurityContext() != null) {
            zk.templateZookeeperContainerSecurityContext = template.getZookeeperContainer().getSecurityContext();
        }

        if (template.getServiceAccount() != null && template.getServiceAccount().getMetadata() != null) {
            zk.templateServiceAccountLabels = template.getServiceAccount().getMetadata().getLabels();
            zk.templateServiceAccountAnnotations = template.getServiceAccount().getMetadata().getAnnotations();
        }

        if (template.getJmxSecret() != null && template.getJmxSecret().getMetadata() != null) {
            zk.templateJmxSecretLabels = template.getJmxSecret().getMetadata().getLabels();
            zk.templateJmxSecretAnnotations = template.getJmxSecret().getMetadata().getAnnotations();
        }

        ModelUtils.parsePodDisruptionBudgetTemplate(zk, template.getPodDisruptionBudget());
    }

    zk.templatePodLabels = Util.mergeLabelsOrAnnotations(zk.templatePodLabels, DEFAULT_POD_LABELS);

    return zk;
}
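A sketch of the two typical call sites for this factory, assuming the operator passes no previous state on the first reconciliation and the previously observed storage and replica count afterwards; the getStorage() and getReplicas() accessors are assumed counterparts of the setters used above:

// First reconciliation: nothing to diff against, the desired storage is taken as-is
ZookeeperCluster initial = ZookeeperCluster.fromCrd(
        reconciliation, kafkaAssembly, versions, null, 0);

// Later reconciliations: pass the previously observed storage and replica count
// so that disallowed storage changes are detected and reverted (see the diff handling above)
ZookeeperCluster updated = ZookeeperCluster.fromCrd(
        reconciliation, kafkaAssembly, versions, initial.getStorage(), initial.getReplicas());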