Example 1 with PersistentClaimStorage

Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi by strimzi.

From class KafkaAssemblyOperatorMockTest, method testReconcileUpdatesKafkaWithChangedDeleteClaim.

/**
 * Test that we can change the deleteClaim flag, and that it's honoured
 */
@ParameterizedTest
@MethodSource("data")
public void testReconcileUpdatesKafkaWithChangedDeleteClaim(Params params, VertxTestContext context) {
    init(params);
    assumeTrue(kafkaStorage instanceof PersistentClaimStorage, "Kafka delete claims do not apply to non-persistent volumes");
    Map<String, String> kafkaLabels = new HashMap<>();
    kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND);
    kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME);
    kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, KafkaCluster.kafkaClusterName(CLUSTER_NAME));
    Map<String, String> zkLabels = new HashMap<>();
    zkLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND);
    zkLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME);
    zkLabels.put(Labels.STRIMZI_NAME_LABEL, ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME));
    AtomicReference<Set<String>> kafkaPvcs = new AtomicReference<>();
    AtomicReference<Set<String>> zkPvcs = new AtomicReference<>();
    AtomicBoolean originalKafkaDeleteClaim = new AtomicBoolean();
    Checkpoint async = context.checkpoint();
    initialReconcile(context).onComplete(context.succeeding(v -> context.verify(() -> {
        kafkaPvcs.set(client.persistentVolumeClaims().inNamespace(NAMESPACE).withLabels(kafkaLabels).list().getItems().stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()));
        zkPvcs.set(client.persistentVolumeClaims().inNamespace(NAMESPACE).withLabels(zkLabels).list().getItems().stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()));
        originalKafkaDeleteClaim.set(deleteClaim(kafkaStorage));
        // Update the persistent storage configuration, flipping the deleteClaim flag
        Kafka updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka().withNewPersistentClaimStorage().withSize("123").withStorageClass("foo").withDeleteClaim(!originalKafkaDeleteClaim.get()).endPersistentClaimStorage().endKafka().endSpec().build();
        kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(updatedStorageKafka);
        LOGGER.info("Updating with changed delete claim");
    }))).compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))).onComplete(context.succeeding(v -> context.verify(() -> {
        // check that the new delete-claim annotation is on the PVCs
        for (String pvcName : kafkaPvcs.get()) {
            assertThat(client.persistentVolumeClaims().inNamespace(NAMESPACE).withName(pvcName).get().getMetadata().getAnnotations(), hasEntry(AbstractModel.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(!originalKafkaDeleteClaim.get())));
        }
        kafkaAssembly(NAMESPACE, CLUSTER_NAME).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
        LOGGER.info("Reconciling again -> delete");
    }))).compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))).onComplete(context.succeeding(v -> async.flag()));
}
Also used : Quantity(io.fabric8.kubernetes.api.model.Quantity) CoreMatchers.is(org.hamcrest.CoreMatchers.is) DeletionPropagation(io.fabric8.kubernetes.api.model.DeletionPropagation) Storage(io.strimzi.api.kafka.model.storage.Storage) StatefulSetStatus(io.fabric8.kubernetes.api.model.apps.StatefulSetStatus) PodTemplateSpec(io.fabric8.kubernetes.api.model.PodTemplateSpec) MockKube(io.strimzi.test.mockkube.MockKube) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) AfterAll(org.junit.jupiter.api.AfterAll) Resource(io.fabric8.kubernetes.client.dsl.Resource) PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) ResourceRequirements(io.fabric8.kubernetes.api.model.ResourceRequirements) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) Ca(io.strimzi.operator.cluster.model.Ca) BeforeAll(org.junit.jupiter.api.BeforeAll) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) Matchers.nullValue(org.hamcrest.Matchers.nullValue) ResourceOperatorSupplier(io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier) ResourceUtils(io.strimzi.operator.cluster.ResourceUtils) AbstractModel(io.strimzi.operator.cluster.model.AbstractModel) StatefulSetOperator(io.strimzi.operator.cluster.operator.resource.StatefulSetOperator) MethodSource(org.junit.jupiter.params.provider.MethodSource) KafkaVersion(io.strimzi.operator.cluster.model.KafkaVersion) Set(java.util.Set) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) VertxExtension(io.vertx.junit5.VertxExtension) Future(io.vertx.core.Future) Collectors(java.util.stream.Collectors) StatefulSetSpec(io.fabric8.kubernetes.api.model.apps.StatefulSetSpec) List(java.util.List) EphemeralStorage(io.strimzi.api.kafka.model.storage.EphemeralStorage) KafkaConfiguration(io.strimzi.operator.cluster.model.KafkaConfiguration) Labels(io.strimzi.operator.common.model.Labels) Logger(org.apache.logging.log4j.Logger) StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) PasswordGenerator(io.strimzi.operator.common.PasswordGenerator) PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim) Optional(java.util.Optional) Checkpoint(io.vertx.junit5.Checkpoint) PlatformFeaturesAvailability(io.strimzi.operator.PlatformFeaturesAvailability) ClusterOperatorConfig(io.strimzi.operator.cluster.ClusterOperatorConfig) MockCertManager(io.strimzi.operator.common.operator.MockCertManager) VertxTestContext(io.vertx.junit5.VertxTestContext) KafkaList(io.strimzi.api.kafka.KafkaList) Matchers.aMapWithSize(org.hamcrest.Matchers.aMapWithSize) Container(io.fabric8.kubernetes.api.model.Container) ResourceRequirementsBuilder(io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) SingleVolumeStorage(io.strimzi.api.kafka.model.storage.SingleVolumeStorage) HashMap(java.util.HashMap) Storage.deleteClaim(io.strimzi.api.kafka.model.storage.Storage.deleteClaim) Crds(io.strimzi.api.kafka.Crds) ZookeeperCluster(io.strimzi.operator.cluster.model.ZookeeperCluster) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) FeatureGates(io.strimzi.operator.cluster.FeatureGates) ArrayList(java.util.ArrayList) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) KafkaCluster(io.strimzi.operator.cluster.model.KafkaCluster) ClusterOperator(io.strimzi.operator.cluster.ClusterOperator) 
KafkaVersionTestUtils(io.strimzi.operator.cluster.KafkaVersionTestUtils) PodSpec(io.fabric8.kubernetes.api.model.PodSpec) ZookeeperLeaderFinder(io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder) Assumptions.assumeTrue(org.junit.jupiter.api.Assumptions.assumeTrue) Matchers.hasSize(org.hamcrest.Matchers.hasSize) CustomResourceDefinition(io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition) Collections.singletonMap(java.util.Collections.singletonMap) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Volume(io.fabric8.kubernetes.api.model.Volume) Matchers.hasEntry(org.hamcrest.Matchers.hasEntry) StrimziPodSetList(io.strimzi.api.kafka.StrimziPodSetList) KubernetesVersion(io.strimzi.operator.KubernetesVersion) Vertx(io.vertx.core.Vertx) StatefulSet(io.fabric8.kubernetes.api.model.apps.StatefulSet) Reconciliation(io.strimzi.operator.common.Reconciliation) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) Kafka(io.strimzi.api.kafka.model.Kafka) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) Set(java.util.Set) StrimziPodSet(io.strimzi.api.kafka.model.StrimziPodSet) StatefulSet(io.fabric8.kubernetes.api.model.apps.StatefulSet) HashMap(java.util.HashMap) Kafka(io.strimzi.api.kafka.model.Kafka) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Checkpoint(io.vertx.junit5.Checkpoint) PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) Reconciliation(io.strimzi.operator.common.Reconciliation) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
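
The storage update above is expressed through KafkaBuilder's nested editor; the equivalent standalone object can be built with the PersistentClaimStorageBuilder listed in the imports. A minimal sketch, with values mirroring the test (the assignment itself is illustrative, not part of the test):

PersistentClaimStorage updatedStorage = new PersistentClaimStorageBuilder()
        // Same values as the nested withNewPersistentClaimStorage() call in the test
        .withSize("123")
        .withStorageClass("foo")
        // The flag whose change the reconcile must propagate to the PVCs
        .withDeleteClaim(true)
        .build();

After the second reconcile, the operator records the flag on each PVC under the AbstractModel.ANNO_STRIMZI_IO_DELETE_CLAIM annotation, which is exactly what the assertion loop above checks.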

Example 2 with PersistentClaimStorage

Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi by strimzi.

From class KafkaAssemblyOperatorMockTest, method testReconcileUpdatesKafkaStorageType.

@ParameterizedTest
@MethodSource("data")
public void testReconcileUpdatesKafkaStorageType(Params params, VertxTestContext context) {
    init(params);
    AtomicReference<List<PersistentVolumeClaim>> originalPVCs = new AtomicReference<>();
    AtomicReference<List<Volume>> originalVolumes = new AtomicReference<>();
    AtomicReference<List<Container>> originalInitContainers = new AtomicReference<>();
    Checkpoint async = context.checkpoint();
    initialReconcile(context).onComplete(context.succeeding(v -> context.verify(() -> {
        originalPVCs.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get()).map(StatefulSet::getSpec).map(StatefulSetSpec::getVolumeClaimTemplates).orElse(new ArrayList<>()));
        originalVolumes.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get()).map(StatefulSet::getSpec).map(StatefulSetSpec::getTemplate).map(PodTemplateSpec::getSpec).map(PodSpec::getVolumes).orElse(new ArrayList<>()));
        originalInitContainers.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get()).map(StatefulSet::getSpec).map(StatefulSetSpec::getTemplate).map(PodTemplateSpec::getSpec).map(PodSpec::getInitContainers).orElse(new ArrayList<>()));
        // Update the storage type
        // ephemeral -> persistent
        // or
        // persistent -> ephemeral
        Kafka updatedStorageKafka = null;
        if (kafkaStorage instanceof EphemeralStorage) {
            updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka().withNewPersistentClaimStorage().withSize("123").endPersistentClaimStorage().endKafka().endSpec().build();
        } else if (kafkaStorage instanceof PersistentClaimStorage) {
            updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka().withNewEphemeralStorage().endEphemeralStorage().endKafka().endSpec().build();
        } else {
            context.failNow(new Exception("If storage is not ephemeral or persistent something has gone wrong"));
        }
        kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(updatedStorageKafka);
        LOGGER.info("Updating with changed storage type");
    }))).compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))).onComplete(context.succeeding(v -> context.verify(() -> {
        // Check the Volumes and PVCs were not changed
        assertPVCs(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalPVCs.get());
        assertVolumes(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalVolumes.get());
        assertInitContainers(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalInitContainers.get());
        async.flag();
    })));
}
Also used : StatefulSetSpec(io.fabric8.kubernetes.api.model.apps.StatefulSetSpec) PodSpec(io.fabric8.kubernetes.api.model.PodSpec) ArrayList(java.util.ArrayList) Kafka(io.strimzi.api.kafka.model.Kafka) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) Checkpoint(io.vertx.junit5.Checkpoint) PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) Reconciliation(io.strimzi.operator.common.Reconciliation) List(java.util.List) KafkaList(io.strimzi.api.kafka.KafkaList) StrimziPodSetList(io.strimzi.api.kafka.StrimziPodSetList) EphemeralStorage(io.strimzi.api.kafka.model.storage.EphemeralStorage) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
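
The assertions above pass because a change of storage type (ephemeral to persistent, or the reverse) is not applied by the operator, so the original PVCs, volumes and init containers are preserved. A hypothetical helper, not taken from the operator code, showing the kind of type check this test scenario revolves around:

// Hypothetical helper (illustration only): detects the storage-type switch that the
// operator refuses to apply and that the assertions above expect to be ignored.
static boolean isStorageTypeChanged(Storage current, Storage desired) {
    return current != null && desired != null
            && !current.getClass().equals(desired.getClass());
}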

Example 3 with PersistentClaimStorage

Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi by strimzi.

From class Capacity, method generateDiskCapacity.

/**
 * Generate total disk capacity using the supplied storage configuration
 *
 * @param storage Storage configuration for Kafka cluster
 * @return Disk capacity per broker as a Double
 */
public static Double generateDiskCapacity(Storage storage) {
    if (storage instanceof PersistentClaimStorage) {
        return getSizeInMiB(((PersistentClaimStorage) storage).getSize());
    } else if (storage instanceof EphemeralStorage) {
        if (((EphemeralStorage) storage).getSizeLimit() != null) {
            return getSizeInMiB(((EphemeralStorage) storage).getSizeLimit());
        } else {
            return DEFAULT_BROKER_DISK_CAPACITY_IN_MIB;
        }
    } else if (storage instanceof JbodStorage) {
        // The value generated here for JBOD storage is used for tracking the total
        // disk capacity per broker. This will NOT be used for the final disk capacity
        // configuration since JBOD storage requires a special disk configuration.
        List<SingleVolumeStorage> volumeList = ((JbodStorage) storage).getVolumes();
        double size = 0;
        for (SingleVolumeStorage volume : volumeList) {
            size += generateDiskCapacity(volume);
        }
        return size;
    } else {
        throw new IllegalStateException("The declared storage '" + storage.getType() + "' is not supported");
    }
}
Also used : SingleVolumeStorage(io.strimzi.api.kafka.model.storage.SingleVolumeStorage) PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) EphemeralStorage(io.strimzi.api.kafka.model.storage.EphemeralStorage) JbodStorage(io.strimzi.api.kafka.model.storage.JbodStorage)
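
A brief usage sketch for generateDiskCapacity, building the input with the PersistentClaimStorageBuilder referenced elsewhere on this page; the concrete MiB figure depends on getSizeInMiB, so the comment is indicative only:

// Illustration only: capacity of a single 100Gi persistent volume.
Storage storage = new PersistentClaimStorageBuilder()
        .withSize("100Gi")
        .build();
Double capacityMiB = Capacity.generateDiskCapacity(storage);
// getSizeInMiB converts the Kubernetes quantity "100Gi" to MiB (102400.0 with a binary conversion);
// an EphemeralStorage without a sizeLimit would instead fall back to DEFAULT_BROKER_DISK_CAPACITY_IN_MIB.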

Example 4 with PersistentClaimStorage

Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.

From class StorageDiff, method isOverrideChangeAllowed.

/**
 * Validates the changes to the storage overrides and decides whether they are allowed or not. Allowed changes are
 * those to nodes which will be added, removed or which do not exist yet.
 *
 * @param current           Current Storage configuration
 * @param desired           New storage configuration
 * @param currentReplicas   Current number of replicas
 * @param desiredReplicas   Desired number of replicas
 * @return                  True if only allowed override changes were done, false otherwise
 */
private boolean isOverrideChangeAllowed(Storage current, Storage desired, int currentReplicas, int desiredReplicas) {
    List<PersistentClaimStorageOverride> currentOverrides = ((PersistentClaimStorage) current).getOverrides();
    if (currentOverrides == null) {
        currentOverrides = Collections.emptyList();
    }
    List<PersistentClaimStorageOverride> desiredOverrides = ((PersistentClaimStorage) desired).getOverrides();
    if (desiredOverrides == null) {
        desiredOverrides = Collections.emptyList();
    }
    // We care only about the nodes which existed before this reconciliation and will still exist after it
    int existedAndWillExist = Math.min(currentReplicas, desiredReplicas);
    for (int i = 0; i < existedAndWillExist; i++) {
        int nodeId = i;
        PersistentClaimStorageOverride currentOverride = currentOverrides.stream().filter(override -> override.getBroker() == nodeId).findFirst().orElse(null);
        PersistentClaimStorageOverride desiredOverride = desiredOverrides.stream().filter(override -> override.getBroker() == nodeId).findFirst().orElse(null);
        if (currentOverride != null && desiredOverride != null) {
            // Both overrides exist but are not equal
            if (!currentOverride.equals(desiredOverride)) {
                return false;
            }
        } else if (currentOverride != null || desiredOverride != null) {
            // One of them is null while the other is not null => they differ
            return false;
        }
    }
    return true;
}
Also used : PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) PersistentClaimStorageOverride(io.strimzi.api.kafka.model.storage.PersistentClaimStorageOverride)
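
To make the rule concrete, here is a hedged illustration; it assumes the standard setBroker/setStorageClass setters on the PersistentClaimStorageOverride CRD class, which are not shown in the snippet above:

// Current: broker 1 uses storage class "standard"; desired: broker 1 switches to "fast".
PersistentClaimStorageOverride currentOverride = new PersistentClaimStorageOverride();
currentOverride.setBroker(1);
currentOverride.setStorageClass("standard");

PersistentClaimStorageOverride desiredOverride = new PersistentClaimStorageOverride();
desiredOverride.setBroker(1);
desiredOverride.setStorageClass("fast");

// With currentReplicas = desiredReplicas = 3, broker 1 existed before and still exists after the
// reconciliation, the two overrides differ, so isOverrideChangeAllowed(...) returns false.
// If the cluster were being scaled down to desiredReplicas = 1, broker 1 would fall outside
// min(currentReplicas, desiredReplicas) and the same change would be allowed.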

Example 5 with PersistentClaimStorage

Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.

From class Capacity, method generateJbodDiskCapacity.

/**
 * Generate JBOD disk capacity configuration for a broker using the supplied storage configuration
 *
 * @param storage Storage configuration for Kafka cluster
 * @param idx Index of the broker
 * @return Disk capacity configuration value as a JsonObject for broker idx
 */
private JsonObject generateJbodDiskCapacity(Storage storage, int idx) {
    JsonObject json = new JsonObject();
    String size = "";
    for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
        String name = VolumeUtils.createVolumePrefix(volume.getId(), true);
        String path = AbstractModel.KAFKA_MOUNT_PATH + "/" + name + "/" + AbstractModel.KAFKA_LOG_DIR + idx;
        if (volume instanceof PersistentClaimStorage) {
            size = ((PersistentClaimStorage) volume).getSize();
        } else if (volume instanceof EphemeralStorage) {
            size = ((EphemeralStorage) volume).getSizeLimit();
        }
        json.put(path, String.valueOf(Capacity.getSizeInMiB(size)));
    }
    return json;
}
Also used : SingleVolumeStorage(io.strimzi.api.kafka.model.storage.SingleVolumeStorage) PersistentClaimStorage(io.strimzi.api.kafka.model.storage.PersistentClaimStorage) JsonObject(io.vertx.core.json.JsonObject) EphemeralStorage(io.strimzi.api.kafka.model.storage.EphemeralStorage) JbodStorage(io.strimzi.api.kafka.model.storage.JbodStorage)
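
For a broker with idx 0 and two JBOD volumes, the returned JsonObject maps one log-dir path per volume to its capacity in MiB. A sketch of the expected shape; the real keys are assembled from AbstractModel.KAFKA_MOUNT_PATH, AbstractModel.KAFKA_LOG_DIR and VolumeUtils.createVolumePrefix, so the literal paths and converted values below are assumptions for illustration:

// Illustration only: two JBOD volumes (ids 0 and 1) for broker idx 0.
JsonObject expected = new JsonObject()
        .put("/var/lib/kafka/data-0/kafka-log0", "102400.0")   // 100Gi persistent volume, in MiB
        .put("/var/lib/kafka/data-1/kafka-log0", "51200.0");   // 50Gi ephemeral sizeLimit, in MiB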

Aggregations

PersistentClaimStorage (io.strimzi.api.kafka.model.storage.PersistentClaimStorage): 26 usages
SingleVolumeStorage (io.strimzi.api.kafka.model.storage.SingleVolumeStorage): 20 usages
ArrayList (java.util.ArrayList): 14 usages
JbodStorage (io.strimzi.api.kafka.model.storage.JbodStorage): 13 usages
PersistentVolumeClaim (io.fabric8.kubernetes.api.model.PersistentVolumeClaim): 12 usages
EphemeralStorage (io.strimzi.api.kafka.model.storage.EphemeralStorage): 12 usages
Kafka (io.strimzi.api.kafka.model.Kafka): 10 usages
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder): 10 usages
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 10 usages
PersistentClaimStorageBuilder (io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder): 10 usages
Checkpoint (io.vertx.junit5.Checkpoint): 10 usages
Volume (io.fabric8.kubernetes.api.model.Volume): 8 usages
CustomResourceDefinition (io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition): 8 usages
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 8 usages
Crds (io.strimzi.api.kafka.Crds): 8 usages
KafkaList (io.strimzi.api.kafka.KafkaList): 8 usages
StrimziPodSetList (io.strimzi.api.kafka.StrimziPodSetList): 8 usages
StrimziPodSet (io.strimzi.api.kafka.model.StrimziPodSet): 8 usages
KafkaListenerType (io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType): 8 usages
Storage (io.strimzi.api.kafka.model.storage.Storage): 8 usages