Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi by strimzi.
From the class KafkaAssemblyOperatorMockTest, the method testReconcileUpdatesKafkaWithChangedDeleteClaim.
/**
 * Test that we can change the deleteClaim flag, and that it's honoured
 */
@ParameterizedTest
@MethodSource("data")
public void testReconcileUpdatesKafkaWithChangedDeleteClaim(Params params, VertxTestContext context) {
    init(params);
    assumeTrue(kafkaStorage instanceof PersistentClaimStorage,
            "Kafka delete claims do not apply to non-persistent volumes");

    Map<String, String> kafkaLabels = new HashMap<>();
    kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND);
    kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME);
    kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, KafkaCluster.kafkaClusterName(CLUSTER_NAME));

    Map<String, String> zkLabels = new HashMap<>();
    zkLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND);
    zkLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME);
    zkLabels.put(Labels.STRIMZI_NAME_LABEL, ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME));

    AtomicReference<Set<String>> kafkaPvcs = new AtomicReference<>();
    AtomicReference<Set<String>> zkPvcs = new AtomicReference<>();
    AtomicBoolean originalKafkaDeleteClaim = new AtomicBoolean();

    Checkpoint async = context.checkpoint();
    initialReconcile(context)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            kafkaPvcs.set(client.persistentVolumeClaims().inNamespace(NAMESPACE).withLabels(kafkaLabels).list().getItems().stream()
                    .map(pvc -> pvc.getMetadata().getName())
                    .collect(Collectors.toSet()));
            zkPvcs.set(client.persistentVolumeClaims().inNamespace(NAMESPACE).withLabels(zkLabels).list().getItems().stream()
                    .map(pvc -> pvc.getMetadata().getName())
                    .collect(Collectors.toSet()));
            originalKafkaDeleteClaim.set(deleteClaim(kafkaStorage));

            // Flip the deleteClaim flag (the size and storage class are changed as well)
            Kafka updatedStorageKafka = new KafkaBuilder(cluster)
                    .editSpec()
                        .editKafka()
                            .withNewPersistentClaimStorage()
                                .withSize("123")
                                .withStorageClass("foo")
                                .withDeleteClaim(!originalKafkaDeleteClaim.get())
                            .endPersistentClaimStorage()
                        .endKafka()
                    .endSpec()
                    .build();
            kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(updatedStorageKafka);
            LOGGER.info("Updating with changed delete claim");
        })))
        .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // Check that the new delete-claim annotation is on the PVCs
            for (String pvcName : kafkaPvcs.get()) {
                assertThat(client.persistentVolumeClaims().inNamespace(NAMESPACE).withName(pvcName).get().getMetadata().getAnnotations(),
                        hasEntry(AbstractModel.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(!originalKafkaDeleteClaim.get())));
            }
            kafkaAssembly(NAMESPACE, CLUSTER_NAME).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
            LOGGER.info("Reconciling again -> delete");
        })))
        .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)))
        .onComplete(context.succeeding(v -> async.flag()));
}
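The same deleteClaim flag can also be set on a standalone PersistentClaimStorage through its generated builder, outside a full Kafka custom resource. A minimal sketch, not taken from the test above; the size and storage class values are illustrative:

PersistentClaimStorage storage = new PersistentClaimStorageBuilder()
        .withSize("100Gi")              // illustrative size
        .withStorageClass("standard")   // illustrative storage class
        .withDeleteClaim(true)          // delete the PVCs together with the Kafka cluster
        .build();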
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi by strimzi.
From the class KafkaAssemblyOperatorMockTest, the method testReconcileUpdatesKafkaStorageType.
@ParameterizedTest
@MethodSource("data")
public void testReconcileUpdatesKafkaStorageType(Params params, VertxTestContext context) {
    init(params);
    AtomicReference<List<PersistentVolumeClaim>> originalPVCs = new AtomicReference<>();
    AtomicReference<List<Volume>> originalVolumes = new AtomicReference<>();
    AtomicReference<List<Container>> originalInitContainers = new AtomicReference<>();

    Checkpoint async = context.checkpoint();
    initialReconcile(context)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            originalPVCs.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get())
                    .map(StatefulSet::getSpec)
                    .map(StatefulSetSpec::getVolumeClaimTemplates)
                    .orElse(new ArrayList<>()));
            originalVolumes.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get())
                    .map(StatefulSet::getSpec)
                    .map(StatefulSetSpec::getTemplate)
                    .map(PodTemplateSpec::getSpec)
                    .map(PodSpec::getVolumes)
                    .orElse(new ArrayList<>()));
            originalInitContainers.set(Optional.ofNullable(client.apps().statefulSets().inNamespace(NAMESPACE).withName(KafkaCluster.kafkaClusterName(CLUSTER_NAME)).get())
                    .map(StatefulSet::getSpec)
                    .map(StatefulSetSpec::getTemplate)
                    .map(PodTemplateSpec::getSpec)
                    .map(PodSpec::getInitContainers)
                    .orElse(new ArrayList<>()));

            // Update the storage type: ephemeral -> persistent, or persistent -> ephemeral
            Kafka updatedStorageKafka = null;
            if (kafkaStorage instanceof EphemeralStorage) {
                updatedStorageKafka = new KafkaBuilder(cluster)
                        .editSpec().editKafka()
                            .withNewPersistentClaimStorage()
                                .withSize("123")
                            .endPersistentClaimStorage()
                        .endKafka().endSpec()
                        .build();
            } else if (kafkaStorage instanceof PersistentClaimStorage) {
                updatedStorageKafka = new KafkaBuilder(cluster)
                        .editSpec().editKafka()
                            .withNewEphemeralStorage()
                            .endEphemeralStorage()
                        .endKafka().endSpec()
                        .build();
            } else {
                context.failNow(new Exception("If storage is not ephemeral or persistent something has gone wrong"));
            }
            kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(updatedStorageKafka);
            LOGGER.info("Updating with changed storage type");
        })))
        .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // Check that the Volumes and PVCs were not changed
            assertPVCs(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalPVCs.get());
            assertVolumes(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalVolumes.get());
            assertInitContainers(context, KafkaCluster.kafkaClusterName(CLUSTER_NAME), originalInitContainers.get());
            async.flag();
        })));
}
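For reference, a minimal sketch of the same instanceof branching on the Storage base type, outside the test harness; the builders come from io.strimzi.api.kafka.model.storage and the sizes are assumptions:

Storage storage = new EphemeralStorageBuilder().withSizeLimit("10Gi").build();  // illustrative starting point
if (storage instanceof EphemeralStorage) {
    // ephemeral -> persistent
    storage = new PersistentClaimStorageBuilder().withSize("123").build();
} else if (storage instanceof PersistentClaimStorage) {
    // persistent -> ephemeral
    storage = new EphemeralStorageBuilder().build();
}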
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi by strimzi.
From the class Capacity, the method generateDiskCapacity.
/**
 * Generate total disk capacity using the supplied storage configuration
 *
 * @param storage Storage configuration for Kafka cluster
 * @return Disk capacity per broker as a Double
 */
public static Double generateDiskCapacity(Storage storage) {
    if (storage instanceof PersistentClaimStorage) {
        return getSizeInMiB(((PersistentClaimStorage) storage).getSize());
    } else if (storage instanceof EphemeralStorage) {
        if (((EphemeralStorage) storage).getSizeLimit() != null) {
            return getSizeInMiB(((EphemeralStorage) storage).getSizeLimit());
        } else {
            return DEFAULT_BROKER_DISK_CAPACITY_IN_MIB;
        }
    } else if (storage instanceof JbodStorage) {
        // The value generated here for JBOD storage is used for tracking the total
        // disk capacity per broker. It will NOT be used for the final disk capacity
        // configuration, since JBOD storage requires a special disk configuration.
        List<SingleVolumeStorage> volumeList = ((JbodStorage) storage).getVolumes();
        double size = 0;
        for (SingleVolumeStorage volume : volumeList) {
            size += generateDiskCapacity(volume);
        }
        return size;
    } else {
        throw new IllegalStateException("The declared storage '" + storage.getType() + "' is not supported");
    }
}
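Because generateDiskCapacity() recurses into each JBOD volume, a mixed configuration sums the per-volume capacities. A usage sketch under assumed values; the volume ids and sizes are illustrative:

JbodStorage jbod = new JbodStorageBuilder()
        .withVolumes(
                new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
                new EphemeralStorageBuilder().withId(1).withSizeLimit("50Gi").build())
        .build();
// 102400.0 MiB (100Gi) + 51200.0 MiB (50Gi) = 153600.0 MiB in total
Double capacityInMiB = Capacity.generateDiskCapacity(jbod);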
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.
From the class StorageDiff, the method isOverrideChangeAllowed.
/**
 * Validates the changes to the storage overrides and decides whether they are allowed or not. Allowed changes are
 * those to nodes which will be added, removed, or which do not exist yet.
 *
 * @param current Current storage configuration
 * @param desired New storage configuration
 * @param currentReplicas Current number of replicas
 * @param desiredReplicas Desired number of replicas
 * @return True if only allowed override changes were made, false otherwise
 */
private boolean isOverrideChangeAllowed(Storage current, Storage desired, int currentReplicas, int desiredReplicas) {
    List<PersistentClaimStorageOverride> currentOverrides = ((PersistentClaimStorage) current).getOverrides();
    if (currentOverrides == null) {
        currentOverrides = Collections.emptyList();
    }

    List<PersistentClaimStorageOverride> desiredOverrides = ((PersistentClaimStorage) desired).getOverrides();
    if (desiredOverrides == null) {
        desiredOverrides = Collections.emptyList();
    }

    // We care only about the nodes which existed before this reconciliation and will still exist after it
    int existedAndWillExist = Math.min(currentReplicas, desiredReplicas);

    for (int i = 0; i < existedAndWillExist; i++) {
        int nodeId = i;

        PersistentClaimStorageOverride currentOverride = currentOverrides.stream()
                .filter(override -> override.getBroker() == nodeId)
                .findFirst()
                .orElse(null);

        PersistentClaimStorageOverride desiredOverride = desiredOverrides.stream()
                .filter(override -> override.getBroker() == nodeId)
                .findFirst()
                .orElse(null);

        if (currentOverride != null && desiredOverride != null) {
            // Both overrides exist but are not equal
            if (!currentOverride.equals(desiredOverride)) {
                return false;
            }
        } else if (currentOverride != null || desiredOverride != null) {
            // One of them is null while the other is not => they differ
            return false;
        }
    }

    return true;
}
Use of io.strimzi.api.kafka.model.storage.PersistentClaimStorage in project strimzi-kafka-operator by strimzi.
From the class Capacity, the method generateJbodDiskCapacity.
/**
 * Generate JBOD disk capacity configuration for a broker using the supplied storage configuration
 *
 * @param storage Storage configuration for Kafka cluster
 * @param idx Index of the broker
 * @return Disk capacity configuration value as a JsonObject for broker idx
 */
private JsonObject generateJbodDiskCapacity(Storage storage, int idx) {
    JsonObject json = new JsonObject();
    String size = "";

    for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
        String name = VolumeUtils.createVolumePrefix(volume.getId(), true);
        String path = AbstractModel.KAFKA_MOUNT_PATH + "/" + name + "/" + AbstractModel.KAFKA_LOG_DIR + idx;

        if (volume instanceof PersistentClaimStorage) {
            size = ((PersistentClaimStorage) volume).getSize();
        } else if (volume instanceof EphemeralStorage) {
            size = ((EphemeralStorage) volume).getSizeLimit();
        }

        json.put(path, String.valueOf(Capacity.getSizeInMiB(size)));
    }
    return json;
}
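As a worked illustration only (an assumption, not taken from the source): for a two-volume JBOD configuration with ids 0 and 1 on broker idx 0, the method returns one entry per volume's log directory. The paths below assume Strimzi's "data-<id>" volume prefix and its default Kafka mount path; the MiB values correspond to 100Gi and 50Gi volumes:

// Assumed shape of the returned JsonObject, one key per JBOD log directory
JsonObject expected = new JsonObject()
        .put("/var/lib/kafka/data-0/kafka-log0", "102400.0")
        .put("/var/lib/kafka/data-1/kafka-log0", "51200.0");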