Example usage of io.strimzi.api.kafka.model.storage.Storage in the strimzi-kafka-operator project by strimzi.
Source: the testReconcileUpdatesKafkaWithChangedDeleteClaim method of the KafkaAssemblyOperatorMockTest class.
/**
 * Test that we can change the deleteClaim flag, and that it's honoured
 */
@ParameterizedTest
@MethodSource("data")
public void testReconcileUpdatesKafkaWithChangedDeleteClaim(Params params, VertxTestContext context) {
    init(params);
    assumeTrue(kafkaStorage instanceof PersistentClaimStorage, "Kafka delete claims do not apply to non-persistent volumes");

    // Selector labels identifying the Kafka broker PVCs
    Map<String, String> kafkaSelector = new HashMap<>();
    kafkaSelector.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND);
    kafkaSelector.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME);
    kafkaSelector.put(Labels.STRIMZI_NAME_LABEL, KafkaCluster.kafkaClusterName(CLUSTER_NAME));

    // Selector labels identifying the ZooKeeper PVCs
    Map<String, String> zkSelector = new HashMap<>();
    zkSelector.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND);
    zkSelector.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME);
    zkSelector.put(Labels.STRIMZI_NAME_LABEL, ZookeeperCluster.zookeeperClusterName(CLUSTER_NAME));

    // Captured during the first reconciliation so the later stages can refer back to them
    AtomicReference<Set<String>> kafkaPvcNames = new AtomicReference<>();
    AtomicReference<Set<String>> zkPvcNames = new AtomicReference<>();
    AtomicBoolean initialDeleteClaim = new AtomicBoolean();

    Checkpoint async = context.checkpoint();

    initialReconcile(context)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // Record the PVC names created by the initial reconcile, and the starting deleteClaim value
            kafkaPvcNames.set(client.persistentVolumeClaims().inNamespace(NAMESPACE).withLabels(kafkaSelector)
                    .list().getItems().stream()
                    .map(claim -> claim.getMetadata().getName())
                    .collect(Collectors.toSet()));
            zkPvcNames.set(client.persistentVolumeClaims().inNamespace(NAMESPACE).withLabels(zkSelector)
                    .list().getItems().stream()
                    .map(claim -> claim.getMetadata().getName())
                    .collect(Collectors.toSet()));
            initialDeleteClaim.set(deleteClaim(kafkaStorage));

            // Patch the Kafka CR with the deleteClaim flag flipped
            Kafka updatedStorageKafka = new KafkaBuilder(cluster)
                    .editSpec()
                        .editKafka()
                            .withNewPersistentClaimStorage()
                                .withSize("123")
                                .withStorageClass("foo")
                                .withDeleteClaim(!initialDeleteClaim.get())
                            .endPersistentClaimStorage()
                        .endKafka()
                    .endSpec()
                    .build();
            kafkaAssembly(NAMESPACE, CLUSTER_NAME).patch(updatedStorageKafka);
            LOGGER.info("Updating with changed delete claim");
        })))
        .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // Each Kafka PVC must now carry the flipped delete-claim annotation
            for (String pvcName : kafkaPvcNames.get()) {
                assertThat(client.persistentVolumeClaims().inNamespace(NAMESPACE).withName(pvcName).get().getMetadata().getAnnotations(),
                        hasEntry(AbstractModel.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(!initialDeleteClaim.get())));
            }
            // Delete the Kafka CR so the final reconcile exercises the deletion path
            kafkaAssembly(NAMESPACE, CLUSTER_NAME).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
            LOGGER.info("Reconciling again -> delete");
        })))
        .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)))
        .onComplete(context.succeeding(v -> async.flag()));
}
Aggregations