Use of io.fabric8.kubernetes.api.model.storage.StorageClass in the fabric8-maven-plugin project (by fabric8io):
class VolumePermissionEnricher, method adapt.
@Override
public void adapt(KubernetesListBuilder builder) {
    // Pass 1: for every pod template that mounts at least one PVC, prepend an init
    // container that chmods the mount paths so the main containers can write to the
    // persistent volumes regardless of the volume's filesystem ownership.
    builder.accept(new TypedVisitor<PodTemplateSpecBuilder>() {
        @Override
        public void visit(PodTemplateSpecBuilder builder) {
            PodSpec podSpec = builder.buildSpec();
            if (podSpec == null) {
                return;
            }
            if (!checkForPvc(podSpec)) {
                return;
            }
            List<Container> containers = podSpec.getContainers();
            if (containers == null || containers.isEmpty()) {
                return;
            }
            log.verbose("Adding init container for changing persistent volumes access mode to %s", getConfig(Config.permission));
            // Only append the init container once, even if the enricher runs repeatedly.
            if (!initContainerHandler.hasInitContainer(builder, ENRICHER_NAME)) {
                initContainerHandler.appendInitContainer(builder, createPvInitContainer(podSpec));
            }
        }

        // Returns true if at least one volume of the pod spec is backed by a PVC.
        private boolean checkForPvc(PodSpec podSpec) {
            List<Volume> volumes = podSpec.getVolumes();
            if (volumes != null) {
                for (Volume volume : volumes) {
                    PersistentVolumeClaimVolumeSource persistentVolumeClaim = volume.getPersistentVolumeClaim();
                    if (persistentVolumeClaim != null) {
                        return true;
                    }
                }
            }
            return false;
        }

        // Builds the JSON description of the busybox init container that chmods
        // every PVC-backed mount point to the configured permission.
        private JSONObject createPvInitContainer(PodSpec podSpec) {
            Map<String, String> mountPoints = extractMountPoints(podSpec);
            JSONObject entry = new JSONObject();
            entry.put("name", ENRICHER_NAME);
            entry.put("image", "busybox");
            entry.put("imagePullPolicy", "IfNotPresent");
            entry.put("command", createChmodCommandArray(mountPoints));
            entry.put("volumeMounts", createMounts(mountPoints));
            return entry;
        }

        // Command array: ["chmod", <permission>, <mountPath>...], duplicate paths removed
        // while preserving first-seen order (LinkedHashSet).
        private JSONArray createChmodCommandArray(Map<String, String> mountPoints) {
            JSONArray ret = new JSONArray();
            ret.put("chmod");
            ret.put(getConfig(Config.permission));
            Set<String> uniqueNames = new LinkedHashSet<>(mountPoints.values());
            for (String name : uniqueNames) {
                ret.put(name);
            }
            return ret;
        }

        // volumeMounts entries for the init container, one per PVC-backed volume.
        private JSONArray createMounts(Map<String, String> mountPoints) {
            JSONArray ret = new JSONArray();
            for (Map.Entry<String, String> entry : mountPoints.entrySet()) {
                JSONObject mount = new JSONObject();
                mount.put("name", entry.getKey());
                mount.put("mountPath", entry.getValue());
                ret.put(mount);
            }
            return ret;
        }

        // Maps volume name -> container mount path for every PVC-backed volume.
        private Map<String, String> extractMountPoints(PodSpec podSpec) {
            Map<String, String> nameToMount = new LinkedHashMap<>();
            List<Volume> volumes = podSpec.getVolumes();
            if (volumes != null) {
                for (Volume volume : volumes) {
                    PersistentVolumeClaimVolumeSource persistentVolumeClaim = volume.getPersistentVolumeClaim();
                    if (persistentVolumeClaim != null) {
                        String name = volume.getName();
                        String mountPath = getMountPath(podSpec.getContainers(), name);
                        nameToMount.put(name, mountPath);
                    }
                }
            }
            return nameToMount;
        }

        // Finds the mount path of the named volume in any of the given containers.
        // Throws IllegalArgumentException when no container mounts the volume.
        private String getMountPath(List<Container> containers, String name) {
            for (Container container : containers) {
                List<VolumeMount> volumeMounts = container.getVolumeMounts();
                if (volumeMounts != null) {
                    for (VolumeMount volumeMount : volumeMounts) {
                        if (name.equals(volumeMount.getName())) {
                            return volumeMount.getMountPath();
                        }
                    }
                }
            }
            throw new IllegalArgumentException("No matching volume mount found for volume " + name);
        }
    });

    // Pass 2: ensure every PVC carries the default storage-class annotation so that
    // PVs get dynamically provisioned out of the box.
    builder.accept(new TypedVisitor<PersistentVolumeClaimBuilder>() {
        @Override
        public void visit(PersistentVolumeClaimBuilder pvcBuilder) {
            // lets ensure we have a default storage class so that PVs will get dynamically created OOTB
            if (pvcBuilder.buildMetadata() == null) {
                pvcBuilder.withNewMetadata().endMetadata();
            }
            String storageClass = getConfig(Config.defaultStorageClass);
            if (Strings.isNotBlank(storageClass)) {
                // FIX: getAnnotations() may return null (e.g. on the freshly created
                // metadata above), which previously caused a NullPointerException
                // when calling containsKey() on it directly.
                Map<String, String> annotations = pvcBuilder.buildMetadata().getAnnotations();
                if (annotations == null || !annotations.containsKey(VOLUME_STORAGE_CLASS_ANNOTATION)) {
                    pvcBuilder.editMetadata().addToAnnotations(VOLUME_STORAGE_CLASS_ANNOTATION, storageClass).endMetadata();
                }
            }
        }
    });
}
Use of io.fabric8.kubernetes.api.model.storage.StorageClass in the strimzi project (by strimzi):
class VolumeResizingTest, method testNotBoundVolumes.
@Test
public void testNotBoundVolumes() {
    Kafka kafka = getKafkaCrd();
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    // Stub the PVC operator: serve the generated (not yet bound) claims by name
    // and capture everything passed to reconcile().
    PvcOperator pvcOperator = supplier.pvcOperations;
    List<PersistentVolumeClaim> generatedPvcs = kafkaCluster.generatePersistentVolumeClaims(kafka.getSpec().getKafka().getStorage());
    when(pvcOperator.getAsync(eq(namespace), ArgumentMatchers.startsWith("data-"))).thenAnswer(call -> {
        String requestedName = call.getArgument(1);
        PersistentVolumeClaim match = generatedPvcs.stream()
                .filter(claim -> requestedName.equals(claim.getMetadata().getName()))
                .findFirst()
                .orElse(null);
        return Future.succeededFuture(match);
    });
    ArgumentCaptor<PersistentVolumeClaim> reconciledPvcs = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(pvcOperator.reconcile(any(), anyString(), anyString(), reconciledPvcs.capture())).thenReturn(Future.succeededFuture());

    // Stub the StorageClass operator with a class that allows volume expansion.
    StorageClassOperator scOperator = supplier.storageClassOperations;
    when(scOperator.getAsync(eq("mysc"))).thenAnswer(call -> Future.succeededFuture(
            new StorageClassBuilder()
                    .withNewMetadata().withName("mysc").endMetadata()
                    .withAllowVolumeExpansion(true)
                    .build()));

    MockKafkaAssemblyOperator operator = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, passwordGenerator, supplier, config);
    operator.resizeVolumes(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), kafka,
            kafkaCluster.generatePersistentVolumeClaims(kafka.getSpec().getKafka().getStorage()), kafkaCluster)
        .onComplete(res -> {
            assertThat(res.succeeded(), is(true));
            // Unbound volumes are reconciled as-is: one reconcile per generated claim.
            assertThat(reconciledPvcs.getAllValues().size(), is(3));
            assertThat(reconciledPvcs.getAllValues(), is(kafkaCluster.generatePersistentVolumeClaims(kafka.getSpec().getKafka().getStorage())));
        });
}
Use of io.fabric8.kubernetes.api.model.storage.StorageClass in the strimzi project (by strimzi):
class VolumeResizingTest, method testVolumesWaitingForRestart.
@Test
public void testVolumesWaitingForRestart() {
    Kafka kafka = getKafkaCrd();
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    // Generate the claims and mark each one as bound with a filesystem resize
    // pending, i.e. waiting for a pod restart to finish the resize.
    List<PersistentVolumeClaim> generatedPvcs = kafkaCluster.generatePersistentVolumeClaims(kafka.getSpec().getKafka().getStorage());
    for (PersistentVolumeClaim claim : generatedPvcs) {
        claim.setStatus(new PersistentVolumeClaimStatusBuilder()
                .withPhase("Bound")
                .withConditions(new PersistentVolumeClaimConditionBuilder()
                        .withStatus("True")
                        .withType("FileSystemResizePending")
                        .build())
                .withCapacity(singletonMap("storage", new Quantity("10Gi")))
                .build());
    }

    // Stub the PVC operator: serve the prepared claims by name and capture reconciles.
    PvcOperator pvcOperator = supplier.pvcOperations;
    when(pvcOperator.getAsync(eq(namespace), ArgumentMatchers.startsWith("data-"))).thenAnswer(call -> {
        String requestedName = call.getArgument(1);
        return Future.succeededFuture(generatedPvcs.stream()
                .filter(claim -> requestedName.equals(claim.getMetadata().getName()))
                .findFirst()
                .orElse(null));
    });
    ArgumentCaptor<PersistentVolumeClaim> reconciledPvcs = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(pvcOperator.reconcile(any(), anyString(), anyString(), reconciledPvcs.capture())).thenReturn(Future.succeededFuture());

    // Stub the StorageClass operator with a class that allows volume expansion.
    StorageClassOperator scOperator = supplier.storageClassOperations;
    when(scOperator.getAsync(eq("mysc"))).thenAnswer(call -> Future.succeededFuture(
            new StorageClassBuilder()
                    .withNewMetadata().withName("mysc").endMetadata()
                    .withAllowVolumeExpansion(true)
                    .build()));

    MockKafkaAssemblyOperator operator = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, passwordGenerator, supplier, config);
    operator.resizeVolumes(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), kafka,
            kafkaCluster.generatePersistentVolumeClaims(kafka.getSpec().getKafka().getStorage()), kafkaCluster)
        .onComplete(res -> {
            assertThat(res.succeeded(), is(true));
            // The volumes are waiting for pod restart => no reconciliation,
            // but every pod is queued for a filesystem-resize restart.
            assertThat(reconciledPvcs.getAllValues().size(), is(0));
            for (int i = 0; i < kafkaCluster.getReplicas(); i++) {
                assertThat(res.result().fsResizingRestartRequest.contains(kafkaCluster.getPodName(i)), is(true));
            }
        });
}
Use of io.fabric8.kubernetes.api.model.storage.StorageClass in the strimzi project (by strimzi):
class VolumeResizingTest, method testVolumesBoundNonExpandableStorageClass.
@Test
public void testVolumesBoundNonExpandableStorageClass() {
    Kafka kafka = getKafkaCrd();
    KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    // Generate the claims; bump the requested size and mark each one as bound at
    // the new capacity, i.e. already bound but asked to grow.
    List<PersistentVolumeClaim> generatedPvcs = kafkaCluster.generatePersistentVolumeClaims(kafka.getSpec().getKafka().getStorage());
    for (PersistentVolumeClaim claim : generatedPvcs) {
        claim.getSpec().getResources().getRequests().put("storage", new Quantity("10Gi"));
        claim.setStatus(new PersistentVolumeClaimStatusBuilder()
                .withPhase("Bound")
                .withCapacity(claim.getSpec().getResources().getRequests())
                .build());
    }

    // Stub the PVC operator: serve the prepared claims by name and capture reconciles.
    PvcOperator pvcOperator = supplier.pvcOperations;
    when(pvcOperator.getAsync(eq(namespace), ArgumentMatchers.startsWith("data-"))).thenAnswer(call -> {
        String requestedName = call.getArgument(1);
        return Future.succeededFuture(generatedPvcs.stream()
                .filter(claim -> requestedName.equals(claim.getMetadata().getName()))
                .findFirst()
                .orElse(null));
    });
    ArgumentCaptor<PersistentVolumeClaim> reconciledPvcs = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(pvcOperator.reconcile(any(), anyString(), anyString(), reconciledPvcs.capture())).thenReturn(Future.succeededFuture());

    // Stub the StorageClass operator with a class that does NOT allow expansion.
    StorageClassOperator scOperator = supplier.storageClassOperations;
    when(scOperator.getAsync(eq("mysc"))).thenAnswer(call -> Future.succeededFuture(
            new StorageClassBuilder()
                    .withNewMetadata().withName("mysc").endMetadata()
                    .withAllowVolumeExpansion(false)
                    .build()));

    MockKafkaAssemblyOperator operator = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, passwordGenerator, supplier, config);
    operator.resizeVolumes(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), kafka,
            kafkaCluster.generatePersistentVolumeClaims(kafka.getSpec().getKafka().getStorage()), kafkaCluster)
        .onComplete(res -> {
            assertThat(res.succeeded(), is(true));
            // Resizing is not supported by the storage class, so nothing is reconciled.
            assertThat(reconciledPvcs.getAllValues().size(), is(0));
        });
}
Use of io.fabric8.kubernetes.api.model.storage.StorageClass in the strimzi project (by strimzi):
class AbstractModel, method createPersistentVolumeClaim.
/**
 * Creates the PersistentVolumeClaim for one ordinal (broker ID) of a stateful set.
 *
 * @param ordinalId the ordinal of the pod/broker for which the claim is being created;
 *                  used to look up an optional per-broker storage-class override
 * @param name      the name of the persistent volume claim to be created
 * @param storage   the user-supplied configuration of the PersistentClaimStorage
 *
 * @return the generated PersistentVolumeClaim
 */
protected PersistentVolumeClaim createPersistentVolumeClaim(int ordinalId, String name, PersistentClaimStorage storage) {
    Map<String, Quantity> requests = new HashMap<>(1);
    requests.put("storage", new Quantity(storage.getSize(), null));

    // Only attach a label selector when the user actually configured one.
    LabelSelector selector = null;
    if (storage.getSelector() != null && !storage.getSelector().isEmpty()) {
        selector = new LabelSelector(null, storage.getSelector());
    }

    // A per-broker storage-class override takes precedence over the class
    // configured on the storage itself.
    String storageClass = storage.getStorageClass();
    if (storage.getOverrides() != null) {
        for (PersistentClaimStorageOverride override : storage.getOverrides()) {
            if (override != null
                    && override.getBroker() != null
                    && override.getBroker() == ordinalId
                    && override.getStorageClass() != null) {
                storageClass = override.getStorageClass();
                break;
            }
        }
    }

    PersistentVolumeClaim pvc = new PersistentVolumeClaimBuilder()
            .withNewMetadata()
                .withName(name)
                .withNamespace(namespace)
                .withLabels(getLabelsWithStrimziName(this.name, templatePersistentVolumeClaimLabels).toMap())
                .withAnnotations(Util.mergeLabelsOrAnnotations(
                        Collections.singletonMap(ANNO_STRIMZI_IO_DELETE_CLAIM, Boolean.toString(storage.isDeleteClaim())),
                        templatePersistentVolumeClaimAnnotations))
            .endMetadata()
            .withNewSpec()
                .withAccessModes("ReadWriteOnce")
                .withNewResources()
                    .withRequests(requests)
                .endResources()
                .withStorageClassName(storageClass)
                .withSelector(selector)
                .withVolumeMode("Filesystem")
            .endSpec()
            .build();

    // If the claim should be deleted when the cluster is un-deployed, make the CR
    // its owner so Kubernetes garbage-collects it.
    if (storage.isDeleteClaim()) {
        pvc.getMetadata().setOwnerReferences(Collections.singletonList(createOwnerReference()));
    }
    return pvc;
}
Aggregations