Usage example of io.strimzi.operator.common.model.Labels in the strimzi/strimzi project: class KafkaClusterTest, method checkStatefulSet.
/**
 * Asserts that the generated Kafka StatefulSet matches the expected metadata, pod template,
 * container configuration and (when a rack is configured) rack-awareness settings.
 *
 * @param sts the StatefulSet produced by the model under test
 * @param cm  the Kafka custom resource the StatefulSet was generated from
 */
private void checkStatefulSet(StatefulSet sts, Kafka cm) {
    // StatefulSet metadata: expected name, namespace, labels and pod selector
    assertThat(sts.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(cluster)));
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));

    // Hoist the pod template spec instead of re-navigating sts.getSpec().getTemplate().getSpec()
    PodSpec podSpec = sts.getSpec().getTemplate().getSpec();
    assertThat(podSpec.getSchedulerName(), is("default-scheduler"));

    List<Container> containers = podSpec.getContainers();
    assertThat(containers.size(), is(1));

    assertThat(sts.getSpec().getReplicas(), is(replicas));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));

    // Checks on the main Kafka container — hoisted into a local instead of repeating containers.get(0)
    Container kafkaContainer = containers.get(0);
    assertThat(kafkaContainer.getImage(), is(image));

    // Liveness and readiness probes are configured with identical values
    assertThat(kafkaContainer.getLivenessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(kafkaContainer.getLivenessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(kafkaContainer.getLivenessProbe().getFailureThreshold(), is(10));
    assertThat(kafkaContainer.getLivenessProbe().getSuccessThreshold(), is(4));
    assertThat(kafkaContainer.getLivenessProbe().getPeriodSeconds(), is(33));
    assertThat(kafkaContainer.getReadinessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(kafkaContainer.getReadinessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(kafkaContainer.getReadinessProbe().getFailureThreshold(), is(10));
    assertThat(kafkaContainer.getReadinessProbe().getSuccessThreshold(), is(4));
    assertThat(kafkaContainer.getReadinessProbe().getPeriodSeconds(), is(33));

    // GC logging env var keeps its compiled-in default
    assertThat(AbstractModel.containerEnvVars(kafkaContainer).get(KafkaCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));

    // Volume mounts, grouped in index order: tmp dir (1), cluster CA certs (2), broker certs (3), client CA certs (4)
    assertThat(kafkaContainer.getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(kafkaContainer.getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(kafkaContainer.getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
    assertThat(kafkaContainer.getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
    assertThat(kafkaContainer.getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));

    // Container ports: control plane first, then replication, both TCP
    assertThat(kafkaContainer.getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME));
    assertThat(kafkaContainer.getPorts().get(0).getContainerPort(), is(KafkaCluster.CONTROLPLANE_PORT));
    assertThat(kafkaContainer.getPorts().get(0).getProtocol(), is("TCP"));
    assertThat(kafkaContainer.getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME));
    assertThat(kafkaContainer.getPorts().get(1).getContainerPort(), is(KafkaCluster.REPLICATION_PORT));
    assertThat(kafkaContainer.getPorts().get(1).getProtocol(), is("TCP"));

    // The strimzi-tmp emptyDir volume must carry the default size limit
    assertThat(podSpec.getVolumes().stream()
            .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
            .findFirst().orElseThrow()
            .getEmptyDir().getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));

    if (cm.getSpec().getKafka().getRack() != null) {
        Rack rack = cm.getSpec().getKafka().getRack();

        // Rack awareness: the pod spec must contain anti-affinity rules with the configured topology key
        assertThat(podSpec.getAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution(), is(notNullValue()));
        List<WeightedPodAffinityTerm> terms = podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(terms, is(notNullValue()));
        assertThat(terms.size() > 0, is(true));
        boolean isTopologyKey = terms.stream().anyMatch(term -> term.getPodAffinityTerm().getTopologyKey().equals(rack.getTopologyKey()));
        assertThat(isTopologyKey, is(true));

        // Rack awareness also requires the Kafka init container to be present in the pod spec
        List<Container> initContainers = podSpec.getInitContainers();
        assertThat(initContainers, is(notNullValue()));
        assertThat(initContainers.size() > 0, is(true));
        boolean isInitKafka = initContainers.stream().anyMatch(container -> container.getName().equals(KafkaCluster.INIT_NAME));
        assertThat(isInitKafka, is(true));
    }
}
Usage example of io.strimzi.operator.common.model.Labels in the strimzi/strimzi project: class ModelUtilsTest, method testCONetworkPolicyPeerNamespaceSelectorDifferentNSWithLabels.
@ParallelTest
public void testCONetworkPolicyPeerNamespaceSelectorDifferentNSWithLabels() {
    // Labels of the operator namespace; the peer's namespace selector must match on exactly these
    Labels operatorNamespaceLabels = Labels.fromMap(singletonMap("labelKey", "labelValue"));
    NetworkPolicyPeer networkPolicyPeer = new NetworkPolicyPeer();

    // Operand and operator live in different namespaces, so a namespace selector is expected
    ModelUtils.setClusterOperatorNetworkPolicyNamespaceSelector(networkPolicyPeer, "my-ns", "my-operator-ns", operatorNamespaceLabels);

    assertThat(networkPolicyPeer.getNamespaceSelector().getMatchLabels(), is(operatorNamespaceLabels.toMap()));
}
Usage example of io.strimzi.operator.common.model.Labels in the strimzi/strimzi project: class KafkaST, method verifyVolumeNamesAndLabels.
/**
 * Verifies that Kafka broker PVCs carry the expected Strimzi labels and size, that the JBOD
 * naming scheme ({@code data-<disk>-<cluster>-kafka-<replica>}) is followed, and that every
 * broker pod mounts the matching data volumes and claims.
 *
 * @param namespaceName       namespace holding the Kafka cluster
 * @param clusterName         name of the Kafka cluster
 * @param kafkaReplicas       expected number of broker replicas
 * @param diskCountPerReplica expected number of JBOD disks per broker
 * @param diskSizeGi          expected requested size of each disk, in Gi
 */
void verifyVolumeNamesAndLabels(String namespaceName, String clusterName, int kafkaReplicas, int diskCountPerReplica, String diskSizeGi) {
    ArrayList<String> pvcs = new ArrayList<>();

    kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(pvc -> pvc.getMetadata().getName().contains(clusterName + "-kafka"))
        .forEach(volume -> {
            String volumeName = volume.getMetadata().getName();
            pvcs.add(volumeName);
            // Parameterized logging instead of string concatenation
            LOGGER.info("Checking labels for volume: {}", volumeName);
            assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(clusterName));
            assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_KIND_LABEL), is(Kafka.RESOURCE_KIND));
            assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_NAME_LABEL), is(clusterName.concat("-kafka")));
            assertThat(volume.getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizeGi, "Gi")));
        });

    LOGGER.info("Checking PVC names included in JBOD array");
    for (int i = 0; i < kafkaReplicas; i++) {
        for (int j = 0; j < diskCountPerReplica; j++) {
            assertThat(pvcs.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
        }
    }

    LOGGER.info("Checking PVC on Kafka pods");
    for (int i = 0; i < kafkaReplicas; i++) {
        ArrayList<String> dataSourcesOnPod = new ArrayList<>();
        ArrayList<String> pvcsOnPod = new ArrayList<>();
        String podName = clusterName.concat("-kafka-" + i);

        LOGGER.info("Getting list of mounted data sources and PVCs on Kafka pod {}", i);
        // Fetch the pod once instead of re-querying the API server twice per disk index
        kubeClient(namespaceName).getPod(namespaceName, podName).getSpec().getVolumes().stream()
            .limit(diskCountPerReplica)
            .forEach(volume -> {
                dataSourcesOnPod.add(volume.getName());
                pvcsOnPod.add(volume.getPersistentVolumeClaim().getClaimName());
            });

        LOGGER.info("Verifying mounted data sources and PVCs on Kafka pod {}", i);
        for (int j = 0; j < diskCountPerReplica; j++) {
            assertThat(dataSourcesOnPod.contains("data-" + j), is(true));
            assertThat(pvcsOnPod.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
        }
    }
}
Usage example of io.strimzi.operator.common.model.Labels in the strimzi/strimzi project: class KafkaST, method testLabelsAndAnnotationForPVC.
@ParallelNamespaceTest
@KRaftNotSupported("JBOD is not supported by KRaft mode and is used in this test case.")
void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String labelAnnotationKey = "testKey";
    final String firstValue = "testValue";
    final String changedValue = "editedTestValue";

    Map<String, String> pvcLabel = new HashMap<>();
    pvcLabel.put(labelAnnotationKey, firstValue);
    // Use an independent copy — the original aliased pvcLabel, so any later mutation of one
    // silently mutated the other and the two "separate" updates below were really one.
    Map<String, String> pvcAnnotation = new HashMap<>(pvcLabel);

    Map<String, String> statefulSetLabels = new HashMap<>();
    statefulSetLabels.put("app.kubernetes.io/part-of", "some-app");
    statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app");

    // Kafka with JBOD storage plus custom labels/annotations on the StatefulSet/PodSet and PVC templates
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .withNewTemplate()
                    .withNewStatefulset()
                        .withNewMetadata()
                            .withLabels(statefulSetLabels)
                        .endMetadata()
                    .endStatefulset()
                    .withNewPodSet()
                        .withNewMetadata()
                            .withLabels(statefulSetLabels)
                        .endMetadata()
                    .endPodSet()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withStorage(new JbodStorageBuilder().withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("10Gi").build()).build())
            .endKafka()
            .editZookeeper()
                .withNewTemplate()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withNewPersistentClaimStorage()
                    .withDeleteClaim(false)
                    .withSize("3Gi")
                .endPersistentClaimStorage()
            .endZookeeper()
        .endSpec()
        .build());

    LOGGER.info("Check if Kubernetes labels are applied");
    Map<String, String> actualStatefulSetLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app"));
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app"));
    LOGGER.info("Kubernetes labels are correctly set and present");

    // 2 JBOD disks x 3 Kafka replicas + 1 ZooKeeper PVC = 7
    List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName))
        .collect(Collectors.toList());
    assertThat(pvcs.size(), is(7));

    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        // assertThat takes (actual, matcher) — the original had the arguments reversed here
        assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(firstValue));
        assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(firstValue));
    }

    pvcLabel.put(labelAnnotationKey, changedValue);
    pvcAnnotation.put(labelAnnotationKey, changedValue);

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
    }, namespaceName);

    PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey);
    PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName))
        .collect(Collectors.toList());
    LOGGER.info(pvcs.toString());
    assertThat(pvcs.size(), is(7));

    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue));
        assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue));
    }
}
Usage example of io.strimzi.operator.common.model.Labels in the strimzi/strimzi project: class AbstractST, method verifyLabelsForKafkaAndZKServices.
/**
 * Verifies that the Kafka and ZooKeeper Services of a cluster carry the expected
 * {@code app} label and the Strimzi cluster/kind/name labels.
 *
 * @param namespaceName namespace holding the Services
 * @param clusterName   name of the Kafka cluster the Services belong to
 * @param appName       expected value of the {@code app} label
 */
void verifyLabelsForKafkaAndZKServices(String namespaceName, String clusterName, String appName) {
    LOGGER.info("Verifying labels for Services");
    String kafkaServiceName = clusterName + "-kafka";
    String zookeeperServiceName = clusterName + "-zookeeper";

    // Service name -> expected value of the Strimzi name label on that Service
    Map<String, String> servicesMap = new HashMap<>();
    servicesMap.put(kafkaServiceName + "-bootstrap", kafkaServiceName);
    servicesMap.put(kafkaServiceName + "-brokers", kafkaServiceName);
    servicesMap.put(zookeeperServiceName + "-nodes", zookeeperServiceName);
    servicesMap.put(zookeeperServiceName + "-client", zookeeperServiceName + "-client");

    // Iterate over entries rather than keySet() + get(): one map lookup instead of two
    for (Map.Entry<String, String> expectedService : servicesMap.entrySet()) {
        String serviceName = expectedService.getKey();
        String expectedNameLabel = expectedService.getValue();
        kubeClient(namespaceName).listServices(namespaceName).stream()
            .filter(service -> service.getMetadata().getName().equals(serviceName))
            .forEach(service -> {
                LOGGER.info("Verifying labels for service {}", serviceName);
                assertThat(service.getMetadata().getLabels().get("app"), is(appName));
                assertThat(service.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(clusterName));
                assertThat(service.getMetadata().getLabels().get(Labels.STRIMZI_KIND_LABEL), is("Kafka"));
                assertThat(service.getMetadata().getLabels().get(Labels.STRIMZI_NAME_LABEL), is(expectedNameLabel));
            });
    }
}
Aggregations