Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project — class ClusterOperatorConfig, method parseLabels:
/**
 * Parses the value of a configuration variable into a {@link Labels} instance.
 *
 * @param vars Map with configuration variables
 * @param configurationKey Key containing the string with labels
 * @return Labels object with the Labels or null if no labels are configured
 */
private static Labels parseLabels(Map<String, String> vars, String configurationKey) {
    String rawLabels = vars.get(configurationKey);

    // Missing key means no labels were configured; signalled to the caller as null
    if (rawLabels == null) {
        return null;
    }

    try {
        return Labels.fromString(rawLabels);
    } catch (Exception e) {
        // Wrap any parsing failure so that the caller sees a configuration error with the offending key
        throw new InvalidConfigurationException("Failed to parse labels from " + configurationKey, e);
    }
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project — class KafkaClusterTest, method checkStatefulSet:
/**
 * Asserts that a Kafka StatefulSet generated from the given Kafka custom resource has the
 * expected metadata, replica count, container configuration, probes, ports, volume mounts
 * and — when a rack is configured — matching anti-affinity rules and the Kafka init container.
 *
 * @param sts Generated StatefulSet to check
 * @param cm  Kafka custom resource the StatefulSet was generated from
 */
private void checkStatefulSet(StatefulSet sts, Kafka cm) {
    assertThat(sts.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(cluster)));
    // ... in the same namespace ...
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    // ... with these labels
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));
    assertThat(sts.getSpec().getTemplate().getSpec().getSchedulerName(), is("default-scheduler"));

    List<Container> containers = sts.getSpec().getTemplate().getSpec().getContainers();
    assertThat(containers.size(), is(1));

    // Checks on the main Kafka container — hoisted into a local to avoid repeating containers.get(0)
    Container kafkaContainer = containers.get(0);
    assertThat(sts.getSpec().getReplicas(), is(replicas));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));
    assertThat(kafkaContainer.getImage(), is(image));

    // Liveness and readiness probes share the configured delay/timeout and fixed thresholds/period
    assertThat(kafkaContainer.getLivenessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(kafkaContainer.getLivenessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(kafkaContainer.getLivenessProbe().getFailureThreshold(), is(10));
    assertThat(kafkaContainer.getLivenessProbe().getSuccessThreshold(), is(4));
    assertThat(kafkaContainer.getLivenessProbe().getPeriodSeconds(), is(33));
    assertThat(kafkaContainer.getReadinessProbe().getTimeoutSeconds(), is(healthTimeout));
    assertThat(kafkaContainer.getReadinessProbe().getInitialDelaySeconds(), is(healthDelay));
    assertThat(kafkaContainer.getReadinessProbe().getFailureThreshold(), is(10));
    assertThat(kafkaContainer.getReadinessProbe().getSuccessThreshold(), is(4));
    assertThat(kafkaContainer.getReadinessProbe().getPeriodSeconds(), is(33));

    assertThat(AbstractModel.containerEnvVars(kafkaContainer).get(KafkaCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));

    // Volume mounts, checked in mount-index order (the original asserted indices 1, 3, 2, 4)
    assertThat(kafkaContainer.getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(kafkaContainer.getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(kafkaContainer.getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
    assertThat(kafkaContainer.getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
    assertThat(kafkaContainer.getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));

    // Control-plane and replication ports
    assertThat(kafkaContainer.getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME));
    assertThat(kafkaContainer.getPorts().get(0).getContainerPort(), is(KafkaCluster.CONTROLPLANE_PORT));
    assertThat(kafkaContainer.getPorts().get(0).getProtocol(), is("TCP"));
    assertThat(kafkaContainer.getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME));
    assertThat(kafkaContainer.getPorts().get(1).getContainerPort(), is(KafkaCluster.REPLICATION_PORT));
    assertThat(kafkaContainer.getPorts().get(1).getProtocol(), is("TCP"));

    // The strimzi-tmp emptyDir volume should use the default size limit
    assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().stream()
                    .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
                    .findFirst()
                    .orElseThrow()
                    .getEmptyDir()
                    .getSizeLimit(),
            is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));

    if (cm.getSpec().getKafka().getRack() != null) {
        Rack rack = cm.getSpec().getKafka().getRack();

        // check that the pod spec contains anti-affinity rules with the right topology key
        PodSpec podSpec = sts.getSpec().getTemplate().getSpec();
        assertThat(podSpec.getAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution(), is(notNullValue()));
        List<WeightedPodAffinityTerm> terms = podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(terms, is(notNullValue()));
        assertThat(terms.size() > 0, is(true));
        boolean isTopologyKey = terms.stream().anyMatch(term -> term.getPodAffinityTerm().getTopologyKey().equals(rack.getTopologyKey()));
        assertThat(isTopologyKey, is(true));

        // check that pod spec contains the init Kafka container
        List<Container> initContainers = podSpec.getInitContainers();
        assertThat(initContainers, is(notNullValue()));
        assertThat(initContainers.size() > 0, is(true));
        boolean isInitKafka = initContainers.stream().anyMatch(container -> container.getName().equals(KafkaCluster.INIT_NAME));
        assertThat(isInitKafka, is(true));
    }
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project — class JbodStorageMockTest, method getPvcs:
/**
 * Lists the PersistentVolumeClaims belonging to the Kafka cluster with the given name.
 *
 * @param namespace Namespace in which the PVCs should be listed
 * @param name      Name of the Kafka cluster
 * @return List of the PVCs matching the Kafka StatefulSet's Strimzi labels
 */
private List<PersistentVolumeClaim> getPvcs(String namespace, String name) {
    // PVCs created for the Kafka StatefulSet carry the Strimzi cluster, kind and name labels
    Labels selector = Labels.forStrimziCluster(name)
            .withStrimziKind(Kafka.RESOURCE_KIND)
            .withStrimziName(KafkaResources.kafkaStatefulSetName(name));

    return client.persistentVolumeClaims()
            .inNamespace(namespace)
            .withLabels(selector.toMap())
            .list()
            .getItems();
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project — class KafkaAssemblyOperatorNonParametrizedTest, method testCustomLabelsAndAnnotations:
/**
 * Verifies that custom labels and annotations configured in the Kafka CR's clusterCaCert
 * template are applied to the Cluster CA certificate Secret only, and are NOT propagated
 * to the Cluster CA key Secret or to either of the Clients CA Secrets.
 *
 * @param context Test context
 */
@Test
public void testCustomLabelsAndAnnotations(VertxTestContext context) {
    Map<String, String> labels = new HashMap<>(2);
    labels.put("label1", "value1");
    labels.put("label2", "value2");

    Map<String, String> annos = new HashMap<>(2);
    annos.put("anno1", "value3");
    annos.put("anno2", "value4");

    // Kafka CR with the custom metadata configured only on the clusterCaCert template
    Kafka kafka = new KafkaBuilder()
            .withNewMetadata()
                .withName(NAME)
                .withNamespace(NAMESPACE)
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withNewEphemeralStorage()
                    .endEphemeralStorage()
                    .withNewTemplate()
                        .withNewClusterCaCert()
                            .withNewMetadata()
                                .withAnnotations(annos)
                                .withLabels(labels)
                            .endMetadata()
                        .endClusterCaCert()
                    .endTemplate()
                .endKafka()
                .withNewZookeeper()
                    .withReplicas(3)
                    .withNewEphemeralStorage()
                    .endEphemeralStorage()
                .endZookeeper()
            .endSpec()
            .build();

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    SecretOperator secretOps = supplier.secretOperations;
    PodOperator podOps = supplier.podOperations;

    // Capture the Secret passed to reconcile() for each of the four CA Secrets
    ArgumentCaptor<Secret> clusterCaCert = ArgumentCaptor.forClass(Secret.class);
    ArgumentCaptor<Secret> clusterCaKey = ArgumentCaptor.forClass(Secret.class);
    ArgumentCaptor<Secret> clientsCaCert = ArgumentCaptor.forClass(Secret.class);
    ArgumentCaptor<Secret> clientsCaKey = ArgumentCaptor.forClass(Secret.class);
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaCertificateSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(ClusterOperator.secretName(NAME)), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(podOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(List.of()));

    KafkaAssemblyOperator op = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(1L));
    Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME);

    Checkpoint async = context.checkpoint();
    // Method reference instead of `() -> new Date()` — same behavior, idiomatic form
    op.new ReconciliationState(reconciliation, kafka).reconcileCas(Date::new).onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(clusterCaCert.getAllValues(), hasSize(1));
        assertThat(clusterCaKey.getAllValues(), hasSize(1));
        assertThat(clientsCaCert.getAllValues(), hasSize(1));
        assertThat(clientsCaKey.getAllValues(), hasSize(1));

        Secret clusterCaCertSecret = clusterCaCert.getValue();
        Secret clusterCaKeySecret = clusterCaKey.getValue();
        Secret clientsCaCertSecret = clientsCaCert.getValue();
        Secret clientsCaKeySecret = clientsCaKey.getValue();

        // Annotations must appear on the Cluster CA cert Secret only
        for (Map.Entry<String, String> entry : annos.entrySet()) {
            assertThat(clusterCaCertSecret.getMetadata().getAnnotations(), hasEntry(entry.getKey(), entry.getValue()));
            assertThat(clusterCaKeySecret.getMetadata().getAnnotations(), not(hasEntry(entry.getKey(), entry.getValue())));
            assertThat(clientsCaCertSecret.getMetadata().getAnnotations(), not(hasEntry(entry.getKey(), entry.getValue())));
            assertThat(clientsCaKeySecret.getMetadata().getAnnotations(), not(hasEntry(entry.getKey(), entry.getValue())));
        }

        // Labels must appear on the Cluster CA cert Secret only
        for (Map.Entry<String, String> entry : labels.entrySet()) {
            assertThat(clusterCaCertSecret.getMetadata().getLabels(), hasEntry(entry.getKey(), entry.getValue()));
            assertThat(clusterCaKeySecret.getMetadata().getLabels(), not(hasEntry(entry.getKey(), entry.getValue())));
            assertThat(clientsCaCertSecret.getMetadata().getLabels(), not(hasEntry(entry.getKey(), entry.getValue())));
            assertThat(clientsCaKeySecret.getMetadata().getLabels(), not(hasEntry(entry.getKey(), entry.getValue())));
        }

        async.flag();
    })));
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project — class KafkaAssemblyOperatorNonParametrizedTest, method testSelectorLabels:
/**
 * Tests that a Kafka custom resource whose labels do not match the operator's custom resource
 * selector is skipped: the reconciliation completes successfully right away and does not touch
 * any managed resources.
 *
 * @param context Test context
 */
@Test
public void testSelectorLabels(VertxTestContext context) {
    Kafka kafkaCr = new KafkaBuilder()
            .withNewMetadata()
                .withName(NAME)
                .withNamespace(NAMESPACE)
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withNewEphemeralStorage()
                    .endEphemeralStorage()
                .endKafka()
                .withNewZookeeper()
                    .withReplicas(3)
                    .withNewEphemeralStorage()
                    .endEphemeralStorage()
                .endZookeeper()
            .endSpec()
            .build();

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    // Mock the CRD Operator for Kafka resources
    CrdOperator mockKafkaOps = supplier.kafkaOperator;
    when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafkaCr));
    when(mockKafkaOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(kafkaCr);
    when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture());

    // Operator configured with a selector label which the Kafka CR above does not carry
    ClusterOperatorConfig config = new ClusterOperatorConfig(singleton("dummy"), 60_000, 120_000, 300_000, false, true, KafkaVersionTestUtils.getKafkaVersionLookup(), null, null, null, null, Labels.fromMap(Map.of("selectorLabel", "value")), "", 10, 10_000, 30, false, 1024);

    KafkaAssemblyOperator operator = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_19), certManager, passwordGenerator, supplier, config);
    Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME);

    Checkpoint async = context.checkpoint();
    operator.reconcile(reconciliation).onComplete(context.succeeding(v -> context.verify(() -> {
        // The resource labels don't match the selector labels => the reconciliation should exit right on
        // beginning with success. It should not reconcile any resources other than getting the Kafka
        // resource it self.
        verifyNoInteractions(supplier.stsOperations, supplier.serviceOperations, supplier.secretOperations, supplier.configMapOperations, supplier.podOperations, supplier.podDisruptionBudgetOperator, supplier.deploymentOperations);
        async.flag();
    })));
}
Aggregations