Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project: class OperatorMetricsTest, method failingReconcile.
// Verifies that a failed reconciliation is reflected in the operator metrics:
// the total and failed reconciliation counters each increment once, the duration
// timer records a single non-zero sample, and the resource.state gauge reports 0
// tagged with the failure reason. Each metric is also checked to carry the
// expected "selector" tag (empty string when selectorLabels is null).
public void failingReconcile(VertxTestContext context, Labels selectorLabels) {
    MetricsProvider metrics = createCleanMetricsProvider();
    AbstractWatchableStatusedResourceOperator resourceOperator = resourceOperatorWithExistingResourceWithSelectorLabel(selectorLabels);
    // Operator stub whose createOrUpdate always fails, so the reconcile path under
    // test is the failure branch of AbstractOperator.reconcile.
    AbstractOperator operator = new AbstractOperator(vertx, "TestResource", resourceOperator, metrics, selectorLabels) {
        @Override
        protected Future createOrUpdate(Reconciliation reconciliation, CustomResource resource) {
            return Future.failedFuture(new RuntimeException("Test error"));
        }
        @Override
        public Set<Condition> validate(Reconciliation reconciliation, CustomResource resource) {
            // Do nothing
            return emptySet();
        }
        @Override
        protected Future<Boolean> delete(Reconciliation reconciliation) {
            return null;
        }
        @Override
        protected Status createStatus() {
            return new Status() {
            };
        }
    };
    Checkpoint async = context.checkpoint();
    operator.reconcile(new Reconciliation("test", "TestResource", "my-namespace", "my-resource")).onComplete(context.failing(v -> context.verify(() -> {
        MeterRegistry registry = metrics.meterRegistry();
        // NOTE(review): getTags().get(2) assumes the selector tag is always at index 2
        // of the sorted tag list — brittle if tag names change; confirm ordering.
        Tag selectorTag = Tag.of("selector", selectorLabels != null ? selectorLabels.toSelectorString() : "");
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").meter().getId().getTags().get(2), is(selectorTag));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").tag("kind", "TestResource").counter().count(), is(1.0));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.failed").meter().getId().getTags().get(2), is(selectorTag));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.failed").tag("kind", "TestResource").counter().count(), is(1.0));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").meter().getId().getTags().get(2), is(selectorTag));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().count(), is(1L));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        // The exception message ("Test error") is surfaced as the "reason" tag on the gauge.
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resource.state").tag("kind", "TestResource").tag("name", "my-resource").tag("resource-namespace", "my-namespace").tag("reason", "Test error").gauge().value(), is(0.0));
        async.flag();
    })));
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project: class UserOperatorConfig, method fromMap.
/**
 * Loads configuration parameters from a related map (typically the environment variables).
 *
 * @param map map from which loading configuration parameters
 * @return User Operator configuration instance
 * @throws InvalidConfigurationException if a required option is missing or empty,
 *         or if the {@code STRIMZI_LABELS} value cannot be parsed
 */
@SuppressWarnings({ "checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity" })
public static UserOperatorConfig fromMap(Map<String, String> map) {
    // Mandatory: the namespace the operator watches.
    String namespace = requiredValue(map, UserOperatorConfig.STRIMZI_NAMESPACE);

    long reconciliationInterval = DEFAULT_FULL_RECONCILIATION_INTERVAL_MS;
    String reconciliationIntervalEnvVar = map.get(UserOperatorConfig.STRIMZI_FULL_RECONCILIATION_INTERVAL_MS);
    if (reconciliationIntervalEnvVar != null) {
        // NumberFormatException (unchecked) propagates for malformed values
        reconciliationInterval = Long.parseLong(reconciliationIntervalEnvVar);
    }

    int scramPasswordLength = DEFAULT_SCRAM_SHA_PASSWORD_LENGTH;
    String scramPasswordLengthEnvVar = map.get(UserOperatorConfig.STRIMZI_SCRAM_SHA_PASSWORD_LENGTH);
    if (scramPasswordLengthEnvVar != null) {
        scramPasswordLength = Integer.parseInt(scramPasswordLengthEnvVar);
    }

    String kafkaBootstrapServers = DEFAULT_KAFKA_BOOTSTRAP_SERVERS;
    String kafkaBootstrapServersEnvVar = map.get(UserOperatorConfig.STRIMZI_KAFKA_BOOTSTRAP_SERVERS);
    if (kafkaBootstrapServersEnvVar != null && !kafkaBootstrapServersEnvVar.isEmpty()) {
        kafkaBootstrapServers = kafkaBootstrapServersEnvVar;
    }

    Labels labels;
    try {
        labels = Labels.fromString(map.get(STRIMZI_LABELS));
    } catch (Exception e) {
        throw new InvalidConfigurationException("Failed to parse labels from " + STRIMZI_LABELS, e);
    }

    // Mandatory: the Clients CA certificate and key Secrets.
    String caCertSecretName = requiredValue(map, UserOperatorConfig.STRIMZI_CA_CERT_SECRET_NAME);
    String caKeySecretName = requiredValue(map, UserOperatorConfig.STRIMZI_CA_KEY_SECRET_NAME);

    // Optional Secrets — may legitimately be null.
    String clusterCaCertSecretName = map.get(UserOperatorConfig.STRIMZI_CLUSTER_CA_CERT_SECRET_NAME);
    String euoKeySecretName = map.get(UserOperatorConfig.STRIMZI_EO_KEY_SECRET_NAME);

    String caNamespace = map.get(UserOperatorConfig.STRIMZI_CA_NAMESPACE);
    if (caNamespace == null || caNamespace.isEmpty()) {
        // CA Secrets default to the operator's own namespace
        caNamespace = namespace;
    }

    String secretPrefix = map.get(UserOperatorConfig.STRIMZI_SECRET_PREFIX);
    if (secretPrefix == null || secretPrefix.isEmpty()) {
        secretPrefix = DEFAULT_SECRET_PREFIX;
    }

    boolean aclsAdminApiSupported = getBooleanProperty(map, UserOperatorConfig.STRIMZI_ACLS_ADMIN_API_SUPPORTED, UserOperatorConfig.DEFAULT_STRIMZI_ACLS_ADMIN_API_SUPPORTED);
    boolean kraftEnabled = getBooleanProperty(map, UserOperatorConfig.STRIMZI_KRAFT_ENABLED, UserOperatorConfig.DEFAULT_STRIMZI_KRAFT_ENABLED);
    int clientsCaValidityDays = getIntProperty(map, UserOperatorConfig.STRIMZI_CLIENTS_CA_VALIDITY, CertificateAuthority.DEFAULT_CERTS_VALIDITY_DAYS);
    int clientsCaRenewalDays = getIntProperty(map, UserOperatorConfig.STRIMZI_CLIENTS_CA_RENEWAL, CertificateAuthority.DEFAULT_CERTS_RENEWAL_DAYS);
    List<String> maintenanceWindows = parseMaintenanceTimeWindows(map.get(UserOperatorConfig.STRIMZI_MAINTENANCE_TIME_WINDOWS));

    return new UserOperatorConfig(namespace, reconciliationInterval, kafkaBootstrapServers, labels, caCertSecretName, caKeySecretName, clusterCaCertSecretName, euoKeySecretName, caNamespace, secretPrefix, aclsAdminApiSupported, kraftEnabled, clientsCaValidityDays, clientsCaRenewalDays, scramPasswordLength, maintenanceWindows);
}

/**
 * Returns the value mapped to {@code key}, throwing when it is missing or empty.
 *
 * @param map configuration map
 * @param key configuration key which is mandatory
 * @return the non-empty value for {@code key}
 * @throws InvalidConfigurationException when the value is null or empty
 */
private static String requiredValue(Map<String, String> map, String key) {
    String value = map.get(key);
    if (value == null || value.isEmpty()) {
        throw new InvalidConfigurationException(key + " cannot be null");
    }
    return value;
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project: class KafkaClusterTest, method checkStatefulSet.
/**
 * Asserts that the generated Kafka StatefulSet matches the test fixture's
 * expectations: metadata, selector, replica count, the single Kafka container
 * (image, probes, ports, volume mounts), the strimzi-tmp volume size limit and,
 * when a rack is configured on the Kafka resource, the pod anti-affinity rules
 * and the Kafka init container.
 */
private void checkStatefulSet(StatefulSet sts, Kafka cm, boolean isOpenShift) {
    // Metadata: name, namespace, labels and selector
    assertThat(sts.getMetadata().getName(), is(KafkaCluster.kafkaClusterName(cluster)));
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));

    PodSpec podSpec = sts.getSpec().getTemplate().getSpec();
    assertThat(podSpec.getSchedulerName(), is("default-scheduler"));

    List<Container> containers = podSpec.getContainers();
    assertThat(containers.size(), is(1));

    // Checks on the main Kafka container
    Container kafka = containers.get(0);
    assertThat(sts.getSpec().getReplicas(), is(Integer.valueOf(replicas)));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));
    assertThat(kafka.getImage(), is(image));

    // Liveness and readiness probes share the same configuration
    assertThat(kafka.getLivenessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(kafka.getLivenessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(kafka.getLivenessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(kafka.getLivenessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(kafka.getLivenessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));
    assertThat(kafka.getReadinessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(kafka.getReadinessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(kafka.getReadinessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(kafka.getReadinessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(kafka.getReadinessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));

    assertThat(AbstractModel.containerEnvVars(kafka).get(KafkaCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));

    // Volume mounts: tmp dir, broker certs, cluster CA certs and client CA certs
    assertThat(kafka.getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(kafka.getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(kafka.getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
    assertThat(kafka.getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
    assertThat(kafka.getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
    assertThat(kafka.getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
    assertThat(kafka.getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
    assertThat(kafka.getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));

    // Ports: control plane first, then replication
    assertThat(kafka.getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME));
    assertThat(kafka.getPorts().get(0).getContainerPort(), is(KafkaCluster.CONTROLPLANE_PORT));
    assertThat(kafka.getPorts().get(0).getProtocol(), is("TCP"));
    assertThat(kafka.getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME));
    assertThat(kafka.getPorts().get(1).getContainerPort(), is(KafkaCluster.REPLICATION_PORT));
    assertThat(kafka.getPorts().get(1).getProtocol(), is("TCP"));

    // The strimzi-tmp emptyDir volume should have the default size limit
    assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().stream().filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")).findFirst().get().getEmptyDir().getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));

    if (cm.getSpec().getKafka().getRack() != null) {
        Rack rack = cm.getSpec().getKafka().getRack();

        // The pod spec must carry preferred anti-affinity terms using the rack's topology key
        assertThat(podSpec.getAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution(), is(notNullValue()));
        List<WeightedPodAffinityTerm> terms = podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(terms, is(notNullValue()));
        assertThat(terms.size() > 0, is(true));
        boolean hasTopologyKey = terms.stream().anyMatch(term -> term.getPodAffinityTerm().getTopologyKey().equals(rack.getTopologyKey()));
        assertThat(hasTopologyKey, is(true));

        // The pod spec must contain the Kafka init container used for rack awareness
        List<Container> initContainers = podSpec.getInitContainers();
        assertThat(initContainers, is(notNullValue()));
        assertThat(initContainers.size() > 0, is(true));
        boolean hasInitKafka = initContainers.stream().anyMatch(container -> container.getName().equals(KafkaCluster.INIT_NAME));
        assertThat(hasInitKafka, is(true));
    }
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi-kafka-operator project: class JbodStorageTest, method getPvcs.
/**
 * Lists the PersistentVolumeClaims belonging to the Kafka cluster with the given
 * name in the given namespace, selected by the Strimzi cluster/kind/name labels.
 */
private List<PersistentVolumeClaim> getPvcs(String namespace, String name) {
    Labels selector = Labels.forStrimziCluster(name)
            .withStrimziKind(Kafka.RESOURCE_KIND)
            .withStrimziName(KafkaCluster.kafkaClusterName(name));
    return mockClient.persistentVolumeClaims()
            .inNamespace(namespace)
            .withLabels(selector.toMap())
            .list()
            .getItems();
}
Example usage of io.strimzi.operator.common.model.Labels from the strimzi/strimzi project: class KafkaClusterTest, method checkStatefulSet.
// Asserts that the generated Kafka StatefulSet matches the test fixture's
// expectations: metadata, selector, replica count, the single Kafka container
// (image, probes, ports, volume mounts), the strimzi-tmp volume size limit and,
// when a rack is configured, the pod anti-affinity rules and init container.
private void checkStatefulSet(StatefulSet sts, Kafka cm, boolean isOpenShift) {
    assertThat(sts.getMetadata().getName(), is(KafkaCluster.kafkaClusterName(cluster)));
    // ... in the same namespace ...
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    // ... with these labels
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));
    assertThat(sts.getSpec().getTemplate().getSpec().getSchedulerName(), is("default-scheduler"));
    // Exactly one container is expected in the pod template: the Kafka broker
    List<Container> containers = sts.getSpec().getTemplate().getSpec().getContainers();
    assertThat(containers.size(), is(1));
    // checks on the main Kafka container
    assertThat(sts.getSpec().getReplicas(), is(Integer.valueOf(replicas)));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));
    assertThat(containers.get(0).getImage(), is(image));
    // Liveness and readiness probes share the same configuration values
    assertThat(containers.get(0).getLivenessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(containers.get(0).getLivenessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(containers.get(0).getLivenessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(containers.get(0).getLivenessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(containers.get(0).getLivenessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));
    assertThat(containers.get(0).getReadinessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(containers.get(0).getReadinessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(containers.get(0).getReadinessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(containers.get(0).getReadinessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(containers.get(0).getReadinessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));
    assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(KafkaCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));
    // Volume mounts by fixed index: 1 = tmp dir, 3 = broker certs, 2 = cluster CA, 4 = client CA
    assertThat(containers.get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(containers.get(0).getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
    assertThat(containers.get(0).getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
    assertThat(containers.get(0).getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
    assertThat(containers.get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
    assertThat(containers.get(0).getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
    assertThat(containers.get(0).getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));
    // Ports: control plane first, then replication
    assertThat(containers.get(0).getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME));
    assertThat(containers.get(0).getPorts().get(0).getContainerPort(), is(KafkaCluster.CONTROLPLANE_PORT));
    assertThat(containers.get(0).getPorts().get(0).getProtocol(), is("TCP"));
    assertThat(containers.get(0).getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME));
    assertThat(containers.get(0).getPorts().get(1).getContainerPort(), is(KafkaCluster.REPLICATION_PORT));
    assertThat(containers.get(0).getPorts().get(1).getProtocol(), is("TCP"));
    // The strimzi-tmp emptyDir volume should have the default size limit
    assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().stream().filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")).findFirst().get().getEmptyDir().getSizeLimit(), is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));
    if (cm.getSpec().getKafka().getRack() != null) {
        Rack rack = cm.getSpec().getKafka().getRack();
        // check that the pod spec contains anti-affinity rules with the right topology key
        PodSpec podSpec = sts.getSpec().getTemplate().getSpec();
        assertThat(podSpec.getAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution(), is(notNullValue()));
        List<WeightedPodAffinityTerm> terms = podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(terms, is(notNullValue()));
        assertThat(terms.size() > 0, is(true));
        boolean isTopologyKey = terms.stream().anyMatch(term -> term.getPodAffinityTerm().getTopologyKey().equals(rack.getTopologyKey()));
        assertThat(isTopologyKey, is(true));
        // check that pod spec contains the init Kafka container
        List<Container> initContainers = podSpec.getInitContainers();
        assertThat(initContainers, is(notNullValue()));
        assertThat(initContainers.size() > 0, is(true));
        boolean isInitKafka = initContainers.stream().anyMatch(container -> container.getName().equals(KafkaCluster.INIT_NAME));
        assertThat(isInitKafka, is(true));
    }
}
Aggregations