Search in sources:

Example 1 with MetricsCollector

use of io.strimzi.systemtest.metrics.MetricsCollector in project strimzi by strimzi.

From the class NetworkPoliciesIsolatedST, method testNetworkPoliciesWithPlainListener:

// Verifies that the plain internal listener (port 9092, SCRAM-SHA-512 auth) guarded by a
// NetworkPolicy peer rule ("app" label must equal the allowed-clients name) admits a client
// pod carrying the label, blocks a client pod without it, and that Kafka Exporter metrics
// remain scrapeable from the allowed pod.
@IsolatedTest("Specific cluster operator for test case")
@Tag(INTERNAL_CLIENTS_USED)
void testNetworkPoliciesWithPlainListener(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    // Replace the shared cluster operator with a fresh installation dedicated to this test.
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(namespace).createInstallation().runInstallation();
    String allowedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-allow";
    String deniedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-deny";
    // Only pods carrying this label are admitted by the listener's NetworkPolicy peer rule.
    Map<String, String> matchLabelForPlain = new HashMap<>();
    matchLabelForPlain.put("app", allowedKafkaClientsName);
    // Single-replica ephemeral Kafka: plain internal listener restricted to the label above,
    // SCRAM-SHA-512 authentication, and Kafka Exporter enabled.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).editSpec().editKafka().withListeners(new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).withNewKafkaListenerAuthenticationScramSha512Auth().endKafkaListenerAuthenticationScramSha512Auth().withNetworkPolicyPeers(new NetworkPolicyPeerBuilder().withNewPodSelector().withMatchLabels(matchLabelForPlain).endPodSelector().build()).build()).endKafka().withNewKafkaExporter().endKafkaExporter().endSpec().build());
    // Kafka Exporter needs its own allow-rule so it can keep talking to the brokers.
    NetworkPolicyResource.allowNetworkPolicySettingsForKafkaExporter(extensionContext, clusterName);
    String topic0 = "topic-example-0";
    String topic1 = "topic-example-1";
    String userName = "user-example";
    KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(clusterName, userName).build();
    resourceManager.createResource(extensionContext, kafkaUser);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic0).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic1).build());
    // Client pod whose "app" label matches the NetworkPolicy peer -> traffic must be allowed.
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, allowedKafkaClientsName, kafkaUser).build());
    String allowedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(allowedKafkaClientsName).get(0).getMetadata().getName();
    LOGGER.info("Verifying that {} pod is able to exchange messages", allowedKafkaClientsPodName);
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder().withUsingPodName(allowedKafkaClientsPodName).withTopicName(topic0).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withKafkaUsername(userName).withSecurityProtocol(SecurityProtocol.PLAINTEXT).withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME).build();
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
    // Client pod whose label does NOT match the peer rule -> traffic must be blocked.
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, deniedKafkaClientsName, kafkaUser).build());
    String deniedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(deniedKafkaClientsName).get(0).getMetadata().getName();
    // Same client configuration, but running from the denied pod and using the second topic.
    InternalKafkaClient newInternalKafkaClient = internalKafkaClient.toBuilder().withUsingPodName(deniedKafkaClientsPodName).withTopicName(topic1).build();
    LOGGER.info("Verifying that {} pod is not able to exchange messages", deniedKafkaClientsPodName);
    // The blocked exchange surfaces as an AssertionError from checkProducedAndConsumedMessages.
    assertThrows(AssertionError.class, () -> {
        newInternalKafkaClient.checkProducedAndConsumedMessages(newInternalKafkaClient.sendMessagesPlain(), newInternalKafkaClient.receiveMessagesPlain());
    });
    LOGGER.info("Check metrics exported by Kafka Exporter");
    // Scrape Kafka Exporter metrics from inside the allowed client pod.
    // NOTE(review): no .withNamespaceName(namespace) here, unlike other MetricsCollector
    // builders in this file — confirm the collector's default namespace is the intended one.
    MetricsCollector metricsCollector = new MetricsCollector.Builder().withScraperPodName(allowedKafkaClientsPodName).withComponentName(clusterName).withComponentType(ComponentType.KafkaExporter).build();
    Map<String, String> kafkaExporterMetricsData = metricsCollector.collectMetricsFromPods();
    // Hamcrest assertThat(reason, boolean): fails with the reason message when the flag is false.
    assertThat("Kafka Exporter metrics should be non-empty", kafkaExporterMetricsData.size() > 0);
    // Every scraped pod's output must expose consumer-group offsets and exactly one
    // partition for each of the two topics created above.
    for (Map.Entry<String, String> entry : kafkaExporterMetricsData.entrySet()) {
        assertThat("Value from collected metric should be non-empty", !entry.getValue().isEmpty());
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_consumergroup_current_offset"));
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic0 + "\"} 1"));
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic1 + "\"} 1"));
    }
}
Also used : MetricsCollector(io.strimzi.systemtest.metrics.MetricsCollector) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) HashMap(java.util.HashMap) EnvVarBuilder(io.fabric8.kubernetes.api.model.EnvVarBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) InternalKafkaClient(io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient) Map(java.util.Map) HashMap(java.util.HashMap) KafkaUser(io.strimzi.api.kafka.model.KafkaUser) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Example 2 with MetricsCollector

use of io.strimzi.systemtest.metrics.MetricsCollector in project strimzi-kafka-operator by strimzi.

From the class RollingUpdateST, method testMetricsChange:

// Verifies the lifecycle of JMX Prometheus exporter metrics configuration referenced via
// ConfigMaps: (1) initial metrics are exposed, (2) editing the referenced ConfigMaps updates
// the operator-generated metrics config WITHOUT rolling Kafka/ZooKeeper pods, and
// (3) removing metricsConfig from the Kafka CR rolls the pods and leaves no metrics.
@IsolatedTest
@Tag(ROLLING_UPDATE)
@SuppressWarnings("checkstyle:MethodLength")
void testMetricsChange(ExtensionContext extensionContext) throws JsonProcessingException {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    // Kafka
    // Initial Kafka exporter rule: turn every JMX Count attribute into a Prometheus counter.
    Map<String, Object> kafkaRule = new HashMap<>();
    kafkaRule.put("pattern", "kafka.(\\w+)<type=(.+), name=(.+)><>Count");
    kafkaRule.put("name", "kafka_$1_$2_$3_count");
    kafkaRule.put("type", "COUNTER");
    Map<String, Object> kafkaMetrics = new HashMap<>();
    kafkaMetrics.put("lowercaseOutputName", true);
    kafkaMetrics.put("rules", Collections.singletonList(kafkaRule));
    String metricsCMNameK = "k-metrics-cm";
    // Serialize the rule map to YAML for the ConfigMap payload.
    ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
    String yaml = mapper.writeValueAsString(kafkaMetrics);
    ConfigMap metricsCMK = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameK).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", yaml)).build();
    JmxPrometheusExporterMetrics kafkaMetricsConfig = new JmxPrometheusExporterMetricsBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(metricsCMNameK).withKey("metrics-config.yml").withOptional(true).build()).endValueFrom().build();
    // Zookeeper
    // Initial ZooKeeper rule: expose replicated-server attributes with a replicaId label.
    Map<String, Object> zookeeperLabels = new HashMap<>();
    zookeeperLabels.put("replicaId", "$2");
    Map<String, Object> zookeeperRule = new HashMap<>();
    zookeeperRule.put("labels", zookeeperLabels);
    zookeeperRule.put("name", "zookeeper_$3");
    zookeeperRule.put("pattern", "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)");
    Map<String, Object> zookeeperMetrics = new HashMap<>();
    zookeeperMetrics.put("lowercaseOutputName", true);
    zookeeperMetrics.put("rules", Collections.singletonList(zookeeperRule));
    String metricsCMNameZk = "zk-metrics-cm";
    ConfigMap metricsCMZk = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameZk).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", mapper.writeValueAsString(zookeeperMetrics))).build();
    JmxPrometheusExporterMetrics zkMetricsConfig = new JmxPrometheusExporterMetricsBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(metricsCMNameZk).withKey("metrics-config.yml").withOptional(true).build()).endValueFrom().build();
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMK);
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMZk);
    // 3-broker / 3-node cluster wired to the two metrics ConfigMaps, with Kafka Exporter.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3).editMetadata().withNamespace(namespace).endMetadata().editSpec().editKafka().withMetricsConfig(kafkaMetricsConfig).endKafka().editOrNewZookeeper().withMetricsConfig(zkMetricsConfig).endZookeeper().withNewKafkaExporter().endKafkaExporter().endSpec().build());
    // Snapshot pod UIDs so later steps can detect whether a rolling update happened.
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(namespace, zkSelector);
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).editMetadata().withNamespace(namespace).endMetadata().build());
    String metricsScraperPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespace, kafkaClientsName).get(0).getMetadata().getName();
    MetricsCollector metricsCollector = new MetricsCollector.Builder().withNamespaceName(namespace).withScraperPodName(metricsScraperPodName).withComponentName(clusterName).withComponentType(ComponentType.Kafka).build();
    LOGGER.info("Check if metrics are present in pod of Kafka and Zookeeper");
    Map<String, String> kafkaMetricsOutput = metricsCollector.collectMetricsFromPods();
    // Reuse the same collector for ZooKeeper by swapping only the component type.
    Map<String, String> zkMetricsOutput = metricsCollector.toBuilder().withComponentType(ComponentType.Zookeeper).build().collectMetricsFromPods();
    assertThat(kafkaMetricsOutput.values().toString().contains("kafka_"), is(true));
    assertThat(zkMetricsOutput.values().toString().contains("replicaId"), is(true));
    LOGGER.info("Changing metrics to something else");
    // Map.replace(key, oldValue, newValue): swaps only if the current value equals oldValue.
    kafkaRule.replace("pattern", "kafka.(\\w+)<type=(.+), name=(.+)><>Count", "kafka.(\\w+)<type=(.+), name=(.+)Percent\\w*><>MeanRate");
    kafkaRule.replace("name", "kafka_$1_$2_$3_count", "kafka_$1_$2_$3_percent");
    kafkaRule.replace("type", "COUNTER", "GAUGE");
    zookeeperRule.replace("pattern", "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)", "org.apache.ZooKeeperService<name0=StandaloneServer_port(\\d+)><>(\\w+)");
    zookeeperRule.replace("name", "zookeeper_$3", "zookeeper_$2");
    // Drops the labels rule by replacing its value with null (HashMap permits null values).
    zookeeperRule.replace("labels", zookeeperLabels, null);
    metricsCMZk = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameZk).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", mapper.writeValueAsString(zookeeperMetrics))).build();
    metricsCMK = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameK).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", mapper.writeValueAsString(kafkaMetrics))).build();
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMK);
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMZk);
    PodUtils.verifyThatRunningPodsAreStable(namespace, KafkaResources.zookeeperStatefulSetName(clusterName));
    PodUtils.verifyThatRunningPodsAreStable(namespace, KafkaResources.kafkaStatefulSetName(clusterName));
    LOGGER.info("Check if Kafka and Zookeeper pods didn't roll");
    // A metrics-only ConfigMap change must not trigger a rolling update.
    assertThat(PodUtils.podSnapshot(namespace, zkSelector), is(zkPods));
    assertThat(PodUtils.podSnapshot(namespace, kafkaSelector), is(kafkaPods));
    LOGGER.info("Check if Kafka and Zookeeper metrics are changed");
    // The operator stores the metrics config as JSON; round-trip the user YAML through
    // Jackson (YAML -> Object -> JSON) to compare it against the operator-generated data.
    ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory());
    String kafkaMetricsConf = kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(metricsCMNameK).get().getData().get("metrics-config.yml");
    String zkMetricsConf = kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(metricsCMNameZk).get().getData().get("metrics-config.yml");
    Object kafkaMetricsJsonToYaml = yamlReader.readValue(kafkaMetricsConf, Object.class);
    Object zkMetricsJsonToYaml = yamlReader.readValue(zkMetricsConf, Object.class);
    ObjectMapper jsonWriter = new ObjectMapper();
    assertThat(kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).get().getData().get(Constants.METRICS_CONFIG_JSON_NAME), is(jsonWriter.writeValueAsString(kafkaMetricsJsonToYaml)));
    assertThat(kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName)).get().getData().get(Constants.METRICS_CONFIG_JSON_NAME), is(jsonWriter.writeValueAsString(zkMetricsJsonToYaml)));
    LOGGER.info("Check if metrics are present in pod of Kafka and Zookeeper");
    kafkaMetricsOutput = metricsCollector.collectMetricsFromPods();
    zkMetricsOutput = metricsCollector.toBuilder().withComponentType(ComponentType.Zookeeper).build().collectMetricsFromPods();
    assertThat(kafkaMetricsOutput.values().toString().contains("kafka_"), is(true));
    // NOTE(review): this still expects "replicaId" AFTER the labels rule was removed above —
    // confirm whether the exporter keeps serving the old config until a pod roll, or whether
    // this assertion should have been inverted.
    assertThat(zkMetricsOutput.values().toString().contains("replicaId"), is(true));
    LOGGER.info("Removing metrics from Kafka and Zookeeper and setting them to null");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        kafka.getSpec().getKafka().setMetricsConfig(null);
        kafka.getSpec().getZookeeper().setMetricsConfig(null);
    }, namespace);
    LOGGER.info("Wait if Kafka and Zookeeper pods will roll");
    // Removing metricsConfig from the CR IS a spec change, so both components must roll.
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(namespace, zkSelector, 3, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, 3, kafkaPods);
    LOGGER.info("Check if metrics are not existing in pods");
    // After the roll, scraping must return empty strings for every pod.
    kafkaMetricsOutput = metricsCollector.collectMetricsFromPodsWithoutWait();
    zkMetricsOutput = metricsCollector.toBuilder().withComponentType(ComponentType.Zookeeper).build().collectMetricsFromPodsWithoutWait();
    kafkaMetricsOutput.values().forEach(value -> assertThat(value, is("")));
    zkMetricsOutput.values().forEach(value -> assertThat(value, is("")));
}
Also used : MetricsCollector(io.strimzi.systemtest.metrics.MetricsCollector) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) JmxPrometheusExporterMetrics(io.strimzi.api.kafka.model.JmxPrometheusExporterMetrics) JmxPrometheusExporterMetricsBuilder(io.strimzi.api.kafka.model.JmxPrometheusExporterMetricsBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) YAMLFactory(com.fasterxml.jackson.dataformat.yaml.YAMLFactory) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Example 3 with MetricsCollector

use of io.strimzi.systemtest.metrics.MetricsCollector in project strimzi-kafka-operator by strimzi.

From the class NetworkPoliciesIsolatedST, method testNetworkPoliciesWithPlainListener:

// Verifies that the plain internal listener (port 9092, SCRAM-SHA-512 auth) guarded by a
// NetworkPolicy peer rule ("app" label must equal the allowed-clients name) admits a client
// pod carrying the label, blocks a client pod without it, and that Kafka Exporter metrics
// remain scrapeable from the allowed pod.
@IsolatedTest("Specific cluster operator for test case")
@Tag(INTERNAL_CLIENTS_USED)
void testNetworkPoliciesWithPlainListener(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    // Replace the shared cluster operator with a fresh installation dedicated to this test.
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder().withExtensionContext(BeforeAllOnce.getSharedExtensionContext()).withNamespace(namespace).createInstallation().runInstallation();
    String allowedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-allow";
    String deniedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-deny";
    // Only pods carrying this label are admitted by the listener's NetworkPolicy peer rule.
    Map<String, String> matchLabelForPlain = new HashMap<>();
    matchLabelForPlain.put("app", allowedKafkaClientsName);
    // Single-replica ephemeral Kafka: plain internal listener restricted to the label above,
    // SCRAM-SHA-512 authentication, and Kafka Exporter enabled.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).editSpec().editKafka().withListeners(new GenericKafkaListenerBuilder().withName(Constants.PLAIN_LISTENER_DEFAULT_NAME).withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).withNewKafkaListenerAuthenticationScramSha512Auth().endKafkaListenerAuthenticationScramSha512Auth().withNetworkPolicyPeers(new NetworkPolicyPeerBuilder().withNewPodSelector().withMatchLabels(matchLabelForPlain).endPodSelector().build()).build()).endKafka().withNewKafkaExporter().endKafkaExporter().endSpec().build());
    // Kafka Exporter needs its own allow-rule so it can keep talking to the brokers.
    NetworkPolicyResource.allowNetworkPolicySettingsForKafkaExporter(extensionContext, clusterName);
    String topic0 = "topic-example-0";
    String topic1 = "topic-example-1";
    String userName = "user-example";
    KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(clusterName, userName).build();
    resourceManager.createResource(extensionContext, kafkaUser);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic0).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic1).build());
    // Client pod whose "app" label matches the NetworkPolicy peer -> traffic must be allowed.
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, allowedKafkaClientsName, kafkaUser).build());
    String allowedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(allowedKafkaClientsName).get(0).getMetadata().getName();
    LOGGER.info("Verifying that {} pod is able to exchange messages", allowedKafkaClientsPodName);
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder().withUsingPodName(allowedKafkaClientsPodName).withTopicName(topic0).withNamespaceName(namespace).withClusterName(clusterName).withMessageCount(MESSAGE_COUNT).withKafkaUsername(userName).withSecurityProtocol(SecurityProtocol.PLAINTEXT).withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME).build();
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
    // Client pod whose label does NOT match the peer rule -> traffic must be blocked.
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, deniedKafkaClientsName, kafkaUser).build());
    String deniedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(deniedKafkaClientsName).get(0).getMetadata().getName();
    // Same client configuration, but running from the denied pod and using the second topic.
    InternalKafkaClient newInternalKafkaClient = internalKafkaClient.toBuilder().withUsingPodName(deniedKafkaClientsPodName).withTopicName(topic1).build();
    LOGGER.info("Verifying that {} pod is not able to exchange messages", deniedKafkaClientsPodName);
    // The blocked exchange surfaces as an AssertionError from checkProducedAndConsumedMessages.
    assertThrows(AssertionError.class, () -> {
        newInternalKafkaClient.checkProducedAndConsumedMessages(newInternalKafkaClient.sendMessagesPlain(), newInternalKafkaClient.receiveMessagesPlain());
    });
    LOGGER.info("Check metrics exported by Kafka Exporter");
    // Scrape Kafka Exporter metrics from inside the allowed client pod.
    // NOTE(review): no .withNamespaceName(namespace) here, unlike other MetricsCollector
    // builders in this file — confirm the collector's default namespace is the intended one.
    MetricsCollector metricsCollector = new MetricsCollector.Builder().withScraperPodName(allowedKafkaClientsPodName).withComponentName(clusterName).withComponentType(ComponentType.KafkaExporter).build();
    Map<String, String> kafkaExporterMetricsData = metricsCollector.collectMetricsFromPods();
    // Hamcrest assertThat(reason, boolean): fails with the reason message when the flag is false.
    assertThat("Kafka Exporter metrics should be non-empty", kafkaExporterMetricsData.size() > 0);
    // Every scraped pod's output must expose consumer-group offsets and exactly one
    // partition for each of the two topics created above.
    for (Map.Entry<String, String> entry : kafkaExporterMetricsData.entrySet()) {
        assertThat("Value from collected metric should be non-empty", !entry.getValue().isEmpty());
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_consumergroup_current_offset"));
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic0 + "\"} 1"));
        assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic1 + "\"} 1"));
    }
}
Also used : MetricsCollector(io.strimzi.systemtest.metrics.MetricsCollector) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) HashMap(java.util.HashMap) EnvVarBuilder(io.fabric8.kubernetes.api.model.EnvVarBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) InternalKafkaClient(io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient) Map(java.util.Map) HashMap(java.util.HashMap) KafkaUser(io.strimzi.api.kafka.model.KafkaUser) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Example 4 with MetricsCollector

use of io.strimzi.systemtest.metrics.MetricsCollector in project strimzi by strimzi.

From the class RollingUpdateST, method testMetricsChange:

// Verifies the lifecycle of JMX Prometheus exporter metrics configuration referenced via
// ConfigMaps: (1) initial metrics are exposed, (2) editing the referenced ConfigMaps updates
// the operator-generated metrics config WITHOUT rolling Kafka/ZooKeeper pods, and
// (3) removing metricsConfig from the Kafka CR rolls the pods and leaves no metrics.
@IsolatedTest
@Tag(ROLLING_UPDATE)
@SuppressWarnings("checkstyle:MethodLength")
void testMetricsChange(ExtensionContext extensionContext) throws JsonProcessingException {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    // Kafka
    // Initial Kafka exporter rule: turn every JMX Count attribute into a Prometheus counter.
    Map<String, Object> kafkaRule = new HashMap<>();
    kafkaRule.put("pattern", "kafka.(\\w+)<type=(.+), name=(.+)><>Count");
    kafkaRule.put("name", "kafka_$1_$2_$3_count");
    kafkaRule.put("type", "COUNTER");
    Map<String, Object> kafkaMetrics = new HashMap<>();
    kafkaMetrics.put("lowercaseOutputName", true);
    kafkaMetrics.put("rules", Collections.singletonList(kafkaRule));
    String metricsCMNameK = "k-metrics-cm";
    // Serialize the rule map to YAML for the ConfigMap payload.
    ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
    String yaml = mapper.writeValueAsString(kafkaMetrics);
    ConfigMap metricsCMK = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameK).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", yaml)).build();
    JmxPrometheusExporterMetrics kafkaMetricsConfig = new JmxPrometheusExporterMetricsBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(metricsCMNameK).withKey("metrics-config.yml").withOptional(true).build()).endValueFrom().build();
    // Zookeeper
    // Initial ZooKeeper rule: expose replicated-server attributes with a replicaId label.
    Map<String, Object> zookeeperLabels = new HashMap<>();
    zookeeperLabels.put("replicaId", "$2");
    Map<String, Object> zookeeperRule = new HashMap<>();
    zookeeperRule.put("labels", zookeeperLabels);
    zookeeperRule.put("name", "zookeeper_$3");
    zookeeperRule.put("pattern", "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)");
    Map<String, Object> zookeeperMetrics = new HashMap<>();
    zookeeperMetrics.put("lowercaseOutputName", true);
    zookeeperMetrics.put("rules", Collections.singletonList(zookeeperRule));
    String metricsCMNameZk = "zk-metrics-cm";
    ConfigMap metricsCMZk = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameZk).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", mapper.writeValueAsString(zookeeperMetrics))).build();
    JmxPrometheusExporterMetrics zkMetricsConfig = new JmxPrometheusExporterMetricsBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName(metricsCMNameZk).withKey("metrics-config.yml").withOptional(true).build()).endValueFrom().build();
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMK);
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMZk);
    // 3-broker / 3-node cluster wired to the two metrics ConfigMaps, with Kafka Exporter.
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3).editMetadata().withNamespace(namespace).endMetadata().editSpec().editKafka().withMetricsConfig(kafkaMetricsConfig).endKafka().editOrNewZookeeper().withMetricsConfig(zkMetricsConfig).endZookeeper().withNewKafkaExporter().endKafkaExporter().endSpec().build());
    // Snapshot pod UIDs so later steps can detect whether a rolling update happened.
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(namespace, zkSelector);
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).editMetadata().withNamespace(namespace).endMetadata().build());
    String metricsScraperPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespace, kafkaClientsName).get(0).getMetadata().getName();
    MetricsCollector metricsCollector = new MetricsCollector.Builder().withNamespaceName(namespace).withScraperPodName(metricsScraperPodName).withComponentName(clusterName).withComponentType(ComponentType.Kafka).build();
    LOGGER.info("Check if metrics are present in pod of Kafka and Zookeeper");
    Map<String, String> kafkaMetricsOutput = metricsCollector.collectMetricsFromPods();
    // Reuse the same collector for ZooKeeper by swapping only the component type.
    Map<String, String> zkMetricsOutput = metricsCollector.toBuilder().withComponentType(ComponentType.Zookeeper).build().collectMetricsFromPods();
    assertThat(kafkaMetricsOutput.values().toString().contains("kafka_"), is(true));
    assertThat(zkMetricsOutput.values().toString().contains("replicaId"), is(true));
    LOGGER.info("Changing metrics to something else");
    // Map.replace(key, oldValue, newValue): swaps only if the current value equals oldValue.
    kafkaRule.replace("pattern", "kafka.(\\w+)<type=(.+), name=(.+)><>Count", "kafka.(\\w+)<type=(.+), name=(.+)Percent\\w*><>MeanRate");
    kafkaRule.replace("name", "kafka_$1_$2_$3_count", "kafka_$1_$2_$3_percent");
    kafkaRule.replace("type", "COUNTER", "GAUGE");
    zookeeperRule.replace("pattern", "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)", "org.apache.ZooKeeperService<name0=StandaloneServer_port(\\d+)><>(\\w+)");
    zookeeperRule.replace("name", "zookeeper_$3", "zookeeper_$2");
    // Drops the labels rule by replacing its value with null (HashMap permits null values).
    zookeeperRule.replace("labels", zookeeperLabels, null);
    metricsCMZk = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameZk).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", mapper.writeValueAsString(zookeeperMetrics))).build();
    metricsCMK = new ConfigMapBuilder().withNewMetadata().withName(metricsCMNameK).withNamespace(namespace).endMetadata().withData(singletonMap("metrics-config.yml", mapper.writeValueAsString(kafkaMetrics))).build();
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMK);
    kubeClient(namespace).getClient().configMaps().inNamespace(namespace).createOrReplace(metricsCMZk);
    PodUtils.verifyThatRunningPodsAreStable(namespace, KafkaResources.zookeeperStatefulSetName(clusterName));
    PodUtils.verifyThatRunningPodsAreStable(namespace, KafkaResources.kafkaStatefulSetName(clusterName));
    LOGGER.info("Check if Kafka and Zookeeper pods didn't roll");
    // A metrics-only ConfigMap change must not trigger a rolling update.
    assertThat(PodUtils.podSnapshot(namespace, zkSelector), is(zkPods));
    assertThat(PodUtils.podSnapshot(namespace, kafkaSelector), is(kafkaPods));
    LOGGER.info("Check if Kafka and Zookeeper metrics are changed");
    // The operator stores the metrics config as JSON; round-trip the user YAML through
    // Jackson (YAML -> Object -> JSON) to compare it against the operator-generated data.
    ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory());
    String kafkaMetricsConf = kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(metricsCMNameK).get().getData().get("metrics-config.yml");
    String zkMetricsConf = kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(metricsCMNameZk).get().getData().get("metrics-config.yml");
    Object kafkaMetricsJsonToYaml = yamlReader.readValue(kafkaMetricsConf, Object.class);
    Object zkMetricsJsonToYaml = yamlReader.readValue(zkMetricsConf, Object.class);
    ObjectMapper jsonWriter = new ObjectMapper();
    assertThat(kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).get().getData().get(Constants.METRICS_CONFIG_JSON_NAME), is(jsonWriter.writeValueAsString(kafkaMetricsJsonToYaml)));
    assertThat(kubeClient(namespace).getClient().configMaps().inNamespace(namespace).withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName)).get().getData().get(Constants.METRICS_CONFIG_JSON_NAME), is(jsonWriter.writeValueAsString(zkMetricsJsonToYaml)));
    LOGGER.info("Check if metrics are present in pod of Kafka and Zookeeper");
    kafkaMetricsOutput = metricsCollector.collectMetricsFromPods();
    zkMetricsOutput = metricsCollector.toBuilder().withComponentType(ComponentType.Zookeeper).build().collectMetricsFromPods();
    assertThat(kafkaMetricsOutput.values().toString().contains("kafka_"), is(true));
    // NOTE(review): this still expects "replicaId" AFTER the labels rule was removed above —
    // confirm whether the exporter keeps serving the old config until a pod roll, or whether
    // this assertion should have been inverted.
    assertThat(zkMetricsOutput.values().toString().contains("replicaId"), is(true));
    LOGGER.info("Removing metrics from Kafka and Zookeeper and setting them to null");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        kafka.getSpec().getKafka().setMetricsConfig(null);
        kafka.getSpec().getZookeeper().setMetricsConfig(null);
    }, namespace);
    LOGGER.info("Wait if Kafka and Zookeeper pods will roll");
    // Removing metricsConfig from the CR IS a spec change, so both components must roll.
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(namespace, zkSelector, 3, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, 3, kafkaPods);
    LOGGER.info("Check if metrics are not existing in pods");
    // After the roll, scraping must return empty strings for every pod.
    kafkaMetricsOutput = metricsCollector.collectMetricsFromPodsWithoutWait();
    zkMetricsOutput = metricsCollector.toBuilder().withComponentType(ComponentType.Zookeeper).build().collectMetricsFromPodsWithoutWait();
    kafkaMetricsOutput.values().forEach(value -> assertThat(value, is("")));
    zkMetricsOutput.values().forEach(value -> assertThat(value, is("")));
}
Also used : MetricsCollector(io.strimzi.systemtest.metrics.MetricsCollector) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) HashMap(java.util.HashMap) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) JmxPrometheusExporterMetrics(io.strimzi.api.kafka.model.JmxPrometheusExporterMetrics) JmxPrometheusExporterMetricsBuilder(io.strimzi.api.kafka.model.JmxPrometheusExporterMetricsBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) YAMLFactory(com.fasterxml.jackson.dataformat.yaml.YAMLFactory) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Aggregations

IsolatedTest (io.strimzi.systemtest.annotations.IsolatedTest)4 MetricsCollector (io.strimzi.systemtest.metrics.MetricsCollector)4 HashMap (java.util.HashMap)4 Tag (org.junit.jupiter.api.Tag)4 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)2 YAMLFactory (com.fasterxml.jackson.dataformat.yaml.YAMLFactory)2 ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap)2 ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder)2 ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder)2 EnvVarBuilder (io.fabric8.kubernetes.api.model.EnvVarBuilder)2 LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector)2 NamespaceBuilder (io.fabric8.kubernetes.api.model.NamespaceBuilder)2 NetworkPolicyPeerBuilder (io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder)2 JmxPrometheusExporterMetrics (io.strimzi.api.kafka.model.JmxPrometheusExporterMetrics)2 JmxPrometheusExporterMetricsBuilder (io.strimzi.api.kafka.model.JmxPrometheusExporterMetricsBuilder)2 KafkaUser (io.strimzi.api.kafka.model.KafkaUser)2 GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder)2 InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient)2 SetupClusterOperator (io.strimzi.systemtest.resources.operator.SetupClusterOperator)2 Map (java.util.Map)2