Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi/strimzi project: class MetricsIsolatedST, method testKafkaBridgeMetrics.
@ParallelTest
@Tag(BRIDGE)
@Tag(ACCEPTANCE)
void testKafkaBridgeMetrics(ExtensionContext extensionContext) {
    // Names for the client jobs that drive traffic through the HTTP bridge.
    String producerJobName = "bridge-producer";
    String consumerJobName = "bridge-consumer";

    // Metrics collector scoped to the KafkaBridge component of the shared bridge cluster.
    MetricsCollector bridgeMetricsCollector = collector.toBuilder()
        .withComponentName(BRIDGE_CLUSTER)
        .withComponentType(ComponentType.KafkaBridge)
        .build();

    // Attach consumer before producer
    BridgeClients bridgeClients = new BridgeClientsBuilder()
        .withNamespaceName(INFRA_NAMESPACE)
        .withProducerName(producerJobName)
        .withConsumerName(consumerJobName)
        .withBootstrapAddress(KafkaBridgeResources.serviceName(BRIDGE_CLUSTER))
        .withTopicName(bridgeTopic)
        .withMessageCount(MESSAGE_COUNT)
        .withPort(Constants.HTTP_BRIDGE_DEFAULT_PORT)
        .withDelayMs(200)
        .withPollInterval(200)
        .build();

    resourceManager.createResource(extensionContext, bridgeClients.producerStrimziBridge());
    resourceManager.createResource(extensionContext, bridgeClients.consumerStrimziBridge());

    // Poll bridge metrics until exactly one producer instance is reported.
    TestUtils.waitFor("KafkaProducer metrics will be available", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> {
        LOGGER.info("Looking for 'strimzi_bridge_kafka_producer_count' in bridge metrics");
        kafkaBridgeMetricsData = bridgeMetricsCollector.collectMetricsFromPods();
        Pattern producerCount = Pattern.compile("strimzi_bridge_kafka_producer_count\\{.*,} ([\\d.][^\\n]+)", Pattern.CASE_INSENSITIVE);
        ArrayList<Double> producerValues = MetricsCollector.collectSpecificMetric(producerCount, kafkaBridgeMetricsData);
        long matchCount = producerValues.stream().mapToDouble(Double::doubleValue).count();
        return matchCount == 1;
    });

    // Poll bridge metrics until at least one consumer connection metric shows up.
    TestUtils.waitFor("KafkaConsumer metrics will be available", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> {
        LOGGER.info("Looking for 'strimzi_bridge_kafka_consumer_connection_count' in bridge metrics");
        kafkaBridgeMetricsData = bridgeMetricsCollector.collectMetricsFromPods();
        Pattern consumerConnections = Pattern.compile("strimzi_bridge_kafka_consumer_connection_count\\{.*,} ([\\d.][^\\n]+)", Pattern.CASE_INSENSITIVE);
        ArrayList<Double> connectionValues = MetricsCollector.collectSpecificMetric(consumerConnections, kafkaBridgeMetricsData);
        return connectionValues.stream().mapToDouble(Double::doubleValue).count() > 0;
    });

    // The scrape should also expose generic JVM and HTTP server metrics.
    String allCollectedMetrics = kafkaBridgeMetricsData.values().toString();
    assertThat("Collected KafkaBridge metrics doesn't contains jvm metrics", allCollectedMetrics.contains("jvm"));
    assertThat("Collected KafkaBridge metrics doesn't contains HTTP metrics", allCollectedMetrics.contains("strimzi_bridge_http_server"));

    // Sanity check on a gauge with a known value: one CPU reported to the bridge JVM.
    Pattern cpuCountPattern = Pattern.compile("system_cpu_count ([\\d.][^\\n]+)", Pattern.CASE_INSENSITIVE);
    ArrayList<Double> cpuValues = MetricsCollector.collectSpecificMetric(cpuCountPattern, kafkaBridgeMetricsData);
    assertThat(cpuValues.stream().mapToDouble(Double::doubleValue).sum(), is((double) 1));
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi/strimzi project: class MetricsIsolatedST, method testZookeeperAliveConnections.
@ParallelTest
void testZookeeperAliveConnections() {
    // Scan the already-collected ZooKeeper metrics for the alive-connections gauge.
    Pattern aliveConnectionsPattern = Pattern.compile("zookeeper_numaliveconnections ([\\d.][^\\n]+)", Pattern.CASE_INSENSITIVE);
    ArrayList<Double> collected = MetricsCollector.collectSpecificMetric(aliveConnectionsPattern, zookeeperMetricsData);
    long sampleCount = collected.stream().mapToDouble(Double::doubleValue).count();
    assertThat("Zookeeper alive connections count doesn't match expected value", sampleCount, is(0L));
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi/strimzi project: class MetricsIsolatedST, method testUserOperatorMetrics.
@ParallelTest
@Tag(ACCEPTANCE)
void testUserOperatorMetrics() {
    // Scrape metrics from the User Operator pod.
    userOperatorMetricsData = collector.toBuilder()
        .withComponentType(ComponentType.UserOperator)
        .build()
        .collectMetricsFromPods();

    // Every reconciliation counter/timer must be present for the KafkaUser kind.
    String[] expectedReconciliationMetrics = {
        "strimzi_reconciliations_locked_total",
        "strimzi_reconciliations_successful_total",
        "strimzi_reconciliations_duration_seconds_count",
        "strimzi_reconciliations_duration_seconds_sum",
        "strimzi_reconciliations_duration_seconds_max",
        "strimzi_reconciliations_periodical_total",
        "strimzi_reconciliations_failed_total",
        "strimzi_reconciliations_total"
    };
    for (String metricName : expectedReconciliationMetrics) {
        assertCoMetricNotNull(metricName, "KafkaUser", userOperatorMetricsData);
    }

    // The operator should report exactly two KafkaUser resources in total.
    Pattern kafkaUserResources = Pattern.compile("strimzi_resources\\{kind=\"KafkaUser\",.*} ([\\d.][^\\n]+)", Pattern.CASE_INSENSITIVE);
    ArrayList<Double> resourceCounts = MetricsCollector.collectSpecificMetric(kafkaUserResources, userOperatorMetricsData);
    assertThat(resourceCounts.stream().mapToDouble(Double::doubleValue).sum(), is((double) 2));
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi/strimzi project: class MetricsIsolatedST, method testKafkaActiveControllers.
@ParallelTest
void testKafkaActiveControllers() {
    // Across the whole Kafka cluster exactly one broker holds the controller role,
    // so summing the per-broker gauge must yield 1.
    Pattern activeControllerPattern = Pattern.compile("kafka_controller_kafkacontroller_activecontrollercount ([\\d.][^\\n]+)", Pattern.CASE_INSENSITIVE);
    ArrayList<Double> gaugeValues = MetricsCollector.collectSpecificMetric(activeControllerPattern, kafkaMetricsData);
    double totalActiveControllers = gaugeValues.stream().mapToDouble(Double::doubleValue).sum();
    assertThat("Kafka active controllers count doesn't match expected value", totalActiveControllers, is((double) 1));
}
Example usage of io.strimzi.systemtest.annotations.ParallelTest in the strimzi/strimzi project: class CustomResourceStatusIsolatedST, method testKafkaConnectorWithoutClusterConfig.
@ParallelTest
@Tag(CONNECTOR_OPERATOR)
void testKafkaConnectorWithoutClusterConfig(ExtensionContext extensionContext) {
    String connectorName = mapWithClusterNames.get(extensionContext.getDisplayName());

    // This test check NPE when connect cluster is not specified in labels
    // Check for NPE in CO logs is performed after every test in BaseST
    resourceManager.createResource(extensionContext, false,
        KafkaConnectorTemplates.kafkaConnector(connectorName, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, 2)
            .withNewMetadata()
                .withName(connectorName)
                .withNamespace(ResourceManager.kubeClient().getNamespace())
            .endMetadata()
            .build());

    // Without the cluster label the connector can never become Ready.
    KafkaConnectorUtils.waitForConnectorNotReady(connectorName);

    // Clean up the broken connector and wait until it is fully gone.
    KafkaConnectorResource.kafkaConnectorClient()
        .inNamespace(Constants.INFRA_NAMESPACE)
        .withName(connectorName)
        .withPropagationPolicy(DeletionPropagation.FOREGROUND)
        .delete();
    KafkaConnectorUtils.waitForConnectorDeletion(connectorName);
}
Aggregations