Use of io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED in project strimzi-kafka-operator by strimzi.
From class KafkaST, method testPersistentStorageSize.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("JBOD is not supported by KRaft mode and is used in this test case.")
void testPersistentStorageSize(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext);
    final String[] diskSizes = {"70Gi", "20Gi"};
    final int kafkaRepl = 2;
    final int diskCount = 2;
    // Two persistent JBOD volumes per broker, one per entry in diskSizes
    JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizes[0]).build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizes[1]).build())
        .build();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), kafkaRepl)
        .editSpec()
            .editKafka().withStorage(jbodStorage).endKafka()
            .editZookeeper().withReplicas(1).endZookeeper()
        .endSpec().build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
    // Collect the cluster's PVCs and verify that each one requests the expected size
    List<PersistentVolumeClaim> volumes = kubeClient(testStorage.getNamespaceName())
        .listPersistentVolumeClaims(testStorage.getNamespaceName(), testStorage.getClusterName()).stream()
        .filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(testStorage.getClusterName()))
        .collect(Collectors.toList());
    checkStorageSizeForVolumes(volumes, diskSizes, kafkaRepl, diskCount);
    // Produce and consume messages to confirm the cluster is usable with this storage layout
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(testStorage.getTopicName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withNamespaceName(testStorage.getNamespaceName())
        .withProducerName(testStorage.getProducerName()).withConsumerName(testStorage.getConsumerName())
        .withMessageCount(MESSAGE_COUNT).build();
    resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
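The size assertions above delegate to checkStorageSizeForVolumes, which is not shown on this page. Below is a minimal sketch of what such a helper could check; it assumes the usual Strimzi convention that JBOD PVCs are named data-<volumeId>-<cluster>-kafka-<broker> and uses Hamcrest assertions, so it is illustrative rather than the actual KafkaST implementation.

import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Quantity;
import java.math.BigDecimal;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;

// Illustrative sketch only: checks that every JBOD volume id produced one PVC per broker
// and that each of those PVCs requests the size configured for that volume id.
static void checkStorageSizeForVolumes(List<PersistentVolumeClaim> volumes, String[] diskSizes, int kafkaRepl, int diskCount) {
    // One PVC is expected per broker replica and per JBOD volume
    assertThat(volumes.size(), is(kafkaRepl * diskCount));
    for (int diskId = 0; diskId < diskCount; diskId++) {
        final int id = diskId;
        final BigDecimal expectedBytes = Quantity.getAmountInBytes(new Quantity(diskSizes[diskId]));
        // Assumption: PVCs created from JBOD volume <id> are named data-<id>-<cluster>-kafka-<broker>
        long matching = volumes.stream()
            .filter(pvc -> pvc.getMetadata().getName().startsWith("data-" + id + "-"))
            .filter(pvc -> Quantity.getAmountInBytes(pvc.getSpec().getResources().getRequests().get("storage")).compareTo(expectedBytes) == 0)
            .count();
        assertThat(matching, is((long) kafkaRepl));
    }
}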
Use of io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED in project strimzi-kafka-operator by strimzi.
From class KafkaST, method testAppDomainLabels.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("TopicOperator is not supported by KRaft mode and is used in this test class")
void testAppDomainLabels(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 1).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());

    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(testStorage.getTopicName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withNamespaceName(testStorage.getNamespaceName())
        .withProducerName(testStorage.getProducerName()).withConsumerName(testStorage.getConsumerName())
        .withMessageCount(MESSAGE_COUNT).build();

    LOGGER.info("---> PODS <---");
    List<Pod> pods = kubeClient(testStorage.getNamespaceName())
        .listPods(testStorage.getNamespaceName(), testStorage.getClusterName()).stream()
        .filter(pod -> pod.getMetadata().getName().startsWith(testStorage.getClusterName()))
        .collect(Collectors.toList());
    for (Pod pod : pods) {
        LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName());
        verifyAppLabels(pod.getMetadata().getLabels());
    }

    LOGGER.info("---> STATEFUL SETS <---");
    Map<String, String> kafkaLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()));
    LOGGER.info("Getting labels from stateful set of kafka resource");
    verifyAppLabels(kafkaLabels);
    Map<String, String> zooLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName()));
    LOGGER.info("Getting labels from stateful set of zookeeper resource");
    verifyAppLabels(zooLabels);

    LOGGER.info("---> SERVICES <---");
    List<Service> services = kubeClient(testStorage.getNamespaceName())
        .listServices(testStorage.getNamespaceName()).stream()
        .filter(service -> service.getMetadata().getName().startsWith(testStorage.getClusterName()))
        .collect(Collectors.toList());
    for (Service service : services) {
        LOGGER.info("Getting labels from {} service", service.getMetadata().getName());
        verifyAppLabels(service.getMetadata().getLabels());
    }

    LOGGER.info("---> SECRETS <---");
    List<Secret> secrets = kubeClient(testStorage.getNamespaceName())
        .listSecrets(testStorage.getNamespaceName()).stream()
        .filter(secret -> secret.getMetadata().getName().startsWith(testStorage.getClusterName()) && secret.getType().equals("Opaque"))
        .collect(Collectors.toList());
    for (Secret secret : secrets) {
        LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName());
        verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels());
    }

    LOGGER.info("---> CONFIG MAPS <---");
    List<ConfigMap> configMaps = kubeClient(testStorage.getNamespaceName())
        .listConfigMapsInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName());
    for (ConfigMap configMap : configMaps) {
        LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName());
        verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels());
    }

    resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
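The label checks above rely on verifyAppLabels and verifyAppLabelsForSecretsAndConfigMaps, which are defined elsewhere in the test suite. Below is a rough sketch of the kind of assertions they could perform; it assumes the standard strimzi.io/cluster, strimzi.io/kind and strimzi.io/name labels that the operator puts on managed resources, and is illustrative rather than the exact helpers.

import java.util.Map;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasKey;

// Illustrative sketch only: resources managed by the Cluster Operator are expected to carry strimzi.io/* labels.
static void verifyAppLabels(Map<String, String> labels) {
    assertThat("Label strimzi.io/cluster is missing", labels, hasKey("strimzi.io/cluster"));
    assertThat("Label strimzi.io/kind is missing", labels, hasKey("strimzi.io/kind"));
    assertThat("Label strimzi.io/name is missing", labels, hasKey("strimzi.io/name"));
}

// Secrets and ConfigMaps are assumed to carry at least the cluster and kind labels.
static void verifyAppLabelsForSecretsAndConfigMaps(Map<String, String> labels) {
    assertThat("Label strimzi.io/cluster is missing", labels, hasKey("strimzi.io/cluster"));
    assertThat("Label strimzi.io/kind is missing", labels, hasKey("strimzi.io/kind"));
}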