use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
the class MirrorMakerIsolatedST method testScaleMirrorMakerUpAndDownToZero.
@ParallelNamespaceTest
@Tag(SCALABILITY)
void testScaleMirrorMakerUpAndDownToZero(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
String kafkaClusterTargetName = testStorage.getClusterName() + "-target";
resourceManager.createResource(extensionContext,
    KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build(),
    KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
resourceManager.createResource(extensionContext,
    KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), kafkaClusterTargetName, kafkaClusterSourceName,
        ClientUtils.generateRandomConsumerGroup(), 1, false).build());
int scaleTo = 2;
long mmObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration();
String mmDepName = KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName());
String mmGenName = kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND).get(0).getMetadata().getGenerateName();
LOGGER.info("-------> Scaling KafkaMirrorMaker up <-------");
LOGGER.info("Scaling subresource replicas to {}", scaleTo);
cmdKubeClient().namespace(testStorage.getNamespaceName()).scaleByName(KafkaMirrorMaker.RESOURCE_KIND, testStorage.getClusterName(), scaleTo);
DeploymentUtils.waitForDeploymentAndPodsReady(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), scaleTo);
LOGGER.info("Check if replicas is set to {}, naming prefix should be same and observed generation higher", scaleTo);
List<String> mmPods = kubeClient().listPodNames(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND);
assertThat(mmPods.size(), is(scaleTo));
assertThat(KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getSpec().getReplicas(), is(scaleTo));
assertThat(KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getReplicas(), is(scaleTo));
/*
 * The observed generation should be higher than before scaling: after a change of the spec and a successful
 * reconciliation, the observed generation is increased.
 */
long actualObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration();
assertTrue(mmObsGen < actualObsGen);
for (String pod : mmPods) {
assertTrue(pod.contains(mmGenName));
}
mmObsGen = actualObsGen;
LOGGER.info("-------> Scaling KafkaMirrorMaker down <-------");
LOGGER.info("Scaling MirrorMaker to zero");
KafkaMirrorMakerResource.replaceMirrorMakerResourceInSpecificNamespace(testStorage.getClusterName(), mm -> mm.getSpec().setReplicas(0), testStorage.getNamespaceName());
PodUtils.waitForPodsReady(testStorage.getNamespaceName(), kubeClient().getDeploymentSelectors(testStorage.getNamespaceName(), mmDepName), 0, true);
mmPods = kubeClient().listPodNames(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND);
KafkaMirrorMakerStatus mmStatus = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus();
actualObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration();
assertThat(mmPods.size(), is(0));
assertThat(mmStatus.getConditions().get(0).getType(), is(Ready.toString()));
assertThat(actualObsGen, is(not(mmObsGen)));
}
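Every snippet on this page follows the same pattern: a TestStorage instance is built from the JUnit 5 ExtensionContext plus a namespace, and the per-test names (cluster, namespace, topic, producer, consumer) are taken from its getters. A minimal sketch of that pattern, using only the constructor and getters that already appear in the snippets on this page (the surrounding test-class scaffolding such as LOGGER and the annotation is assumed):
// Sketch only: the TestStorage usage pattern shared by the tests on this page.
@ParallelNamespaceTest
void exampleTestStorageUsage(ExtensionContext extensionContext) {
    // Namespace comes from the deployed Cluster Operator, as in the tests above.
    final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());

    // Collision-free, per-test names derived from the extension context:
    String clusterName = testStorage.getClusterName();
    String namespaceName = testStorage.getNamespaceName();
    String topicName = testStorage.getTopicName();

    LOGGER.info("Running against cluster {} in namespace {} with topic {}", clusterName, namespaceName, topicName);
}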
use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
the class MirrorMakerIsolatedST method testIncludeList.
@ParallelNamespaceTest
void testIncludeList(ExtensionContext extensionContext) throws UnexpectedException {
final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
String kafkaClusterTargetName = testStorage.getClusterName() + "-target";
String topicName = "included-topic";
String topicNotIncluded = "not-included-topic";
resourceManager.createResource(extensionContext,
    KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build(),
    KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
resourceManager.createResource(extensionContext,
    KafkaTopicTemplates.topic(kafkaClusterSourceName, topicName).build(),
    KafkaTopicTemplates.topic(kafkaClusterSourceName, topicNotIncluded).build());
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterSourceName))
    .withNamespaceName(testStorage.getNamespaceName())
    .withTopicName(topicName)
    .withMessageCount(MESSAGE_COUNT)
    .build();
resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
clients = new KafkaClientsBuilder(clients).withTopicName(topicNotIncluded).build();
resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
resourceManager.createResource(extensionContext,
    KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), kafkaClusterSourceName, kafkaClusterTargetName,
        ClientUtils.generateRandomConsumerGroup(), 1, false)
        .editMetadata()
            .withNamespace(testStorage.getNamespaceName())
        .endMetadata()
        .editSpec()
            .withInclude(topicName)
        .endSpec()
        .build());
clients = new KafkaClientsBuilder(clients)
    .withTopicName(topicName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName))
    .build();
resourceManager.createResource(extensionContext, clients.consumerStrimzi());
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
clients = new KafkaClientsBuilder(clients).withTopicName(topicNotIncluded).build();
LOGGER.info("Becuase {} is not included, we should not receive any message", topicNotIncluded);
resourceManager.createResource(extensionContext, clients.consumerStrimzi());
ClientUtils.waitForClientTimeout(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
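The include field set on the KafkaMirrorMaker spec above is the topic whitelist: only topics matching it are mirrored, which is why the consumer for not-included-topic times out on the target cluster. The field also accepts a Java-style regular expression, so a whole family of topics can be mirrored at once; a hedged variant of the template call above (the pattern itself is illustrative):
// Sketch only: mirror every topic whose name starts with "included-" (the regex is an illustrative assumption).
resourceManager.createResource(extensionContext,
    KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), kafkaClusterSourceName, kafkaClusterTargetName,
        ClientUtils.generateRandomConsumerGroup(), 1, false)
        .editSpec()
            .withInclude("included-.*")
        .endSpec()
        .build());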
use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
the class MirrorMakerIsolatedST method testMirrorMakerTlsScramSha.
/**
 * Test mirroring messages by Mirror Maker over a TLS transport using SCRAM-SHA authentication.
 */
@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
@SuppressWarnings("checkstyle:methodlength")
void testMirrorMakerTlsScramSha(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
String kafkaClusterTargetName = testStorage.getClusterName() + "-target";
String kafkaSourceUserName = testStorage.getUserName() + "-source";
String kafkaTargetUserName = testStorage.getUserName() + "-target";
// Deploy source kafka with tls listener and SCRAM-SHA authentication
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationScramSha512())
                .build())
        .endKafka()
    .endSpec()
    .build());
// Deploy target kafka with tls listener and SCRAM-SHA authentication
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationScramSha512())
                .build())
        .endKafka()
    .endSpec()
    .build());
// Deploy topic
resourceManager.createResource(extensionContext,
    KafkaTopicTemplates.topic(kafkaClusterSourceName, testStorage.getTopicName()).build(),
    KafkaUserTemplates.scramShaUser(kafkaClusterSourceName, kafkaSourceUserName).build(),
    KafkaUserTemplates.scramShaUser(kafkaClusterTargetName, kafkaTargetUserName).build());
// Initialize PasswordSecretSource to set this as PasswordSecret in Mirror Maker spec
PasswordSecretSource passwordSecretSource = new PasswordSecretSource();
passwordSecretSource.setSecretName(kafkaSourceUserName);
passwordSecretSource.setPassword("password");
// Initialize PasswordSecretSource to set this as PasswordSecret in Mirror Maker spec
PasswordSecretSource passwordSecretTarget = new PasswordSecretSource();
passwordSecretTarget.setSecretName(kafkaTargetUserName);
passwordSecretTarget.setPassword("password");
// Initialize CertSecretSource with certificate and secret names for consumer
CertSecretSource certSecretSource = new CertSecretSource();
certSecretSource.setCertificate("ca.crt");
certSecretSource.setSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterSourceName));
// Initialize CertSecretSource with certificate and secret names for producer
CertSecretSource certSecretTarget = new CertSecretSource();
certSecretTarget.setCertificate("ca.crt");
certSecretTarget.setSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterTargetName));
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(kafkaClusterSourceName))
    .withNamespaceName(testStorage.getNamespaceName())
    .withUserName(kafkaSourceUserName)
    .withTopicName(testStorage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .build();
resourceManager.createResource(extensionContext, clients.producerScramShaTlsStrimzi(kafkaClusterSourceName), clients.consumerScramShaTlsStrimzi(kafkaClusterSourceName));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Deploy Mirror Maker with TLS and ScramSha512
resourceManager.createResource(extensionContext,
    KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), kafkaClusterSourceName, kafkaClusterTargetName,
        ClientUtils.generateRandomConsumerGroup(), 1, true)
        .editSpec()
            .editConsumer()
                .withNewKafkaClientAuthenticationScramSha512()
                    .withUsername(kafkaSourceUserName)
                    .withPasswordSecret(passwordSecretSource)
                .endKafkaClientAuthenticationScramSha512()
                .withNewTls()
                    .withTrustedCertificates(certSecretSource)
                .endTls()
            .endConsumer()
            .editProducer()
                .withNewKafkaClientAuthenticationScramSha512()
                    .withUsername(kafkaTargetUserName)
                    .withPasswordSecret(passwordSecretTarget)
                .endKafkaClientAuthenticationScramSha512()
                .withNewTls()
                    .withTrustedCertificates(certSecretTarget)
                .endTls()
            .endProducer()
        .endSpec()
        .build());
clients = new KafkaClientsBuilder(clients)
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(kafkaClusterTargetName))
    .withUserName(kafkaTargetUserName)
    .build();
resourceManager.createResource(extensionContext, clients.consumerScramShaTlsStrimzi(kafkaClusterTargetName));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
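The PasswordSecretSource and CertSecretSource objects above are filled in with plain setters. The Strimzi API model also generates fluent builders for these types, so the same objects can be created in one expression; a hedged alternative, assuming the generated PasswordSecretSourceBuilder and CertSecretSourceBuilder classes are available:
// Sketch only: the same secret sources built fluently (builder class names are assumed from the Strimzi API model conventions).
PasswordSecretSource passwordSecretSource = new PasswordSecretSourceBuilder()
    .withSecretName(kafkaSourceUserName)
    .withPassword("password")
    .build();

CertSecretSource certSecretSource = new CertSecretSourceBuilder()
    .withSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterSourceName))
    .withCertificate("ca.crt")
    .build();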
use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
the class NetworkPoliciesIsolatedST method testNetworkPoliciesWithPlainListener.
@IsolatedTest("Specific cluster operator for test case")
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testNetworkPoliciesWithPlainListener(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
final String topic0 = "topic-example-0";
final String topic1 = "topic-example-1";
final String deniedProducerName = "denied-" + testStorage.getProducerName();
final String deniedConsumerName = "denied-" + testStorage.getConsumerName();
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
    .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
    .withNamespace(clusterOperator.getDeploymentNamespace())
    .createInstallation()
    .runInstallation();
resourceManager.createResource(extensionContext,
    KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                    .withPort(9092)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(false)
                    .withNewKafkaListenerAuthenticationScramSha512Auth()
                    .endKafkaListenerAuthenticationScramSha512Auth()
                    .withNetworkPolicyPeers(
                        new NetworkPolicyPeerBuilder()
                            .withNewPodSelector()
                                .addToMatchLabels("app", testStorage.getProducerName())
                            .endPodSelector()
                            .build(),
                        new NetworkPolicyPeerBuilder()
                            .withNewPodSelector()
                                .addToMatchLabels("app", testStorage.getConsumerName())
                            .endPodSelector()
                            .build())
                    .build())
            .endKafka()
            .withNewKafkaExporter()
            .endKafkaExporter()
        .endSpec()
        .build(),
    ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build(),
    KafkaUserTemplates.scramShaUser(testStorage.getClusterName(), testStorage.getUserName()).build(),
    KafkaTopicTemplates.topic(testStorage.getClusterName(), topic0).build(),
    KafkaTopicTemplates.topic(testStorage.getClusterName(), topic1).build());
final String scraperPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName();
NetworkPolicyResource.allowNetworkPolicySettingsForKafkaExporter(extensionContext, testStorage.getClusterName());
LOGGER.info("Verifying that producer/consumer: {}/{} are able to exchange messages", testStorage.getProducerName(), testStorage.getConsumerName());
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withNamespaceName(testStorage.getNamespaceName())
    .withMessageCount(MESSAGE_COUNT)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withTopicName(topic0)
    .withUserName(testStorage.getUserName())
    .build();
resourceManager.createResource(extensionContext, kafkaClients.producerScramShaPlainStrimzi(), kafkaClients.consumerScramShaPlainStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
kafkaClients = new KafkaClientsBuilder(kafkaClients)
    .withProducerName(deniedProducerName)
    .withConsumerName(deniedConsumerName)
    .withTopicName(topic1)
    .build();
LOGGER.info("Verifying that producer/consumer: {}/{} are not able to exchange messages", deniedProducerName, deniedConsumerName);
resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
ClientUtils.waitForClientsTimeout(deniedProducerName, deniedConsumerName, testStorage.getNamespaceName(), MESSAGE_COUNT);
LOGGER.info("Check metrics exported by Kafka Exporter");
MetricsCollector metricsCollector = new MetricsCollector.Builder()
    .withScraperPodName(scraperPodName)
    .withComponentName(testStorage.getClusterName())
    .withComponentType(ComponentType.KafkaExporter)
    .build();
Map<String, String> kafkaExporterMetricsData = metricsCollector.collectMetricsFromPods();
assertThat("Kafka Exporter metrics should be non-empty", kafkaExporterMetricsData.size() > 0);
for (Map.Entry<String, String> entry : kafkaExporterMetricsData.entrySet()) {
assertThat("Value from collected metric should be non-empty", !entry.getValue().isEmpty());
assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_consumergroup_current_offset"));
assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic0 + "\"} 1"));
assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic1 + "\"} 1"));
}
}
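The allow/deny split in this test comes entirely from the listener's networkPolicyPeers: the test relies on the client Jobs carrying an app label equal to their client name, so only pods labelled with the original producer and consumer names are admitted, and the denied-* clients time out. If those clients were meant to pass as well, an extra peer matching their label would be needed; a hedged sketch, reusing the NetworkPolicyPeerBuilder already used in the test:
// Sketch only: an additional peer that would also admit the "denied" producer pods.
NetworkPolicyPeer deniedProducerPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .addToMatchLabels("app", deniedProducerName)
    .endPodSelector()
    .build();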
use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
the class OpaIntegrationST method testOpaAuthorization.
@ParallelTest
void testOpaAuthorization(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, namespace);
KafkaUser goodUser = KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), CLUSTER_NAME, OPA_GOOD_USER).build();
KafkaUser badUser = KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), CLUSTER_NAME, OPA_BAD_USER).build();
resourceManager.createResource(extensionContext, goodUser);
resourceManager.createResource(extensionContext, badUser);
LOGGER.info("Checking KafkaUser {} that is able to send and receive messages to/from topic '{}'", OPA_GOOD_USER, testStorage.getTopicName());
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withNamespaceName(testStorage.getNamespaceName())
    .withMessageCount(MESSAGE_COUNT)
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(CLUSTER_NAME))
    .withTopicName(testStorage.getTopicName())
    .withUserName(OPA_GOOD_USER)
    .build();
resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(CLUSTER_NAME), kafkaClients.consumerTlsStrimzi(CLUSTER_NAME));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
LOGGER.info("Checking KafkaUser {} that is not able to send or receive messages to/from topic '{}'", OPA_BAD_USER, testStorage.getTopicName());
kafkaClients = new KafkaClientsBuilder(kafkaClients).withUserName(OPA_BAD_USER).build();
resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(CLUSTER_NAME), kafkaClients.consumerTlsStrimzi(CLUSTER_NAME));
ClientUtils.waitForClientsTimeout(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
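The class-level setup that deploys the Kafka cluster with the Open Policy Agent authorizer is not part of this snippet. A minimal sketch of how such a Kafka CR is typically configured, assuming the generated KafkaAuthorizationOpaBuilder and KafkaListenerAuthenticationTls classes from the Strimzi API model and an illustrative OPA service URL:
// Sketch only: enabling the OPA authorizer on the Kafka CR used by this test class.
// The OPA URL is an illustrative assumption; it must point at the deployed OPA policy.
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(CLUSTER_NAME, 3, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationTls())
                .build())
            .withAuthorization(new KafkaAuthorizationOpaBuilder()
                .withUrl("http://opa:8181/v1/data/kafka/simple/authz/allow")
                .build())
        .endKafka()
    .endSpec()
    .build());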