Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class SecurityST, method testAutoRenewCaCertsTriggerByExpiredCertificate.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testAutoRenewCaCertsTriggerByExpiredCertificate(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);

    // 1. Create the Cluster CA Secret up front, holding a certificate that has already expired
    InputStream secretInputStream = getClass().getClassLoader().getResourceAsStream("security-st-certs/expired-cluster-ca.crt");
    String clusterCaCert = TestUtils.readResource(secretInputStream);
    SecretUtils.createSecret(testStorage.getNamespaceName(), clusterCaCertificateSecretName(testStorage.getClusterName()), "ca.crt", clusterCaCert);

    // 2. Now create the cluster
    createKafkaCluster(extensionContext, testStorage.getClusterName());

    resourceManager.createResource(extensionContext,
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());

    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();

    resourceManager.createResource(extensionContext,
        kafkaClients.producerTlsStrimzi(testStorage.getClusterName()),
        kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // Wait until the certificates have been replaced
    SecretUtils.waitForCertToChange(testStorage.getNamespaceName(), clusterCaCert, clusterCaCertificateSecretName(testStorage.getClusterName()));
    // Wait until the pods are all up and ready
    KafkaUtils.waitForClusterStability(testStorage.getNamespaceName(), testStorage.getClusterName());

    // Verify that clients can still exchange messages using the renewed certificates
    resourceManager.createResource(extensionContext,
        kafkaClients.producerTlsStrimzi(testStorage.getClusterName()),
        kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
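The renewal check above hinges on SecretUtils.waitForCertToChange. A minimal sketch, assuming the fabric8 KubernetesClient, of how such a poll can work; this is an illustration, not the project's actual implementation, and the timeout and poll interval are made up:

// requires io.fabric8.kubernetes.client.KubernetesClient, java.time.Duration,
// java.util.Base64, java.nio.charset.StandardCharsets
// Poll the Secret until its "ca.crt" entry no longer matches the original certificate.
static void waitForCertToChange(KubernetesClient client, String namespace, String secretName, String originalCert) throws InterruptedException {
    long deadline = System.currentTimeMillis() + Duration.ofMinutes(10).toMillis();
    while (System.currentTimeMillis() < deadline) {
        // Secret data values are Base64-encoded
        String encoded = client.secrets().inNamespace(namespace).withName(secretName).get().getData().get("ca.crt");
        String currentCert = new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8);
        if (!currentCert.equals(originalCert)) {
            return; // the Cluster Operator has renewed the certificate
        }
        Thread.sleep(5_000);
    }
    throw new AssertionError("Certificate in Secret " + secretName + " was not renewed in time");
}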
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class CustomAuthorizerST, method testAclWithSuperUser.
@ParallelTest
@Tag(INTERNAL_CLIENTS_USED)
void testAclWithSuperUser(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CLUSTER_NAME, testStorage.getTopicName(), namespace).build());

    // The ADMIN user only gets WRITE and DESCRIBE ACLs on the topic; no READ ACL is configured
    KafkaUser adminUser = KafkaUserTemplates.tlsUser(namespace, CLUSTER_NAME, ADMIN)
        .editSpec()
            .withNewKafkaUserAuthorizationSimple()
                .addNewAcl()
                    .withNewAclRuleTopicResource()
                        .withName(testStorage.getTopicName())
                    .endAclRuleTopicResource()
                    .withOperation(AclOperation.WRITE)
                .endAcl()
                .addNewAcl()
                    .withNewAclRuleTopicResource()
                        .withName(testStorage.getTopicName())
                    .endAclRuleTopicResource()
                    .withOperation(AclOperation.DESCRIBE)
                .endAcl()
            .endKafkaUserAuthorizationSimple()
        .endSpec()
        .build();
    resourceManager.createResource(extensionContext, adminUser);

    LOGGER.info("Checking that Kafka super user {} is able to send messages to topic {}", ADMIN, testStorage.getTopicName());
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(CLUSTER_NAME))
        .withTopicName(testStorage.getTopicName())
        .withUserName(ADMIN)
        .build();
    resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(CLUSTER_NAME));
    ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    LOGGER.info("Checking that Kafka super user {} is able to read messages from topic {} even though no read ACL was configured", ADMIN, testStorage.getTopicName());
    resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(CLUSTER_NAME));
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
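Super users bypass ACL checks entirely, which is why the consumer succeeds without a READ ACL. The cluster setup that declares ADMIN as a super user lives in the class's setup method and is not shown above; a minimal hypothetical sketch of what it could look like, assuming Strimzi's generated fluent builders and simple authorization (the actual class configures a custom authorizer):

// hypothetical sketch; builder method names assume Strimzi's sundrio-generated API
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(CLUSTER_NAME, 1)
    .editSpec()
        .editKafka()
            .withNewKafkaAuthorizationSimple()
                // users listed here skip authorization checks entirely (hypothetical DN)
                .addToSuperUsers("CN=" + ADMIN)
            .endKafkaAuthorizationSimple()
        .endKafka()
    .endSpec()
    .build());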
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class CustomCaST, method testCustomClusterCaAndClientsCaCertificates.
@ParallelNamespaceTest
void testCustomClusterCaAndClientsCaCertificates(ExtensionContext extensionContext) {
    final TestStorage ts = new TestStorage(extensionContext);
    final String testSuite = extensionContext.getRequiredTestClass().getSimpleName();
    final SystemTestCertHolder clientsCa = new SystemTestCertHolder(
        "CN=" + testSuite + "ClientsCA",
        KafkaResources.clientsCaCertificateSecretName(ts.getClusterName()),
        KafkaResources.clientsCaKeySecretName(ts.getClusterName()));
    final SystemTestCertHolder clusterCa = new SystemTestCertHolder(
        "CN=" + testSuite + "ClusterCA",
        KafkaResources.clusterCaCertificateSecretName(ts.getClusterName()),
        KafkaResources.clusterCaKeySecretName(ts.getClusterName()));

    // prepare the custom CAs and copy them to the related Secrets
    clientsCa.prepareCustomSecretsFromBundles(ts.getNamespaceName(), ts.getClusterName());
    clusterCa.prepareCustomSecretsFromBundles(ts.getNamespaceName(), ts.getClusterName());

    final X509Certificate clientsCert = SecretUtils.getCertificateFromSecret(
        kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), KafkaResources.clientsCaCertificateSecretName(ts.getClusterName())), "ca.crt");
    final X509Certificate clusterCert = SecretUtils.getCertificateFromSecret(
        kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), KafkaResources.clusterCaCertificateSecretName(ts.getClusterName())), "ca.crt");
    checkCustomCaCorrectness(clientsCa, clientsCert);
    checkCustomCaCorrectness(clusterCa, clusterCert);

    LOGGER.info("Deploying Kafka with the new certificates and Secrets");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(ts.getClusterName(), 3, 3)
        .editSpec()
            .withNewClusterCa()
                .withGenerateCertificateAuthority(false)
            .endClusterCa()
            .withNewClientsCa()
                .withGenerateCertificateAuthority(false)
            .endClientsCa()
        .endSpec()
        .build());

    LOGGER.info("Checking Kafka and ZooKeeper certificates");
    final X509Certificate kafkaCert = SecretUtils.getCertificateFromSecret(
        kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), ts.getClusterName() + "-kafka-brokers"), ts.getClusterName() + "-kafka-0.crt");
    assertThat("KafkaCert does not have the expected Issuer: " + kafkaCert.getIssuerX500Principal().getName(),
        SystemTestCertManager.containsAllDN(kafkaCert.getIssuerX500Principal().getName(), clusterCa.getSubjectDn()));
    final X509Certificate zookeeperCert = SecretUtils.getCertificateFromSecret(
        kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), ts.getClusterName() + "-zookeeper-nodes"), ts.getClusterName() + "-zookeeper-0.crt");
    assertThat("ZookeeperCert does not have the expected Issuer: " + zookeeperCert.getIssuerX500Principal().getName(),
        SystemTestCertManager.containsAllDN(zookeeperCert.getIssuerX500Principal().getName(), clusterCa.getSubjectDn()));

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(ts.getClusterName(), ts.getTopicName()).build());

    LOGGER.info("Checking KafkaUser certificate");
    final KafkaUser user = KafkaUserTemplates.tlsUser(ts.getClusterName(), ts.getUserName()).build();
    resourceManager.createResource(extensionContext, user);
    final X509Certificate userCert = SecretUtils.getCertificateFromSecret(
        kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), ts.getUserName()), "user.crt");
    assertThat("UserCert does not have the expected Issuer (the custom clients CA): " + userCert.getIssuerX500Principal().getName(),
        SystemTestCertManager.containsAllDN(userCert.getIssuerX500Principal().getName(), clientsCa.getSubjectDn()));

    LOGGER.info("Checking that messages can be produced and consumed over TLS");
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withProducerName(ts.getProducerName())
        .withConsumerName(ts.getConsumerName())
        .withNamespaceName(ts.getNamespaceName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(ts.getClusterName()))
        .withTopicName(ts.getTopicName())
        .withUserName(ts.getUserName())
        .build();
    resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(ts.getClusterName()), kafkaClients.consumerTlsStrimzi(ts.getClusterName()));
    ClientUtils.waitForClientsSuccess(ts.getProducerName(), ts.getConsumerName(), ts.getNamespaceName(), MESSAGE_COUNT, false);
}
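checkCustomCaCorrectness is a helper of the test class and is not shown above. A minimal sketch of what such a check could assert, assuming SystemTestCertHolder exposes the subject DN it was constructed with:

// illustrative sketch, not the project's actual helper
static void checkCustomCaCorrectness(final SystemTestCertHolder ca, final X509Certificate cert) {
    // getBasicConstraints() returns -1 for certificates that are not a CA
    assertThat("Certificate is not a CA: " + cert.getSubjectX500Principal().getName(),
        cert.getBasicConstraints() != -1);
    // the Subject of the CA certificate should match the DN the holder was created with
    assertThat("Unexpected Subject DN: " + cert.getSubjectX500Principal().getName(),
        SystemTestCertManager.containsAllDN(cert.getSubjectX500Principal().getName(), ca.getSubjectDn()));
}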
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class OauthPlainIsolatedST, method testSaslPlainAuthenticationKafkaConnectIsAbleToConnectToKafkaOAuth.
@ParallelTest
void testSaslPlainAuthenticationKafkaConnectIsAbleToConnectToKafkaOAuth(ExtensionContext extensionContext) {
    TestStorage testStorage = new TestStorage(extensionContext);

    resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnect(testStorage.getClusterName(), oauthClusterName, 1)
        .withNewSpec()
            .withReplicas(1)
            .withBootstrapServers(KafkaResources.plainBootstrapAddress(oauthClusterName))
            .withConfig(connectorConfig)
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            // authenticate with SASL PLAIN, using the OAuth client secret as the password
            .withNewKafkaClientAuthenticationPlain()
                .withUsername("kafka-connect")
                .withNewPasswordSecret()
                    .withSecretName(CONNECT_OAUTH_SECRET)
                    // "clientSecret" is the key inside the Secret that holds the password
                    .withPassword("clientSecret")
                .endPasswordSecret()
            .endKafkaClientAuthenticationPlain()
            .withTls(null)
        .endSpec()
        .build());

    // verify that Kafka Connect is able to connect to the OAuth-enabled Kafka listener via SASL PLAIN
    KafkaConnectUtils.waitForConnectReady(testStorage.getNamespaceName(), testStorage.getClusterName());
}
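The snippet exercises the OAuth "SASL PLAIN" bridge: the listener accepts PLAIN credentials and the broker exchanges the client secret for a token behind the scenes. A minimal sketch, with hypothetical endpoint URIs, of the listener configuration the oauthClusterName cluster is assumed to carry (the real setup lives in the suite's setup method, and the builder's package path may differ by Strimzi version):

// requires GenericKafkaListenerBuilder and KafkaListenerType from the Strimzi API model
GenericKafkaListener plainOauthListener = new GenericKafkaListenerBuilder()
    .withName("plain")
    .withPort(9092)
    .withType(KafkaListenerType.INTERNAL)
    .withTls(false)
    .withNewKafkaListenerAuthenticationOAuth()
        .withEnablePlain(true)  // allow SASL PLAIN; the broker exchanges the secret for a token
        .withTokenEndpointUri("https://keycloak.example/realms/internal/protocol/openid-connect/token")  // hypothetical
        .withValidIssuerUri("https://keycloak.example/realms/internal")  // hypothetical
        .withJwksEndpointUri("https://keycloak.example/realms/internal/protocol/openid-connect/certs")  // hypothetical
    .endKafkaListenerAuthenticationOAuth()
    .build();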
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class DrainCleanerIsolatedST, method testDrainCleanerWithComponents.
@Tag(ACCEPTANCE)
@IsolatedTest
@RequiredMinKubeApiVersion(version = 1.17)
void testDrainCleanerWithComponents(ExtensionContext extensionContext) {
    TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
    final int replicas = 3;

    // maxUnavailable: 0 blocks regular evictions, so only the Drain Cleaner path can roll the pods
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
        .editMetadata()
            .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
        .endMetadata()
        .editSpec()
            .editKafka()
                .editOrNewTemplate()
                    .editOrNewPodDisruptionBudget()
                        .withMaxUnavailable(0)
                    .endPodDisruptionBudget()
                .endTemplate()
            .endKafka()
            .editZookeeper()
                .editOrNewTemplate()
                    .editOrNewPodDisruptionBudget()
                        .withMaxUnavailable(0)
                    .endPodDisruptionBudget()
                .endTemplate()
            .endZookeeper()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName())
        .editMetadata()
            .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
        .endMetadata()
        .build());

    drainCleaner.createDrainCleaner(extensionContext);

    KafkaClients kafkaBasicExampleClients = new KafkaClientsBuilder()
        .withMessageCount(300)
        .withTopicName(testStorage.getTopicName())
        .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withDelayMs(1000)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());

    for (int i = 0; i < replicas; i++) {
        String zkPodName = KafkaResources.zookeeperPodName(testStorage.getClusterName(), i);
        String kafkaPodName = KafkaResources.kafkaPodName(testStorage.getClusterName(), i);

        Map<String, String> zkPod = null;
        if (!Environment.isKRaftModeEnabled()) {
            zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector())
                .entrySet().stream()
                .filter(snapshot -> snapshot.getKey().equals(zkPodName))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        }
        Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
            .entrySet().stream()
            .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        if (!Environment.isKRaftModeEnabled()) {
            LOGGER.info("Evicting pod: {}", zkPodName);
            kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(zkPodName).evict();
        }
        LOGGER.info("Evicting pod: {}", kafkaPodName);
        kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(kafkaPodName).evict();

        if (!Environment.isKRaftModeEnabled()) {
            RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
        }
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
    }
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), Constants.DRAIN_CLEANER_NAMESPACE, 300);
}
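The evict() calls above go through the Kubernetes Eviction API, where the Drain Cleaner's admission webhook intercepts them and, instead of letting the pods be deleted, marks them for the Cluster Operator to roll. A minimal sketch of how that marking could be verified, assuming the pods are annotated with strimzi.io/manual-rolling-update (the annotation name is an assumption based on how the Drain Cleaner documents its behavior):

// fetch the annotations of an evicted pod and check the Drain Cleaner's marker
String annotation = kubeClient().getClient().pods()
    .inNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
    .withName(kafkaPodName)
    .get()
    .getMetadata().getAnnotations()
    .get("strimzi.io/manual-rolling-update");
assertThat("Pod " + kafkaPodName + " was not marked for a manual rolling update", "true".equals(annotation));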