use of io.strimzi.api.kafka.model.KafkaUser in project strimzi by strimzi.
the class UserST method testTlsExternalUser.
@ParallelNamespaceTest
void testTlsExternalUser(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final AclRule writeRule = new AclRuleBuilder()
    .withNewAclRuleTopicResource()
        .withName(topicName)
    .endAclRuleTopicResource()
    .withOperation(AclOperation.WRITE)
    .build();
final AclRule describeRule = new AclRuleBuilder()
    .withNewAclRuleTopicResource()
        .withName(topicName)
    .endAclRuleTopicResource()
    .withOperation(AclOperation.DESCRIBE)
    .build();
// exercise (a) - create Kafka cluster with support for authorization
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
    .editSpec()
        .editKafka()
            .withNewKafkaAuthorizationSimple()
            .endKafkaAuthorizationSimple()
        .endKafka()
    .endSpec()
    .build());
// quotas configuration
final int prodRate = 1212;
final int consRate = 2121;
final int requestPerc = 21;
final double mutRate = 5d;
final KafkaUser tlsExternalUserWithQuotasAndAcls = KafkaUserTemplates.tlsExternalUser(namespaceName, clusterName, userName)
    .editSpec()
        .withNewKafkaUserAuthorizationSimple()
            .addToAcls(writeRule, describeRule)
        .endKafkaUserAuthorizationSimple()
        .withNewQuotas()
            .withConsumerByteRate(consRate)
            .withProducerByteRate(prodRate)
            .withRequestPercentage(requestPerc)
            .withControllerMutationRate(mutRate)
        .endQuotas()
    .endSpec()
    .build();
// exercise (b) - create the KafkaUser with tls external client authentication
resourceManager.createResource(extensionContext, tlsExternalUserWithQuotasAndAcls);
// verify (a) - that secrets are not generated and KafkaUser is created
KafkaUserUtils.waitForKafkaUserReady(namespaceName, userName);
assertThat(kubeClient().getSecret(namespaceName, userName), nullValue());
// verify (b) - the operator reports the expected username in the status; this is also the username used for the ACLs and quotas
KafkaUser user = KafkaUserResource.kafkaUserClient().inNamespace(namespaceName).withName(userName).get();
assertThat(user.getStatus().getUsername(), is("CN=" + userName));
}
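For comparison, a minimal sketch (assuming the standard Strimzi fluent builder API; the variable names reuse those from the test above) of how an equivalent tls-external KafkaUser could be assembled directly with KafkaUserBuilder instead of the test templates:

// Sketch only: builds a tls-external KafkaUser with the fluent builder rather
// than KafkaUserTemplates. The "strimzi.io/cluster" label is how the User
// Operator matches the user to its Kafka cluster.
KafkaUser user = new KafkaUserBuilder()
    .withNewMetadata()
        .withName(userName)
        .withNamespace(namespaceName)
        .addToLabels("strimzi.io/cluster", clusterName)
    .endMetadata()
    .withNewSpec()
        // tls-external: client certificates are issued outside Strimzi,
        // which is why the test asserts that no user Secret is generated
        .withNewKafkaUserTlsExternalClientAuthentication()
        .endKafkaUserTlsExternalClientAuthentication()
        .withNewKafkaUserAuthorizationSimple()
            .addToAcls(writeRule, describeRule)
        .endKafkaUserAuthorizationSimple()
    .endSpec()
    .build();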
use of io.strimzi.api.kafka.model.KafkaUser in project strimzi by strimzi.
the class UserST method testTlsUserWithQuotas.
@ParallelTest
void testTlsUserWithQuotas(ExtensionContext extensionContext) {
KafkaUser user = KafkaUserTemplates.tlsUser(namespace, userClusterName, "encrypted-arnost").build();
testUserWithQuotas(extensionContext, user);
}
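The delegated testUserWithQuotas helper is not part of this excerpt; the quotas themselves would be attached to the KafkaUser spec with the same fluent API seen in testTlsExternalUser above. A hedged sketch with illustrative values:

// Sketch only: a tls user with quotas; the rate values are illustrative
KafkaUser userWithQuotas = KafkaUserTemplates.tlsUser(namespace, userClusterName, "encrypted-arnost")
    .editSpec()
        .withNewQuotas()
            .withConsumerByteRate(1000)       // bytes per second across the user's consumers
            .withProducerByteRate(2000)       // bytes per second across the user's producers
            .withRequestPercentage(42)        // broker request-handler utilization limit, in percent
            .withControllerMutationRate(5d)   // partition mutations per second
        .endQuotas()
    .endSpec()
    .build();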
use of io.strimzi.api.kafka.model.KafkaUser in project strimzi by strimzi.
the class CustomAuthorizerST method testAclWithSuperUser.
@ParallelTest
@Tag(INTERNAL_CLIENTS_USED)
void testAclWithSuperUser(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, namespace);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CLUSTER_NAME, testStorage.getTopicName(), namespace).build());
KafkaUser adminUser = KafkaUserTemplates.tlsUser(namespace, CLUSTER_NAME, ADMIN)
    .editSpec()
        .withNewKafkaUserAuthorizationSimple()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(testStorage.getTopicName())
                .endAclRuleTopicResource()
                .withOperation(AclOperation.WRITE)
            .endAcl()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(testStorage.getTopicName())
                .endAclRuleTopicResource()
                .withOperation(AclOperation.DESCRIBE)
            .endAcl()
        .endKafkaUserAuthorizationSimple()
    .endSpec()
    .build();
resourceManager.createResource(extensionContext, adminUser);
LOGGER.info("Checking kafka super user:{} that is able to send messages to topic:{}", ADMIN, testStorage.getTopicName());
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withNamespaceName(testStorage.getNamespaceName())
    .withMessageCount(MESSAGE_COUNT)
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(CLUSTER_NAME))
    .withTopicName(testStorage.getTopicName())
    .withUserName(ADMIN)
    .build();
resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(CLUSTER_NAME));
ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
LOGGER.info("Checking kafka super user:{} that is able to read messages to topic:{} regardless that " + "we configured Acls with only write operation", ADMIN, TOPIC_NAME);
resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(CLUSTER_NAME));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
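This test presupposes that ADMIN is declared as a super-user in the cluster's authorization configuration; that setup lives elsewhere in CustomAuthorizerST and is not part of this excerpt. A hedged sketch of what it typically looks like (the authorizer class shown is an assumption):

// Sketch only: a Kafka CR with a custom authorizer and ADMIN listed as
// super-user, so ACL checks are bypassed for that principal
resourceManager.createResource(extensionContext,
    KafkaTemplates.kafkaEphemeral(CLUSTER_NAME, 1, 1)
        .editSpec()
            .editKafka()
                .withNewKafkaAuthorizationCustom()
                    .withAuthorizerClass("kafka.security.authorizer.AclAuthorizer")
                    .withSuperUsers("CN=" + ADMIN)   // tls users authenticate as CN=<user name>
                .endKafkaAuthorizationCustom()
            .endKafka()
        .endSpec()
        .build());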
use of io.strimzi.api.kafka.model.KafkaUser in project strimzi by strimzi.
the class CustomCaST method testCustomClusterCaAndClientsCaCertificates.
@ParallelNamespaceTest
void testCustomClusterCaAndClientsCaCertificates(ExtensionContext extensionContext) {
final TestStorage ts = new TestStorage(extensionContext);
final String testSuite = extensionContext.getRequiredTestClass().getSimpleName();
final SystemTestCertHolder clientsCa = new SystemTestCertHolder(
    "CN=" + testSuite + "ClientsCA",
    KafkaResources.clientsCaCertificateSecretName(ts.getClusterName()),
    KafkaResources.clientsCaKeySecretName(ts.getClusterName()));
final SystemTestCertHolder clusterCa = new SystemTestCertHolder(
    "CN=" + testSuite + "ClusterCA",
    KafkaResources.clusterCaCertificateSecretName(ts.getClusterName()),
    KafkaResources.clusterCaKeySecretName(ts.getClusterName()));
// prepare the custom CAs and copy them into the related Secrets
clientsCa.prepareCustomSecretsFromBundles(ts.getNamespaceName(), ts.getClusterName());
clusterCa.prepareCustomSecretsFromBundles(ts.getNamespaceName(), ts.getClusterName());
final X509Certificate clientsCert = SecretUtils.getCertificateFromSecret(
    kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), KafkaResources.clientsCaCertificateSecretName(ts.getClusterName())), "ca.crt");
final X509Certificate clusterCert = SecretUtils.getCertificateFromSecret(
    kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), KafkaResources.clusterCaCertificateSecretName(ts.getClusterName())), "ca.crt");
checkCustomCaCorrectness(clientsCa, clientsCert);
checkCustomCaCorrectness(clusterCa, clusterCert);
LOGGER.info("Deploy kafka with new certs/secrets.");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(ts.getClusterName(), 3, 3).editSpec().withNewClusterCa().withGenerateCertificateAuthority(false).endClusterCa().withNewClientsCa().withGenerateCertificateAuthority(false).endClientsCa().endSpec().build());
LOGGER.info("Check Kafka(s) and Zookeeper(s) certificates.");
final X509Certificate kafkaCert = SecretUtils.getCertificateFromSecret(
    kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), ts.getClusterName() + "-kafka-brokers"), ts.getClusterName() + "-kafka-0.crt");
assertThat("KafkaCert does not have the expected test Issuer: " + kafkaCert.getIssuerDN(),
    SystemTestCertManager.containsAllDN(kafkaCert.getIssuerX500Principal().getName(), clusterCa.getSubjectDn()));
X509Certificate zookeeperCert = SecretUtils.getCertificateFromSecret(
    kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), ts.getClusterName() + "-zookeeper-nodes"), ts.getClusterName() + "-zookeeper-0.crt");
assertThat("ZookeeperCert does not have the expected test Issuer: " + zookeeperCert.getIssuerDN(),
    SystemTestCertManager.containsAllDN(zookeeperCert.getIssuerX500Principal().getName(), clusterCa.getSubjectDn()));
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(ts.getClusterName(), ts.getTopicName()).build());
LOGGER.info("Check KafkaUser certificate.");
final KafkaUser user = KafkaUserTemplates.tlsUser(ts.getClusterName(), ts.getUserName()).build();
resourceManager.createResource(extensionContext, user);
final X509Certificate userCert = SecretUtils.getCertificateFromSecret(
    kubeClient(ts.getNamespaceName()).getSecret(ts.getNamespaceName(), ts.getUserName()), "user.crt");
assertThat("UserCert does not have the expected test Issuer (the custom ClientsCA): " + userCert.getIssuerDN(),
    SystemTestCertManager.containsAllDN(userCert.getIssuerX500Principal().getName(), clientsCa.getSubjectDn()));
LOGGER.info("Send and receive messages over TLS.");
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withProducerName(ts.getProducerName())
    .withConsumerName(ts.getConsumerName())
    .withNamespaceName(ts.getNamespaceName())
    .withMessageCount(MESSAGE_COUNT)
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(ts.getClusterName()))
    .withTopicName(ts.getTopicName())
    .withUserName(ts.getUserName())
    .build();
LOGGER.info("Checking produced and consumed messages via TLS");
resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(ts.getClusterName()), kafkaClients.consumerTlsStrimzi(ts.getClusterName()));
ClientUtils.waitForClientsSuccess(ts.getProducerName(), ts.getConsumerName(), ts.getNamespaceName(), MESSAGE_COUNT, false);
}
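SecretUtils.getCertificateFromSecret is not shown in this excerpt; a minimal sketch of the same idea using only JDK classes (a Secret's data values are base64-encoded PEM, which CertificateFactory can parse once decoded):

import java.io.ByteArrayInputStream;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;

// Sketch only: decode one entry of a Kubernetes Secret's data map into an
// X509Certificate, e.g. for the issuer checks performed above
static X509Certificate certificateFromSecretData(String base64Value) throws Exception {
    byte[] pemBytes = Base64.getDecoder().decode(base64Value);
    return (X509Certificate) CertificateFactory.getInstance("X.509")
        .generateCertificate(new ByteArrayInputStream(pemBytes));
}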
use of io.strimzi.api.kafka.model.KafkaUser in project strimzi by strimzi.
the class ConnectIsolatedST method testKafkaConnectWithPlainAndScramShaAuthentication.
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test class")
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testKafkaConnectWithPlainAndScramShaAuthentication(ExtensionContext extensionContext) {
TestStorage storage = new TestStorage(extensionContext);
// Deploy Kafka whose only listener is a plain (non-TLS) one secured by SCRAM-SHA-512 authentication
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(storage.getClusterName(), 3)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .withAuth(new KafkaListenerAuthenticationScramSha512())
                .build())
        .endKafka()
    .endSpec()
    .build());
KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(storage.getClusterName(), storage.getUserName()).build();
resourceManager.createResource(extensionContext, kafkaUser);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(storage.getClusterName(), storage.getTopicName()).build());
KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(storage.getNamespaceName(), storage.getClusterName(), 1)
    .editSpec()
        .withBootstrapServers(KafkaResources.plainBootstrapAddress(storage.getClusterName()))
        .withNewKafkaClientAuthenticationScramSha512()
            .withUsername(storage.getUserName())
            .withPasswordSecret(new PasswordSecretSourceBuilder()
                .withSecretName(storage.getUserName())
                .withPassword("password")
                .build())
        .endKafkaClientAuthenticationScramSha512()
        .addToConfig("key.converter.schemas.enable", false)
        .addToConfig("value.converter.schemas.enable", false)
        .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
        .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
        .withVersion(Environment.ST_KAFKA_VERSION)
        .withReplicas(1)
    .endSpec()
    .build();
// The builder cannot unset the TLS configuration, so it has to be removed directly on the spec
connect.getSpec().setTls(null);
resourceManager.createResource(extensionContext, connect, ScraperTemplates.scraperPod(storage.getNamespaceName(), storage.getScraperName()).build());
LOGGER.info("Deploy NetworkPolicies for KafkaConnect");
NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(storage.getClusterName()));
final String kafkaConnectPodName = kubeClient(storage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.deploymentName(storage.getClusterName())).get(0).getMetadata().getName();
final String kafkaConnectLogs = kubeClient(storage.getNamespaceName()).logs(kafkaConnectPodName);
final String scraperPodName = kubeClient(storage.getNamespaceName()).listPodsByPrefixInName(storage.getScraperName()).get(0).getMetadata().getName();
KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(storage.getNamespaceName(), kafkaConnectPodName);
LOGGER.info("Verifying that KafkaConnect pod logs don't contain ERRORs");
assertThat(kafkaConnectLogs, not(containsString("ERROR")));
LOGGER.info("Creating FileStreamSink connector via pod {} with topic {}", scraperPodName, storage.getTopicName());
KafkaConnectorUtils.createFileSinkConnector(storage.getNamespaceName(), scraperPodName, storage.getTopicName(), Constants.DEFAULT_SINK_FILE_PATH, KafkaConnectResources.url(storage.getClusterName(), storage.getNamespaceName(), 8083));
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withTopicName(storage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .withUserName(storage.getUserName())
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(storage.getClusterName()))
    .withProducerName(storage.getProducerName())
    .withConsumerName(storage.getConsumerName())
    .withNamespaceName(storage.getNamespaceName())
    .build();
resourceManager.createResource(extensionContext, kafkaClients.producerScramShaPlainStrimzi(), kafkaClients.consumerScramShaPlainStrimzi());
ClientUtils.waitForClientsSuccess(storage.getProducerName(), storage.getConsumerName(), storage.getNamespaceName(), MESSAGE_COUNT);
KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(storage.getNamespaceName(), kafkaConnectPodName, Constants.DEFAULT_SINK_FILE_PATH, "99");
}
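KafkaConnectorUtils.createFileSinkConnector is not shown in this excerpt; conceptually it issues a POST against the Kafka Connect REST API. A hedged sketch using JDK 11+ classes (the URL, connector name, topic, and file path are placeholders):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch only: register a FileStreamSink connector via POST /connectors;
// Connect answers 201 Created when the connector is accepted
String body = """
    {
      "name": "sink-test",
      "config": {
        "connector.class": "org.apache.kafka.connect.file.FileStreamSinkConnector",
        "topics": "my-topic",
        "file": "/tmp/test-file-sink.txt"
      }
    }""";
HttpResponse<String> response = HttpClient.newHttpClient().send(
    HttpRequest.newBuilder(URI.create("http://my-connect-cluster-connect-api:8083/connectors"))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build(),
    HttpResponse.BodyHandlers.ofString());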