Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.
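KRaftNotSupported is a JUnit 5 marker the Strimzi system tests use to skip a test when the cluster runs in KRaft (ZooKeeper-less) mode; the string argument documents why. Below is a minimal sketch of what the declaration could look like, inferred only from the usages on this page; the actual source in io.strimzi.systemtest.annotations may differ (for example in targets, defaults, or the accompanying execution condition).

package io.strimzi.systemtest.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Sketch only: the real annotation is read by a JUnit extension that skips the
// annotated test when the test environment uses KRaft instead of ZooKeeper.
@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
public @interface KRaftNotSupported {
    // Human-readable reason why the test cannot run in KRaft mode
    String value() default "";
}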
The class SecurityST, method testCertRegeneratedAfterInternalCAisDeleted: verifies that the CA cert Secrets are regenerated, and the Kafka pods rolled, after the internal CA Secrets are deleted.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testCertRegeneratedAfterInternalCAisDeleted(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 1).build());

    Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());

    resourceManager.createResource(extensionContext,
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());

    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();

    // Collect the cluster's CA cert Secrets
    List<Secret> secrets = kubeClient().listSecrets(testStorage.getNamespaceName()).stream()
        .filter(secret -> secret.getMetadata().getName().startsWith(testStorage.getClusterName())
            && secret.getMetadata().getName().endsWith("ca-cert"))
        .collect(Collectors.toList());

    for (Secret s : secrets) {
        LOGGER.info("Verifying that Secret {} is present", s.getMetadata().getName());
        assertThat(s.getData(), is(notNullValue()));
    }

    for (Secret s : secrets) {
        LOGGER.info("Deleting Secret {}", s.getMetadata().getName());
        kubeClient().deleteSecret(testStorage.getNamespaceName(), s.getMetadata().getName());
    }

    PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()));
    RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);

    for (Secret s : secrets) {
        SecretUtils.waitForSecretReady(testStorage.getNamespaceName(), s.getMetadata().getName(), () -> { });
    }

    // Note: this second listing filters only on the "ca-cert" suffix
    List<Secret> regeneratedSecrets = kubeClient().listSecrets(testStorage.getNamespaceName()).stream()
        .filter(secret -> secret.getMetadata().getName().endsWith("ca-cert"))
        .collect(Collectors.toList());

    // The index-based comparison assumes both listings come back in the same order;
    // see the name-keyed sketch after this method for an order-independent variant
    for (int i = 0; i < secrets.size(); i++) {
        assertThat("CA certificate was not regenerated (ca.crt content is identical)",
            !secrets.get(i).getData().get("ca.crt").equals(regeneratedSecrets.get(i).getData().get("ca.crt")));
    }

    resourceManager.createResource(extensionContext,
        kafkaClients.producerTlsStrimzi(testStorage.getClusterName()),
        kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
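The assertion above compares old and regenerated Secrets by list index, which relies on both listings returning the CA Secrets in the same order. A hypothetical name-keyed helper, not part of the Strimzi code base, that removes the ordering assumption (it uses the same fabric8 Secret type as the test):

import io.fabric8.kubernetes.api.model.Secret;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class CaCertAssertions {
    /** Returns the names of secrets whose "ca.crt" entry did NOT change; empty means all were regenerated. */
    public static List<String> unchangedCaCerts(List<Secret> before, List<Secret> after) {
        // Index the old "ca.crt" values by Secret name
        Map<String, String> oldCerts = before.stream()
            .collect(Collectors.toMap(s -> s.getMetadata().getName(),
                                      s -> s.getData().get("ca.crt")));
        // Report any regenerated Secret that still carries the old certificate
        return after.stream()
            .filter(s -> oldCerts.containsKey(s.getMetadata().getName()))
            .filter(s -> s.getData().get("ca.crt").equals(oldCerts.get(s.getMetadata().getName())))
            .map(s -> s.getMetadata().getName())
            .collect(Collectors.toList());
    }
}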
Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.
The class SecurityST, method testAclRuleReadAndWrite: verifies simple authorization by giving one user write-only ACLs and another user read-only ACLs on the same topic.
@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testAclRuleReadAndWrite(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaUserWrite = "kafka-user-write";
    final String kafkaUserRead = "kafka-user-read";
    final int numberOfMessages = 500;
    final String consumerGroupName = "consumer-group-name-1";

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
            .editSpec()
                .editKafka()
                    .withNewKafkaAuthorizationSimple()
                    .endKafkaAuthorizationSimple()
                    .withListeners(new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(true)
                        .withAuth(new KafkaListenerAuthenticationTls())
                        .build())
                .endKafka()
            .endSpec()
            .build());

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    // The writer user gets WRITE plus DESCRIBE (DESCRIBE is needed so the user can fetch topic metadata)
    resourceManager.createResource(extensionContext,
        KafkaUserTemplates.tlsUser(clusterName, kafkaUserWrite)
            .editSpec()
                .withNewKafkaUserAuthorizationSimple()
                    .addNewAcl()
                        .withNewAclRuleTopicResource()
                            .withName(topicName)
                        .endAclRuleTopicResource()
                        .withOperation(AclOperation.WRITE)
                    .endAcl()
                    .addNewAcl()
                        .withNewAclRuleTopicResource()
                            .withName(topicName)
                        .endAclRuleTopicResource()
                        .withOperation(AclOperation.DESCRIBE)
                    .endAcl()
                .endKafkaUserAuthorizationSimple()
            .endSpec()
            .build());

    LOGGER.info("Checking that KafkaUser {} is able to send messages to topic '{}'", kafkaUserWrite, topicName);

    ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withKafkaUsername(kafkaUserWrite)
        .withMessageCount(numberOfMessages)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();

    assertThat(externalKafkaClient.sendMessagesTls(), is(numberOfMessages));

    // The writer user has no READ ACL on the consumer group, so consuming must fail
    assertThrows(GroupAuthorizationException.class, externalKafkaClient::receiveMessagesTls);

    // The reader user gets READ on the topic and the consumer group, plus DESCRIBE for topic metadata
    resourceManager.createResource(extensionContext,
        KafkaUserTemplates.tlsUser(clusterName, kafkaUserRead)
            .editSpec()
                .withNewKafkaUserAuthorizationSimple()
                    .addNewAcl()
                        .withNewAclRuleTopicResource()
                            .withName(topicName)
                        .endAclRuleTopicResource()
                        .withOperation(AclOperation.READ)
                    .endAcl()
                    .addNewAcl()
                        .withNewAclRuleGroupResource()
                            .withName(consumerGroupName)
                        .endAclRuleGroupResource()
                        .withOperation(AclOperation.READ)
                    .endAcl()
                    .addNewAcl()
                        .withNewAclRuleTopicResource()
                            .withName(topicName)
                        .endAclRuleTopicResource()
                        .withOperation(AclOperation.DESCRIBE)
                    .endAcl()
                .endKafkaUserAuthorizationSimple()
            .endSpec()
            .build());

    ExternalKafkaClient newExternalKafkaClient = externalKafkaClient.toBuilder()
        .withKafkaUsername(kafkaUserRead)
        .withConsumerGroupName(consumerGroupName)
        .build();

    assertThat(newExternalKafkaClient.receiveMessagesTls(), is(numberOfMessages));

    LOGGER.info("Checking that KafkaUser {} is not able to send messages to topic '{}'", kafkaUserRead, topicName);
    assertThrows(Exception.class, newExternalKafkaClient::sendMessagesTls);
}
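For reference, the reader user's KafkaUser spec should correspond to broker-side ACL bindings roughly like the following, expressed here with the plain Kafka Admin API. This is an assumed mapping, not Strimzi code: the AclOperation below is the Kafka client's enum, not Strimzi's model class, and the principal string is illustrative, since the User Operator derives it from the user's TLS certificate.

import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import java.util.List;

public class ReaderAclSketch {
    public static List<AclBinding> readerAcls(String topicName) {
        // Illustrative principal; the real one comes from the KafkaUser's certificate CN
        String principal = "User:CN=kafka-user-read";
        return List.of(
            // READ on the topic itself
            new AclBinding(
                new ResourcePattern(ResourceType.TOPIC, topicName, PatternType.LITERAL),
                new AccessControlEntry(principal, "*", AclOperation.READ, AclPermissionType.ALLOW)),
            // READ on the consumer group; without it, consuming fails with GroupAuthorizationException
            new AclBinding(
                new ResourcePattern(ResourceType.GROUP, "consumer-group-name-1", PatternType.LITERAL),
                new AccessControlEntry(principal, "*", AclOperation.READ, AclPermissionType.ALLOW)),
            // DESCRIBE so the client can fetch topic metadata
            new AclBinding(
                new ResourcePattern(ResourceType.TOPIC, topicName, PatternType.LITERAL),
                new AccessControlEntry(principal, "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)));
    }
}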
Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.
The class SecurityST, method testAutoRenewCaCertsTriggerByExpiredCertificate: verifies that an already-expired cluster CA certificate is automatically renewed.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testAutoRenewCaCertsTriggerByExpiredCertificate(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);

    // 1. Pre-create the cluster CA cert Secret with a certificate that has already expired
    InputStream secretInputStream = getClass().getClassLoader().getResourceAsStream("security-st-certs/expired-cluster-ca.crt");
    String clusterCaCert = TestUtils.readResource(secretInputStream);
    SecretUtils.createSecret(testStorage.getNamespaceName(), clusterCaCertificateSecretName(testStorage.getClusterName()), "ca.crt", clusterCaCert);

    // 2. Now create the cluster
    createKafkaCluster(extensionContext, testStorage.getClusterName());

    resourceManager.createResource(extensionContext,
        KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build(),
        KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());

    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withNamespaceName(testStorage.getNamespaceName())
        .withUserName(testStorage.getUserName())
        .build();

    resourceManager.createResource(extensionContext,
        kafkaClients.producerTlsStrimzi(testStorage.getClusterName()),
        kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    // Wait until the certificates have been replaced
    SecretUtils.waitForCertToChange(testStorage.getNamespaceName(), clusterCaCert, clusterCaCertificateSecretName(testStorage.getClusterName()));

    // Wait until the pods are all up and ready
    KafkaUtils.waitForClusterStability(testStorage.getNamespaceName(), testStorage.getClusterName());

    resourceManager.createResource(extensionContext,
        kafkaClients.producerTlsStrimzi(testStorage.getClusterName()),
        kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
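Since the test seeds the CA Secret with an already-expired PEM certificate, such a resource can be sanity-checked with the JDK's built-in X.509 support. A hypothetical helper, not part of the Strimzi test-utils shown above; it assumes the input is the raw PEM string as returned by TestUtils.readResource:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Date;

public class CertExpiryCheck {
    // Returns true when the PEM-encoded certificate's notAfter timestamp is in the past
    public static boolean isExpired(String pem) throws Exception {
        // CertificateFactory accepts PEM ("-----BEGIN CERTIFICATE-----") as well as DER input
        X509Certificate cert = (X509Certificate) CertificateFactory
            .getInstance("X.509")
            .generateCertificate(new ByteArrayInputStream(pem.getBytes(StandardCharsets.US_ASCII)));
        return cert.getNotAfter().before(new Date());
    }
}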
Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.
The class ConnectIsolatedST, method testKafkaConnectWithPlainAndScramShaAuthentication: verifies that KafkaConnect can authenticate to Kafka over a plain (non-TLS) listener with SCRAM-SHA-512 credentials.
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test class")
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testKafkaConnectWithPlainAndScramShaAuthentication(ExtensionContext extensionContext) {
    TestStorage storage = new TestStorage(extensionContext);

    // Use a Kafka cluster whose only listener is plain (no TLS), secured with SCRAM-SHA-512 authentication
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(storage.getClusterName(), 3)
            .editSpec()
                .editKafka()
                    .withListeners(new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withAuth(new KafkaListenerAuthenticationScramSha512())
                        .build())
                .endKafka()
            .endSpec()
            .build());

    KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(storage.getClusterName(), storage.getUserName()).build();
    resourceManager.createResource(extensionContext, kafkaUser);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(storage.getClusterName(), storage.getTopicName()).build());

    KafkaConnect connect = KafkaConnectTemplates.kafkaConnectWithFilePlugin(storage.getNamespaceName(), storage.getClusterName(), 1)
        .editSpec()
            .withBootstrapServers(KafkaResources.plainBootstrapAddress(storage.getClusterName()))
            .withNewKafkaClientAuthenticationScramSha512()
                .withUsername(storage.getUserName())
                .withPasswordSecret(new PasswordSecretSourceBuilder()
                    .withSecretName(storage.getUserName())
                    .withPassword("password")
                    .build())
            .endKafkaClientAuthenticationScramSha512()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .withVersion(Environment.ST_KAFKA_VERSION)
            .withReplicas(1)
        .endSpec()
        .build();

    // This is required to remove the TLS setting; the builder cannot remove it
    connect.getSpec().setTls(null);

    resourceManager.createResource(extensionContext, connect, ScraperTemplates.scraperPod(storage.getNamespaceName(), storage.getScraperName()).build());

    LOGGER.info("Deploying NetworkPolicies for KafkaConnect");
    NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(storage.getClusterName()));

    final String kafkaConnectPodName = kubeClient(storage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.deploymentName(storage.getClusterName())).get(0).getMetadata().getName();
    final String kafkaConnectLogs = kubeClient(storage.getNamespaceName()).logs(kafkaConnectPodName);
    final String scraperPodName = kubeClient(storage.getNamespaceName()).listPodsByPrefixInName(storage.getScraperName()).get(0).getMetadata().getName();

    KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(storage.getNamespaceName(), kafkaConnectPodName);

    LOGGER.info("Verifying that KafkaConnect pod logs don't contain ERRORs");
    assertThat(kafkaConnectLogs, not(containsString("ERROR")));

    // Registers a FileStreamSink connector through the Connect REST API (see the sketch after this method)
    LOGGER.info("Creating FileStreamSink connector via pod {} with topic {}", scraperPodName, storage.getTopicName());
    KafkaConnectorUtils.createFileSinkConnector(storage.getNamespaceName(), scraperPodName, storage.getTopicName(), Constants.DEFAULT_SINK_FILE_PATH, KafkaConnectResources.url(storage.getClusterName(), storage.getNamespaceName(), 8083));

    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(storage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .withUserName(storage.getUserName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(storage.getClusterName()))
        .withProducerName(storage.getProducerName())
        .withConsumerName(storage.getConsumerName())
        .withNamespaceName(storage.getNamespaceName())
        .build();

    resourceManager.createResource(extensionContext, kafkaClients.producerScramShaPlainStrimzi(), kafkaClients.consumerScramShaPlainStrimzi());
    ClientUtils.waitForClientsSuccess(storage.getProducerName(), storage.getConsumerName(), storage.getNamespaceName(), MESSAGE_COUNT);

    // Wait until message "99" appears in the connector's sink file
    KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(storage.getNamespaceName(), kafkaConnectPodName, Constants.DEFAULT_SINK_FILE_PATH, "99");
}
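The createFileSinkConnector helper above talks to the standard Kafka Connect REST API. Below is a standalone sketch of the equivalent registration call; the connector name, topic, file path, and Connect URL are illustrative, and the exact payload the Strimzi helper builds is an assumption.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FileSinkConnectorSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative values; in the test these come from TestStorage and Constants
        String connectUrl = "http://my-connect-connect-api:8083";
        String json = "{"
            + "\"name\": \"sink-connector\","
            + "\"config\": {"
            +   "\"connector.class\": \"org.apache.kafka.connect.file.FileStreamSinkConnector\","
            +   "\"topics\": \"my-topic\","
            +   "\"file\": \"/tmp/test-file-sink.txt\""
            + "}}";

        // POST /connectors registers a new connector instance with the Connect cluster
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create(connectUrl + "/connectors"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(json))
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}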
Use of io.strimzi.systemtest.annotations.KRaftNotSupported in project strimzi by strimzi.
The class ConnectIsolatedST, method testConnectTlsAuthWithWeirdUserName: verifies TLS client authentication for KafkaConnect with a user name that contains a dot and hits the 64-character limit.
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(CONNECTOR_OPERATOR)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test class")
@ParallelNamespaceTest
void testConnectTlsAuthWithWeirdUserName(ExtensionContext extensionContext) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);

    // Create weird named user with . and maximum of 64 chars -> TLS
    final String weirdUserName = "jjglmahyijoambryleyxjjglmahy.ijoambryleyxjjglmahyijoambryleyxasd";

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(clusterName, 3)
            .editSpec()
                .editKafka()
                    .withListeners(
                        new GenericKafkaListenerBuilder()
                            .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                            .withPort(9093)
                            .withType(KafkaListenerType.INTERNAL)
                            .withTls(true)
                            .withAuth(new KafkaListenerAuthenticationTls())
                            .build(),
                        new GenericKafkaListenerBuilder()
                            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                            .withPort(9094)
                            .withType(KafkaListenerType.NODEPORT)
                            .withTls(true)
                            .withAuth(new KafkaListenerAuthenticationTls())
                            .build())
                .endKafka()
            .endSpec()
            .build());

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, weirdUserName).build());

    resourceManager.createResource(extensionContext,
        KafkaConnectTemplates.kafkaConnectWithFilePlugin(namespaceName, clusterName, 1)
            .editMetadata()
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .editSpec()
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                .withNewTls()
                    .withTrustedCertificates(new CertSecretSourceBuilder()
                        .withCertificate("ca.crt")
                        .withSecretName(KafkaResources.clusterCaCertificateSecretName(clusterName))
                        .build())
                .endTls()
                .withNewKafkaClientAuthenticationTls()
                    .withNewCertificateAndKey()
                        .withSecretName(weirdUserName)
                        .withCertificate("user.crt")
                        .withKey("user.key")
                    .endCertificateAndKey()
                .endKafkaClientAuthenticationTls()
                .withBootstrapServers(KafkaResources.tlsBootstrapAddress(clusterName))
            .endSpec()
            .build());

    testConnectAuthorizationWithWeirdUserName(extensionContext, clusterName, weirdUserName, SecurityProtocol.SSL, topicName);
}