Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
Class ListenersST, method testCustomSoloCertificatesForLoadBalancer.
@ParallelNamespaceTest
@Tag(LOADBALANCER_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(INTERNAL_CLIENTS_USED)
void testCustomSoloCertificatesForLoadBalancer(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String clusterCustomCertServer1 = clusterName + "-" + customCertServer1;
SecretUtils.createCustomSecret(clusterCustomCertServer1, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_1);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9107)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withNewConfiguration()
                        .withNewBrokerCertChainAndKey()
                            .withSecretName(clusterCustomCertServer1)
                            .withKey("ca.key")
                            .withCertificate("ca.crt")
                        .endBrokerCertChainAndKey()
                    .endConfiguration()
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9108)
                    .withType(KafkaListenerType.LOADBALANCER)
                    .withTls(true)
                    .withNewConfiguration()
                        .withNewBrokerCertChainAndKey()
                            .withSecretName(clusterCustomCertServer1)
                            .withKey("ca.key")
                            .withCertificate("ca.crt")
                        .endBrokerCertChainAndKey()
                        .withFinalizers(LB_FINALIZERS)
                    .endConfiguration()
                    .build())
        .endKafka()
    .endSpec()
    .build());
KafkaUser aliceUser = KafkaUserTemplates.tlsUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, aliceUser);
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withCertificateAuthorityCertificateName(clusterCustomCertServer1)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(
    externalKafkaClient.sendMessagesTls(),
    externalKafkaClient.receiveMessagesTls()
);
// Deploy client pod with custom certificates and collect messages from internal TLS listener
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, false, aliceUser).build());
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName())
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withConsumerGroupName("consumer-group-certs-3")
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
int sent = internalKafkaClient.sendMessagesTls();
assertThat(sent, is(MESSAGE_COUNT));
internalKafkaClient.setMessageCount(MESSAGE_COUNT * 2);
int received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(2 * MESSAGE_COUNT));
}
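For orientation, the ExternalKafkaClient used above wraps a plain Apache Kafka client. A minimal sketch of the SSL settings such a client needs against the TLS load-balancer listener looks roughly like this; the bootstrap address and truststore path are placeholders, whereas in the test they are derived from the Kafka status and the custom certificate Secret.

import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.serialization.StringSerializer;

public class ExternalTlsClientSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder: address of the load balancer created for the external listener (port 9108 above).
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "my-loadbalancer.example.com:9108");
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
        // Truststore holding the custom CA certificate ("ca.crt") referenced by brokerCertChainAndKey.
        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/tmp/custom-ca-truststore.p12");
        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "changeit");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // If the TLS handshake succeeds, the producer can send to the topic used by the test.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // producer.send(new ProducerRecord<>("my-topic", "hello"));
        }
    }
}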
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
Class ListenersST, method testNodePortTls.
@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testNodePortTls(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9101)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationTls())
                .build())
            .withConfig(Collections.singletonMap("default.replication.factor", 3))
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName).build());
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withKafkaUsername(userName)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(
    externalKafkaClient.sendMessagesTls(),
    externalKafkaClient.receiveMessagesTls()
);
}
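Unlike the previous example, this NodePort listener also enables TLS client authentication (KafkaListenerAuthenticationTls), so a client outside the test framework has to present the KafkaUser certificate as well. A rough sketch under assumed host, node port, and keystore paths:

import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.config.SslConfigs;

public class NodePortMutualTlsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder: any Kubernetes node address plus the node port allocated for the listener (not 9101 itself).
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "node-1.example.com:31234");
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
        // Truststore with the cluster CA so the broker certificate is trusted.
        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/tmp/cluster-ca-truststore.p12");
        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "changeit");
        // Keystore with the KafkaUser certificate and key for TLS client authentication.
        props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "/tmp/tls-user-keystore.p12");
        props.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "changeit");

        try (Admin admin = Admin.create(props)) {
            // The handshake fails fast if either certificate is rejected.
            System.out.println(admin.describeCluster().nodes().get());
        }
    }
}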
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
Class ListenersST, method testCustomChainCertificatesForLoadBalancer.
@ParallelNamespaceTest
@Tag(LOADBALANCER_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(INTERNAL_CLIENTS_USED)
void testCustomChainCertificatesForLoadBalancer(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String clusterCustomCertChain1 = clusterName + "-" + customCertChain1;
final String clusterCustomRootCA1 = clusterName + "-" + customRootCA1;
SecretUtils.createCustomSecret(clusterCustomCertChain1, clusterName, namespaceName, CHAIN_CERT_AND_KEY_1);
SecretUtils.createCustomSecret(clusterCustomRootCA1, clusterName, namespaceName, ROOT_CA_CERT_AND_KEY_1);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9109)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withNewConfiguration()
                        .withNewBrokerCertChainAndKey()
                            .withSecretName(clusterCustomCertChain1)
                            .withKey("ca.key")
                            .withCertificate("ca.crt")
                        .endBrokerCertChainAndKey()
                    .endConfiguration()
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9110)
                    .withType(KafkaListenerType.LOADBALANCER)
                    .withTls(true)
                    .withNewConfiguration()
                        .withNewBrokerCertChainAndKey()
                            .withSecretName(clusterCustomCertChain1)
                            .withKey("ca.key")
                            .withCertificate("ca.crt")
                        .endBrokerCertChainAndKey()
                        .withFinalizers(LB_FINALIZERS)
                    .endConfiguration()
                    .build())
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
KafkaTopicUtils.waitForKafkaTopicCreationByNamePrefix(namespaceName, topicName);
KafkaUser aliceUser = KafkaUserTemplates.tlsUser(clusterName, userName).editMetadata().withNamespace(namespaceName).endMetadata().build();
resourceManager.createResource(extensionContext, aliceUser);
KafkaUserUtils.waitForKafkaUserCreation(namespaceName, userName);
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withCertificateAuthorityCertificateName(clusterCustomRootCA1)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(
    externalKafkaClient.sendMessagesTls(),
    externalKafkaClient.receiveMessagesTls()
);
// Deploy client pod with custom certificates and collect messages from internal TLS listener
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, false, aliceUser).build());
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName())
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withConsumerGroupName("consumer-group-certs-4")
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
int sent = internalKafkaClient.sendMessagesTls();
assertThat(sent, is(MESSAGE_COUNT));
internalKafkaClient.setMessageCount(MESSAGE_COUNT * 2);
int received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(2 * MESSAGE_COUNT));
}
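Note that the external client above trusts only the root CA Secret (clusterCustomRootCA1) even though the brokers present an intermediate chain; standard TLS path validation only needs the trust anchor. A minimal sketch of turning such a root-CA PEM into a PKCS12 truststore with plain JDK APIs (file paths are assumptions):

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.security.KeyStore;
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;

public class RootCaTruststoreSketch {
    public static void main(String[] args) throws Exception {
        CertificateFactory factory = CertificateFactory.getInstance("X.509");
        KeyStore truststore = KeyStore.getInstance("PKCS12");
        truststore.load(null, null); // start with an empty store

        // Placeholder path: the "ca.crt" entry of the root CA secret, Base64-decoded to a PEM file.
        try (FileInputStream pem = new FileInputStream("/tmp/custom-root-ca.crt")) {
            int i = 0;
            for (Certificate cert : factory.generateCertificates(pem)) {
                truststore.setCertificateEntry("root-ca-" + i++, cert);
            }
        }
        try (FileOutputStream out = new FileOutputStream("/tmp/custom-root-ca-truststore.p12")) {
            truststore.store(out, "changeit".toCharArray());
        }
    }
}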
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
Class ListenersST, method testMessagesTlsScramShaWithPredefinedPassword.
@ParallelNamespaceTest
void testMessagesTlsScramShaWithPredefinedPassword(ExtensionContext extensionContext) {
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
final String firstUnencodedPassword = "completely_secret_password";
final String secondUnencodedPassword = "completely_different_secret_password";
final String firstEncodedPassword = Base64.getEncoder().encodeToString(firstUnencodedPassword.getBytes(StandardCharsets.UTF_8));
final String secondEncodedPassword = Base64.getEncoder().encodeToString(secondUnencodedPassword.getBytes(StandardCharsets.UTF_8));
final String secretName = clusterName + "-secret";
Secret password = new SecretBuilder().withNewMetadata().withName(secretName).endMetadata().addToData("password", firstEncodedPassword).build();
kubeClient().namespace(namespaceName).createSecret(password);
assertThat("Password in secret is not correct", kubeClient().namespace(namespaceName).getSecret(secretName).getData().get("password"), is(firstEncodedPassword));
KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(clusterName, userName)
    .editSpec()
        .withNewKafkaUserScramSha512ClientAuthentication()
            .withNewPassword()
                .withNewValueFrom()
                    .withNewSecretKeyRef("password", secretName, false)
                .endValueFrom()
            .endPassword()
        .endKafkaUserScramSha512ClientAuthentication()
    .endSpec()
    .build();
resourceManager.createResource(extensionContext,
    KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withType(KafkaListenerType.INTERNAL)
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9096)
                    .withTls(true)
                    .withNewKafkaListenerAuthenticationScramSha512Auth()
                    .endKafkaListenerAuthenticationScramSha512Auth()
                    .build())
            .endKafka()
        .endSpec()
        .build(),
    kafkaUser,
    KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, true, kafkaClientsName, kafkaUser).build());
String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kafkaClientsPodName)
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
int sentMessages = internalKafkaClient.sendMessagesTls();
internalKafkaClient.checkProducedAndConsumedMessages(sentMessages, internalKafkaClient.receiveMessagesTls());
LOGGER.info("Changing password in secret: {}, we should be able to send/receive messages", secretName);
password = new SecretBuilder(password).addToData("password", secondEncodedPassword).build();
kubeClient().namespace(namespaceName).createSecret(password);
SecretUtils.waitForUserPasswordChange(namespaceName, userName, secondEncodedPassword);
LOGGER.info("We need to recreate Kafka Clients deployment, so the correct password from secret will be taken");
resourceManager.deleteResource(kubeClient().getDeployment(namespaceName, kafkaClientsName));
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, true, kafkaClientsName, kafkaUser).build());
LOGGER.info("Receiving messages with new password");
kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
internalKafkaClient = internalKafkaClient.toBuilder().withUsingPodName(kafkaClientsPodName).withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
internalKafkaClient.checkProducedAndConsumedMessages(sentMessages, internalKafkaClient.receiveMessagesTls());
}
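For reference, a standalone client talking to the SCRAM-SHA-512 TLS listener above would use SASL_SSL with the ScramLoginModule. The bootstrap address and truststore path are placeholders; the password shown is the value the test stores in the user-supplied Secret, which the User Operator then applies to the KafkaUser:

import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ScramTlsClientSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder: internal bootstrap service of the cluster, listener port 9096 as configured above.
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9096");
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
            "org.apache.kafka.common.security.scram.ScramLoginModule required "
                + "username=\"my-user\" password=\"completely_secret_password\";");
        // Truststore with the cluster CA for the TLS part of SASL_SSL.
        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/tmp/cluster-ca-truststore.p12");
        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "changeit");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "scram-sketch-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // consumer.subscribe(java.util.List.of("my-topic"));
        }
    }
}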
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
Class ListenersST, method testCustomChainCertificatesForRoute.
@ParallelNamespaceTest
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(INTERNAL_CLIENTS_USED)
@OpenShiftOnly
void testCustomChainCertificatesForRoute(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String clusterCustomCertChain1 = clusterName + "-" + customCertChain1;
final String clusterCustomRootCA1 = clusterName + "-" + customRootCA1;
SecretUtils.createCustomSecret(clusterCustomCertChain1, clusterName, namespaceName, CHAIN_CERT_AND_KEY_1);
SecretUtils.createCustomSecret(clusterCustomRootCA1, clusterName, namespaceName, ROOT_CA_CERT_AND_KEY_1);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9112)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withNewConfiguration()
                        .withNewBrokerCertChainAndKey()
                            .withSecretName(clusterCustomCertChain1)
                            .withKey("ca.key")
                            .withCertificate("ca.crt")
                        .endBrokerCertChainAndKey()
                    .endConfiguration()
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9113)
                    .withType(KafkaListenerType.ROUTE)
                    .withTls(true)
                    .withNewConfiguration()
                        .withNewBrokerCertChainAndKey()
                            .withSecretName(clusterCustomCertChain1)
                            .withKey("ca.key")
                            .withCertificate("ca.crt")
                        .endBrokerCertChainAndKey()
                    .endConfiguration()
                    .build())
        .endKafka()
    .endSpec()
    .build());
KafkaUser aliceUser = KafkaUserTemplates.tlsUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, aliceUser);
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withCertificateAuthorityCertificateName(clusterCustomRootCA1)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(
    externalKafkaClient.sendMessagesTls(),
    externalKafkaClient.receiveMessagesTls()
);
// Deploy client pod with custom certificates and collect messages from internal TLS listener
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, false, aliceUser).build());
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName())
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withConsumerGroupName("consumer-group-certs-6")
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
int sent = internalKafkaClient.sendMessagesTls();
assertThat(sent, is(MESSAGE_COUNT));
internalKafkaClient.setMessageCount(MESSAGE_COUNT * 2);
int received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(2 * MESSAGE_COUNT));
}
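The custom certificate Secrets prepared by SecretUtils.createCustomSecret in these tests are referenced only through the "ca.crt" and "ca.key" entries used in brokerCertChainAndKey. A hand-rolled equivalent with the fabric8 SecretBuilder (the same builder used in the SCRAM-SHA test above) might look like the following; the PEM contents, names, and any extra labels the test helper sets are assumptions:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;

public class CustomCertSecretSketch {
    public static void main(String[] args) {
        // Placeholder PEM strings; the real helper loads generated certificate and key files.
        String certificatePem = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n";
        String privateKeyPem = "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n";

        Secret customCertSecret = new SecretBuilder()
            .withNewMetadata()
                .withName("my-cluster-custom-certificate")
                .withNamespace("my-namespace")
            .endMetadata()
            // Data values must be Base64-encoded and use the keys referenced by brokerCertChainAndKey.
            .addToData("ca.crt", Base64.getEncoder().encodeToString(certificatePem.getBytes(StandardCharsets.UTF_8)))
            .addToData("ca.key", Base64.getEncoder().encodeToString(privateKeyPem.getBytes(StandardCharsets.UTF_8)))
            .build();

        // The Secret must exist in the cluster before the Kafka resource referencing it is deployed.
        System.out.println(customCertSecret.getMetadata().getName());
    }
}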