use of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi-kafka-operator by strimzi.
the class ListenersST method testLoadBalancerTls.
@ParallelNamespaceTest
@Tag(ACCEPTANCE)
@Tag(LOADBALANCER_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testLoadBalancerTls(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9103)
                .withType(KafkaListenerType.LOADBALANCER)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationTls())
                .withNewConfiguration()
                    .withFinalizers(LB_FINALIZERS)
                .endConfiguration()
                .build())
            .withConfig(Collections.singletonMap("default.replication.factor", 3))
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName).build());
ServiceUtils.waitUntilAddressIsReachable(KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).get()
    .getStatus().getListeners().get(0).getAddresses().get(0).getHost());
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withKafkaUsername(userName)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
}
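For orientation, testLoadBalancerTls shows the canonical ExternalKafkaClient round trip: build an immutable client for one listener and one user, produce over TLS, consume over TLS, and compare the two counts. Below is a minimal sketch of just that round trip, assuming the same test-class context (imports, MESSAGE_COUNT, a TLS listener named Constants.EXTERNAL_LISTENER_DEFAULT_NAME and an existing TLS KafkaUser) as the snippet above; the helper name is made up for illustration.
// Hypothetical helper distilled from the test above, not part of the test suite.
private void assertTlsRoundTrip(String namespaceName, String clusterName, String topicName, String userName) {
    ExternalKafkaClient client = new ExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(userName)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();

    // sendMessagesTls() and receiveMessagesTls() return the number of messages actually
    // produced and consumed; verifyProducedAndConsumedMessages fails the test if the
    // two counts do not line up.
    client.verifyProducedAndConsumedMessages(
        client.sendMessagesTls(),
        client.receiveMessagesTls()
    );
}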
use of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi by strimzi.
the class SecurityST method testAclWithSuperUser.
@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testAclWithSuperUser(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
    .editSpec()
        .editKafka()
            .withNewKafkaAuthorizationSimple()
                .withSuperUsers("CN=" + userName)
            .endKafkaAuthorizationSimple()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                .withPort(9094)
                .withType(KafkaListenerType.NODEPORT)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationTls())
                .build())
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName)
    .editSpec()
        .withNewKafkaUserAuthorizationSimple()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(topicName)
                .endAclRuleTopicResource()
                .withOperation(AclOperation.WRITE)
            .endAcl()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(topicName)
                .endAclRuleTopicResource()
                // DESCRIBE is needed so that the user can fetch topic metadata
                .withOperation(AclOperation.DESCRIBE)
            .endAcl()
        .endKafkaUserAuthorizationSimple()
    .endSpec()
    .build());
LOGGER.info("Checking kafka super user:{} that is able to send messages to topic:{}", userName, topicName);
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
assertThat(externalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));
LOGGER.info("Checking kafka super user:{} that is able to read messages to topic:{} regardless that " + "we configured Acls with only write operation", userName, topicName);
assertThat(externalKafkaClient.receiveMessagesTls(), is(MESSAGE_COUNT));
String nonSuperuserName = userName + "-non-super-user";
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, nonSuperuserName)
    .editSpec()
        .withNewKafkaUserAuthorizationSimple()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(topicName)
                .endAclRuleTopicResource()
                .withOperation(AclOperation.WRITE)
            .endAcl()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(topicName)
                .endAclRuleTopicResource()
                // DESCRIBE is needed so that the user can fetch topic metadata
                .withOperation(AclOperation.DESCRIBE)
            .endAcl()
        .endKafkaUserAuthorizationSimple()
    .endSpec()
    .build());
LOGGER.info("Checking kafka super user:{} that is able to send messages to topic:{}", nonSuperuserName, topicName);
externalKafkaClient = externalKafkaClient.toBuilder().withKafkaUsername(nonSuperuserName).build();
assertThat(externalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));
LOGGER.info("Checking kafka super user:{} that is not able to read messages to topic:{} because of defined" + " ACLs on only write operation", nonSuperuserName, topicName);
ExternalKafkaClient newExternalKafkaClient = externalKafkaClient.toBuilder().withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
assertThrows(GroupAuthorizationException.class, newExternalKafkaClient::receiveMessagesTls);
}
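The negative check at the end relies on two ExternalKafkaClient details: toBuilder() copies the existing client configuration so only the principal and consumer group need to change, and a consumer whose user lacks a group ACL fails with GroupAuthorizationException. A condensed, hypothetical sketch of that reuse pattern, assuming the same test-class context as the snippet above:
// Hypothetical sketch of the "reuse the configured client for another user" pattern above.
ExternalKafkaClient superUserClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)                       // super user: all operations allowed
    .withMessageCount(MESSAGE_COUNT)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();

// Same connection settings, different principal and a fresh consumer group.
ExternalKafkaClient restrictedClient = superUserClient.toBuilder()
    .withKafkaUsername(nonSuperuserName)               // only WRITE + DESCRIBE topic ACLs
    .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
    .build();

// Producing still succeeds; consuming is rejected at the group-authorization level.
assertThat(restrictedClient.sendMessagesTls(), is(MESSAGE_COUNT));
assertThrows(GroupAuthorizationException.class, restrictedClient::receiveMessagesTls);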
use of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi by strimzi.
the class ListenersST method testCustomCertRouteAndTlsRollingUpdate.
@ParallelNamespaceTest
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(INTERNAL_CLIENTS_USED)
@OpenShiftOnly
@SuppressWarnings({ "checkstyle:MethodLength" })
void testCustomCertRouteAndTlsRollingUpdate(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String clusterCustomCertServer1 = clusterName + "-" + customCertServer1;
final String clusterCustomCertServer2 = clusterName + "-" + customCertServer2;
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
SecretUtils.createCustomSecret(clusterCustomCertServer1, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_1);
SecretUtils.createCustomSecret(clusterCustomCertServer2, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_2);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3)
    .editSpec()
        .editKafka()
            .withListeners(
                new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9117)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9118)
                    .withType(KafkaListenerType.ROUTE)
                    .withTls(true)
                    .build())
        .endKafka()
    .endSpec()
    .build());
KafkaUser aliceUser = KafkaUserTemplates.tlsUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, aliceUser);
String externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
String externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterName + "-cluster-ca-cert", "ca.crt");
String internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
LOGGER.info("Check if KafkaStatus certificates from external listeners are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
// External secret cert is same as internal in this case
assertThat(externalSecretCerts, is(internalCerts));
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withCertificateAuthorityCertificateName(null)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(namespaceName, kafkaSelector);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
    kafka.getSpec().getKafka().setListeners(asList(
        new GenericKafkaListenerBuilder()
            .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
            .withPort(9117)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(true)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withSecretName(clusterCustomCertServer2)
                    .withKey("ca.key")
                    .withCertificate("ca.crt")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build(),
        new GenericKafkaListenerBuilder()
            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .withPort(9118)
            .withType(KafkaListenerType.ROUTE)
            .withTls(true)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withSecretName(clusterCustomCertServer1)
                    .withKey("ca.key")
                    .withCertificate("ca.crt")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build()));
}, namespaceName);
kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaSnapshot);
KafkaUtils.waitForKafkaStatusUpdate(namespaceName, clusterName);
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer1, "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
String internalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(null).withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls() + MESSAGE_COUNT, externalKafkaClient.receiveMessagesTls());
// Deploy client pod with custom certificates and collect messages from internal TLS listener
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, false, aliceUser).build());
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kubeClient(namespaceName)
        .listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS)
        .get(0).getMetadata().getName())
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withConsumerGroupName("consumer-group-certs-91")
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
int sent = internalKafkaClient.sendMessagesTls();
assertThat(sent, is(MESSAGE_COUNT));
internalKafkaClient.setMessageCount(MESSAGE_COUNT * 3);
int received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(3 * MESSAGE_COUNT));
SecretUtils.createCustomSecret(clusterCustomCertServer1, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_2);
SecretUtils.createCustomSecret(clusterCustomCertServer2, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_1);
kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaSnapshot);
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer1, "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
internalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls() + MESSAGE_COUNT, externalKafkaClient.receiveMessagesTls());
internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName("consumer-group-certs-92").withMessageCount(MESSAGE_COUNT).build();
sent = internalKafkaClient.sendMessagesTls();
assertThat(sent, is(MESSAGE_COUNT));
internalKafkaClient = internalKafkaClient.toBuilder().withMessageCount(5 * MESSAGE_COUNT).build();
received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(5 * MESSAGE_COUNT));
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
    kafka.getSpec().getKafka().setListeners(asList(
        new GenericKafkaListenerBuilder()
            .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
            .withPort(9117)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(true)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withSecretName(clusterCustomCertServer2)
                    .withKey("ca.key")
                    .withCertificate("ca.crt")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build(),
        new GenericKafkaListenerBuilder()
            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .withPort(9118)
            .withType(KafkaListenerType.ROUTE)
            .withTls(true)
            .build()));
}, namespaceName);
RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaSnapshot);
KafkaUtils.waitForKafkaStatusUpdate(namespaceName, clusterName);
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterName + "-cluster-ca-cert", "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
internalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(null).withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
sent = externalKafkaClient.sendMessagesTls() + (5 * MESSAGE_COUNT);
externalKafkaClient = externalKafkaClient.toBuilder().withMessageCount(6 * MESSAGE_COUNT).build();
externalKafkaClient.verifyProducedAndConsumedMessages(sent, externalKafkaClient.receiveMessagesTls());
internalKafkaClient = internalKafkaClient.toBuilder().withMessageCount(6 * MESSAGE_COUNT).withConsumerGroupName("consumer-group-certs-93").build();
received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(6 * MESSAGE_COUNT));
}
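This test compares the listener certificates reported in KafkaStatus with the certificates stored in Secrets four separate times. A hypothetical helper that factors out this repeated comparison, built only from the utilities already used above (the helper name and parameters are made up for illustration):
// Hypothetical refactoring sketch; assertListenerCertsMatchSecret is not part of the test
// suite, it only wraps the getKafkaStatusCertificates/getKafkaSecretCertificates pattern
// repeated throughout the test above.
private void assertListenerCertsMatchSecret(String listenerName, String namespaceName,
                                            String clusterName, String secretName) {
    String statusCerts = getKafkaStatusCertificates(listenerName, namespaceName, clusterName);
    String secretCerts = getKafkaSecretCertificates(namespaceName, secretName, "ca.crt");
    assertThat(secretCerts, is(statusCerts));
}

// Possible usage after the first rolling update:
// assertListenerCertsMatchSecret(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName, clusterCustomCertServer1);
// assertListenerCertsMatchSecret(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName, clusterCustomCertServer2);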
use of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi by strimzi.
the class ListenersST method testCustomCertNodePortAndTlsRollingUpdate.
@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(INTERNAL_CLIENTS_USED)
@SuppressWarnings({ "checkstyle:MethodLength" })
void testCustomCertNodePortAndTlsRollingUpdate(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String clusterCustomCertServer1 = clusterName + "-" + customCertServer1;
final String clusterCustomCertServer2 = clusterName + "-" + customCertServer2;
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
SecretUtils.createCustomSecret(clusterCustomCertServer1, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_1);
SecretUtils.createCustomSecret(clusterCustomCertServer2, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_2);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3)
    .editSpec()
        .editKafka()
            .withListeners(
                new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9115)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9116)
                    .withType(KafkaListenerType.NODEPORT)
                    .withTls(true)
                    .build())
        .endKafka()
    .endSpec()
    .build());
KafkaUser aliceUser = KafkaUserTemplates.tlsUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, aliceUser);
String externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
String externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterName + "-cluster-ca-cert", "ca.crt");
String internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
LOGGER.info("Check if KafkaStatus certificates from external listeners are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
// External secret cert is same as internal in this case
assertThat(externalSecretCerts, is(internalCerts));
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(namespaceName, kafkaSelector);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
    kafka.getSpec().getKafka().setListeners(asList(
        new GenericKafkaListenerBuilder()
            .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
            .withPort(9115)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(true)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withSecretName(clusterCustomCertServer2)
                    .withKey("ca.key")
                    .withCertificate("ca.crt")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build(),
        new GenericKafkaListenerBuilder()
            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .withPort(9116)
            .withType(KafkaListenerType.NODEPORT)
            .withTls(true)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withSecretName(clusterCustomCertServer1)
                    .withKey("ca.key")
                    .withCertificate("ca.crt")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build()));
}, namespaceName);
kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaSnapshot);
KafkaUtils.waitForKafkaStatusUpdate(namespaceName, clusterName);
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer1, "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
String internalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(clusterCustomCertServer1).build();
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
// Deploy client pod with custom certificates and collect messages from internal TLS listener
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, false, aliceUser).build());
int expectedMessageCountForNewGroup = MESSAGE_COUNT * 3;
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kubeClient(namespaceName)
        .listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS)
        .get(0).getMetadata().getName())
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withConsumerGroupName("consumer-group-certs-71")
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
int sent = internalKafkaClient.sendMessagesTls();
int expectedMessageCountForExternalClient = sent;
assertThat(sent, is(MESSAGE_COUNT));
internalKafkaClient.setMessageCount(expectedMessageCountForNewGroup);
int received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(expectedMessageCountForNewGroup));
SecretUtils.createCustomSecret(clusterCustomCertServer1, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_2);
SecretUtils.createCustomSecret(clusterCustomCertServer2, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_1);
kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaSnapshot);
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer1, "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
internalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
sent = externalKafkaClient.sendMessagesTls();
expectedMessageCountForExternalClient += sent;
expectedMessageCountForNewGroup += sent;
externalKafkaClient.verifyProducedAndConsumedMessages(expectedMessageCountForExternalClient, externalKafkaClient.receiveMessagesTls());
internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName("consumer-group-certs-72").withMessageCount(MESSAGE_COUNT).build();
sent = internalKafkaClient.sendMessagesTls();
expectedMessageCountForNewGroup += sent;
expectedMessageCountForExternalClient = sent;
assertThat(sent, is(MESSAGE_COUNT));
internalKafkaClient.setMessageCount(expectedMessageCountForNewGroup);
received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(expectedMessageCountForNewGroup));
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
    kafka.getSpec().getKafka().setListeners(asList(
        new GenericKafkaListenerBuilder()
            .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
            .withPort(9115)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(true)
            .withNewConfiguration()
                .withNewBrokerCertChainAndKey()
                    .withSecretName(clusterCustomCertServer2)
                    .withKey("ca.key")
                    .withCertificate("ca.crt")
                .endBrokerCertChainAndKey()
            .endConfiguration()
            .build(),
        new GenericKafkaListenerBuilder()
            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .withPort(9116)
            .withType(KafkaListenerType.NODEPORT)
            .withTls(true)
            .build()));
}, namespaceName);
RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaSnapshot);
KafkaUtils.waitForKafkaStatusUpdate(namespaceName, clusterName);
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
externalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterName + "-cluster-ca-cert", "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, namespaceName, clusterName);
internalSecretCerts = getKafkaSecretCertificates(namespaceName, clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(null).build();
sent = externalKafkaClient.sendMessagesTls();
expectedMessageCountForExternalClient += sent;
expectedMessageCountForNewGroup += sent;
externalKafkaClient.verifyProducedAndConsumedMessages(expectedMessageCountForExternalClient, externalKafkaClient.receiveMessagesTls());
internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName("consumer-group-certs-73").withMessageCount(expectedMessageCountForNewGroup).build();
received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(expectedMessageCountForNewGroup));
}
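The expectedMessageCountForExternalClient and expectedMessageCountForNewGroup variables above track two different expectations: an existing consumer group only sees what was produced since its last read, while a consumer joining under a new group id in these tests evidently reads the topic from the beginning and therefore sees every message produced so far. A stripped-down, hypothetical illustration of that bookkeeping, reusing the clients configured above (the group id is a placeholder):
// Hypothetical illustration of the offset bookkeeping in the test above; only the
// counting is shown, the client setup is unchanged.
int producedTotal = 0;

int batch = internalKafkaClient.sendMessagesTls();     // batch from the internal client
producedTotal += batch;

batch = externalKafkaClient.sendMessagesTls();         // batch from the external client
producedTotal += batch;

// A consumer under a brand-new group id re-reads the topic from the start, so it is
// expected to receive every message produced so far; this is where the 3x, 5x and 6x
// MESSAGE_COUNT expectations in the tests above come from.
internalKafkaClient = internalKafkaClient.toBuilder()
    .withConsumerGroupName("consumer-group-certs-illustration")   // placeholder group id
    .withMessageCount(producedTotal)
    .build();
assertThat(internalKafkaClient.receiveMessagesTls(), is(producedTotal));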
use of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi by strimzi.
the class ConnectIsolatedST method testConnectAuthorizationWithWeirdUserName.
void testConnectAuthorizationWithWeirdUserName(ExtensionContext extensionContext, String clusterName, String userName, SecurityProtocol securityProtocol, String topicName) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String connectorPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-connect").get(0).getMetadata().getName();
resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
    .editSpec()
        .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
        .addToConfig("topics", topicName)
        .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
    .endSpec()
    .build());
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withKafkaUsername(userName)
    .withMessageCount(MESSAGE_COUNT)
    .withSecurityProtocol(securityProtocol)
    .withTopicName(topicName)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
assertThat(externalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));
KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(namespaceName, connectorPodName, Constants.DEFAULT_SINK_FILE_PATH, "\"Hello-world - 99\"");
}
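This method is a parameterized helper rather than a standalone test: callers supply the cluster, the intentionally unusual user name, the security protocol and the topic after the cluster, user and topic have been created. A hypothetical invocation with placeholder values (the user name and the variable names are made up for illustration):
// Hypothetical caller; the cluster, the KafkaUser with the unusual name and the topic are
// assumed to exist already, as the real test cases arrange beforehand.
testConnectAuthorizationWithWeirdUserName(
    extensionContext,
    weirdUserClusterName,            // placeholder: cluster with a TLS external listener
    "user.with-weird_name",          // placeholder: user name with special characters
    SecurityProtocol.SSL,            // TLS listener, matching sendMessagesTls() in the helper
    weirdUserTopicName               // placeholder: topic wired into the FileStreamSink connector
);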