Usage of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi-kafka-operator by strimzi:
class MultipleListenersST, method runListenersTest.
/**
 * Exercises the supplied listener configuration: deploys a three-node ephemeral Kafka cluster
 * with the given listeners and verifies that messages can be produced and consumed through each
 * of them. Non-internal listeners are verified with an {@link ExternalKafkaClient}; internal
 * listeners are verified from client pods running inside the cluster.
 *
 * @param extensionContext JUnit extension context used for resource life-cycle tracking
 * @param listeners        listener configurations to apply to the Kafka custom resource
 * @param clusterName      name of the Kafka cluster to deploy
 * @throws Exception when message verification fails
 */
private void runListenersTest(ExtensionContext extensionContext, List<GenericKafkaListener> listeners, String clusterName) throws Exception {
    LOGGER.info("These are the listeners {}, which will be verified.", listeners);

    // exercise phase
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(listeners)
            .endKafka()
        .endSpec()
        .build());

    // only one thread may enter the verification phase (it touches many variables which can be
    // modified at run-time, i.e. a potential data race)
    synchronized (lock) {
        String kafkaUsername = KafkaUserUtils.generateRandomNameOfKafkaUser();
        KafkaUser kafkaUserInstance = KafkaUserTemplates.tlsUser(namespace, clusterName, kafkaUsername).build();
        resourceManager.createResource(extensionContext, kafkaUserInstance);

        for (GenericKafkaListener listener : listeners) {
            // each listener gets its own topic so the verifications do not interfere
            String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
            resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, namespace).build());

            boolean isTlsEnabled = listener.isTls();

            if (listener.getType() != KafkaListenerType.INTERNAL) {
                if (isTlsEnabled) {
                    // fixed: the original builder chain invoked withListenerName(...) twice
                    ExternalKafkaClient externalTlsKafkaClient = new ExternalKafkaClient.Builder()
                        .withTopicName(topicName)
                        .withNamespaceName(namespace)
                        .withClusterName(clusterName)
                        .withMessageCount(MESSAGE_COUNT)
                        .withKafkaUsername(kafkaUsername)
                        .withListenerName(listener.getName())
                        .withSecurityProtocol(SecurityProtocol.SSL)
                        .build();

                    LOGGER.info("Verifying {} listener", Constants.TLS_LISTENER_DEFAULT_NAME);

                    // verify phase
                    externalTlsKafkaClient.verifyProducedAndConsumedMessages(
                        externalTlsKafkaClient.sendMessagesTls(),
                        externalTlsKafkaClient.receiveMessagesTls()
                    );
                } else {
                    ExternalKafkaClient externalPlainKafkaClient = new ExternalKafkaClient.Builder()
                        .withTopicName(topicName)
                        .withNamespaceName(namespace)
                        .withClusterName(clusterName)
                        .withMessageCount(MESSAGE_COUNT)
                        .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
                        .withListenerName(listener.getName())
                        .build();

                    LOGGER.info("Verifying {} listener", Constants.PLAIN_LISTENER_DEFAULT_NAME);

                    // verify phase
                    externalPlainKafkaClient.verifyProducedAndConsumedMessages(
                        externalPlainKafkaClient.sendMessagesPlain(),
                        externalPlainKafkaClient.receiveMessagesPlain()
                    );
                }
            } else {
                String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());

                // using internal clients deployed as pods inside the cluster
                if (isTlsEnabled) {
                    resourceManager.createResource(extensionContext,
                        KafkaClientsTemplates.kafkaClients(true, kafkaClientsName + "-tls", listener.getName(), kafkaUserInstance)
                            .editMetadata()
                                .withNamespace(namespace)
                            .endMetadata()
                            .build());

                    final String kafkaClientsTlsPodName =
                        ResourceManager.kubeClient().listPodsByPrefixInName(namespace, kafkaClientsName + "-tls").get(0).getMetadata().getName();

                    InternalKafkaClient internalTlsKafkaClient = new InternalKafkaClient.Builder()
                        .withUsingPodName(kafkaClientsTlsPodName)
                        .withListenerName(listener.getName())
                        .withTopicName(topicName)
                        .withNamespaceName(namespace)
                        .withClusterName(clusterName)
                        .withKafkaUsername(kafkaUsername)
                        .withMessageCount(MESSAGE_COUNT)
                        .build();

                    LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsTlsPodName);

                    // verify phase
                    ClientUtils.waitUntilProducerAndConsumerSuccessfullySendAndReceiveMessages(extensionContext, internalTlsKafkaClient);
                } else {
                    resourceManager.createResource(extensionContext,
                        KafkaClientsTemplates.kafkaClients(false, kafkaClientsName + "-plain")
                            .editMetadata()
                                .withNamespace(namespace)
                            .endMetadata()
                            .build());

                    final String kafkaClientsPlainPodName =
                        ResourceManager.kubeClient().listPodsByPrefixInName(namespace, kafkaClientsName + "-plain").get(0).getMetadata().getName();

                    InternalKafkaClient internalPlainKafkaClient = new InternalKafkaClient.Builder()
                        .withUsingPodName(kafkaClientsPlainPodName)
                        .withListenerName(listener.getName())
                        .withTopicName(topicName)
                        .withNamespaceName(namespace)
                        .withClusterName(clusterName)
                        .withMessageCount(MESSAGE_COUNT)
                        .build();

                    LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPlainPodName);

                    // verify phase
                    internalPlainKafkaClient.checkProducedAndConsumedMessages(
                        internalPlainKafkaClient.sendMessagesPlain(),
                        internalPlainKafkaClient.receiveMessagesPlain()
                    );
                }
            }
        }
    }
}
Usage of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi-kafka-operator by strimzi:
class SecurityST, method testAclWithSuperUser.
/**
 * Verifies super-user semantics of simple ACL authorization: a user listed under
 * {@code superUsers} can both produce and consume even though its declared ACLs grant only
 * WRITE and DESCRIBE, while a regular user with the same ACLs can produce but is denied
 * consumption (group authorization failure).
 */
@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testAclWithSuperUser(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());

    // Kafka cluster with simple authorization; the test user is declared a super-user
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .withNewKafkaAuthorizationSimple()
                    .withSuperUsers("CN=" + userName)
                .endKafkaAuthorizationSimple()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9094)
                    .withType(KafkaListenerType.NODEPORT)
                    .withTls(true)
                    .withAuth(new KafkaListenerAuthenticationTls())
                    .build())
            .endKafka()
        .endSpec()
        .build());

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    // ACLs grant only WRITE (+ DESCRIBE for metadata) — deliberately no READ
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName)
        .editSpec()
            .withNewKafkaUserAuthorizationSimple()
                .addNewAcl()
                    .withNewAclRuleTopicResource()
                        .withName(topicName)
                    .endAclRuleTopicResource()
                    .withOperation(AclOperation.WRITE)
                .endAcl()
                .addNewAcl()
                    .withNewAclRuleTopicResource()
                        .withName(topicName)
                    .endAclRuleTopicResource()
                    // DESCRIBE is needed so the user can fetch topic metadata
                    .withOperation(AclOperation.DESCRIBE)
                .endAcl()
            .endKafkaUserAuthorizationSimple()
        .endSpec()
        .build());

    LOGGER.info("Checking kafka super user:{} that is able to send messages to topic:{}", userName, topicName);

    ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();

    assertThat(externalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));

    LOGGER.info("Checking kafka super user:{} that is able to read messages to topic:{} regardless that "
        + "we configured Acls with only write operation", userName, topicName);

    assertThat(externalKafkaClient.receiveMessagesTls(), is(MESSAGE_COUNT));

    // a second user with identical ACLs but NOT listed as a super-user
    String nonSuperuserName = userName + "-non-super-user";

    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, nonSuperuserName)
        .editSpec()
            .withNewKafkaUserAuthorizationSimple()
                .addNewAcl()
                    .withNewAclRuleTopicResource()
                        .withName(topicName)
                    .endAclRuleTopicResource()
                    .withOperation(AclOperation.WRITE)
                .endAcl()
                .addNewAcl()
                    .withNewAclRuleTopicResource()
                        .withName(topicName)
                    .endAclRuleTopicResource()
                    // DESCRIBE is needed so the user can fetch topic metadata
                    .withOperation(AclOperation.DESCRIBE)
                .endAcl()
            .endKafkaUserAuthorizationSimple()
        .endSpec()
        .build());

    // fixed: the original log messages incorrectly called this non-super user a "super user"
    LOGGER.info("Checking kafka non-super user:{} that is able to send messages to topic:{}", nonSuperuserName, topicName);

    externalKafkaClient = externalKafkaClient.toBuilder()
        .withKafkaUsername(nonSuperuserName)
        .build();

    assertThat(externalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));

    LOGGER.info("Checking kafka non-super user:{} that is not able to read messages to topic:{} because of defined"
        + " ACLs on only write operation", nonSuperuserName, topicName);

    // a fresh consumer group ensures the failure is the group-authorization denial, not state reuse
    ExternalKafkaClient newExternalKafkaClient = externalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();

    assertThrows(GroupAuthorizationException.class, newExternalKafkaClient::receiveMessagesTls);
}
Usage of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi-kafka-operator by strimzi:
class SpecificIsolatedST, method testLoadBalancerSourceRanges.
/**
 * Verifies that {@code loadBalancerSourceRanges} controls client access to an external
 * load-balancer listener: clients on the machine's own network range can connect, and after
 * the range is replaced with an unrelated CIDR block, connections time out.
 */
@IsolatedTest("Using more than one Kafka cluster in one namespace")
@Tag(LOADBALANCER_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testLoadBalancerSourceRanges(ExtensionContext extensionContext) {
    String networkInterfaces = Exec.exec("ip", "route").out();
    // fixed: dots in the IPv4 pattern must be escaped, otherwise '.' matches any character
    Pattern ipv4InterfacesPattern = Pattern.compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+ dev (eth0|enp11s0u1).*");
    Matcher ipv4InterfacesMatcher = ipv4InterfacesPattern.matcher(networkInterfaces);
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    // if no route matches, group(0) below throws IllegalStateException and fails the test
    ipv4InterfacesMatcher.find();
    LOGGER.info(ipv4InterfacesMatcher.group(0));

    String correctNetworkInterface = ipv4InterfacesMatcher.group(0);
    String[] correctNetworkInterfaceStrings = correctNetworkInterface.split(" ");
    // first token of the route line is the CIDR, e.g. "192.168.1.0/24"
    String ipWithPrefix = correctNetworkInterfaceStrings[0];

    LOGGER.info("Network address of machine with associated prefix is {}", ipWithPrefix);

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3)
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9094)
                    .withType(KafkaListenerType.LOADBALANCER)
                    .withTls(false)
                    .withNewConfiguration()
                        .withLoadBalancerSourceRanges(Collections.singletonList(ipWithPrefix))
                        .withFinalizers(LB_FINALIZERS)
                    .endConfiguration()
                    .build())
            .endKafka()
        .endSpec()
        .build());

    ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
        .withTopicName(TOPIC_NAME)
        .withNamespaceName(INFRA_NAMESPACE)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();

    // our own range is allowed, so messaging must succeed
    externalKafkaClient.verifyProducedAndConsumedMessages(
        externalKafkaClient.sendMessagesPlain(),
        externalKafkaClient.receiveMessagesPlain()
    );

    String invalidNetworkAddress = "255.255.255.111/30";

    LOGGER.info("Replacing Kafka CR invalid load-balancer source range to {}", invalidNetworkAddress);

    // fixed: the original replacement re-applied ipWithPrefix (the valid range) instead of
    // invalidNetworkAddress, so the expected connection timeout below could never occur
    KafkaResource.replaceKafkaResource(clusterName, kafka -> kafka.getSpec().getKafka().setListeners(
        Collections.singletonList(new GenericKafkaListenerBuilder()
            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .withPort(9094)
            .withType(KafkaListenerType.LOADBALANCER)
            .withTls(false)
            .withNewConfiguration()
                .withLoadBalancerSourceRanges(Collections.singletonList(invalidNetworkAddress))
                .withFinalizers(LB_FINALIZERS)
            .endConfiguration()
            .build())));

    LOGGER.info("Expecting that clients will not be able to connect to external load-balancer service cause of invalid load-balancer source range.");

    ExternalKafkaClient newExternalKafkaClient = externalKafkaClient.toBuilder()
        .withMessageCount(2 * MESSAGE_COUNT)
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();

    assertThrows(TimeoutException.class, () -> newExternalKafkaClient.verifyProducedAndConsumedMessages(
        newExternalKafkaClient.sendMessagesPlain(),
        newExternalKafkaClient.receiveMessagesPlain()
    ));
}
Usage of io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient in project strimzi-kafka-operator by strimzi:
class SecurityST, method testAclRuleReadAndWrite.
// Verifies that simple ACL authorization separates produce and consume rights: a user granted
// only WRITE+DESCRIBE on the topic can send but not receive, and a user granted READ+DESCRIBE
// on the topic plus READ on the consumer group can receive but not send.
@ParallelNamespaceTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testAclRuleReadAndWrite(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaUserWrite = "kafka-user-write";
final String kafkaUserRead = "kafka-user-read";
final int numberOfMessages = 500;
final String consumerGroupName = "consumer-group-name-1";
// Kafka cluster with simple authorization and a TLS-authenticated NodePort listener
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1).editSpec().editKafka().withNewKafkaAuthorizationSimple().endKafkaAuthorizationSimple().withListeners(new GenericKafkaListenerBuilder().withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true).withAuth(new KafkaListenerAuthenticationTls()).build()).endKafka().endSpec().build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
// writer user: WRITE + DESCRIBE on the topic only — no READ, no consumer-group ACL
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, kafkaUserWrite).editSpec().withNewKafkaUserAuthorizationSimple().addNewAcl().withNewAclRuleTopicResource().withName(topicName).endAclRuleTopicResource().withOperation(AclOperation.WRITE).endAcl().addNewAcl().withNewAclRuleTopicResource().withName(topicName).endAclRuleTopicResource().withOperation(// DESCRIBE is needed so the user can fetch topic metadata
AclOperation.DESCRIBE).endAcl().endKafkaUserAuthorizationSimple().endSpec().build());
LOGGER.info("Checking KafkaUser {} that is able to send messages to topic '{}'", kafkaUserWrite, topicName);
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder().withTopicName(topicName).withNamespaceName(namespaceName).withClusterName(clusterName).withKafkaUsername(kafkaUserWrite).withMessageCount(numberOfMessages).withSecurityProtocol(SecurityProtocol.SSL).withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).build();
assertThat(externalKafkaClient.sendMessagesTls(), is(numberOfMessages));
// the writer has no consumer-group READ ACL, so consuming must fail with a group authorization error
assertThrows(GroupAuthorizationException.class, externalKafkaClient::receiveMessagesTls);
// reader user: READ + DESCRIBE on the topic and READ on the consumer group — no WRITE
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, kafkaUserRead).editSpec().withNewKafkaUserAuthorizationSimple().addNewAcl().withNewAclRuleTopicResource().withName(topicName).endAclRuleTopicResource().withOperation(AclOperation.READ).endAcl().addNewAcl().withNewAclRuleGroupResource().withName(consumerGroupName).endAclRuleGroupResource().withOperation(AclOperation.READ).endAcl().addNewAcl().withNewAclRuleTopicResource().withName(topicName).endAclRuleTopicResource().withOperation(// DESCRIBE is needed so the user can fetch topic metadata
AclOperation.DESCRIBE).endAcl().endKafkaUserAuthorizationSimple().endSpec().build());
// same client configuration, switched to the read-only user and its authorized consumer group
ExternalKafkaClient newExternalKafkaClient = externalKafkaClient.toBuilder().withKafkaUsername(kafkaUserRead).withConsumerGroupName(consumerGroupName).build();
assertThat(newExternalKafkaClient.receiveMessagesTls(), is(numberOfMessages));
LOGGER.info("Checking KafkaUser {} that is not able to send messages to topic '{}'", kafkaUserRead, topicName);
// producing with the read-only user must be rejected
assertThrows(Exception.class, newExternalKafkaClient::sendMessagesTls);
}
Aggregations