Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
The class SecurityST, method testTlsHostnameVerificationWithMirrorMaker.
@ParallelNamespaceTest
@Tag(MIRROR_MAKER)
void testTlsHostnameVerificationWithMirrorMaker(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String sourceKafkaCluster = clusterName + "-source";
    final String targetKafkaCluster = clusterName + "-target";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(sourceKafkaCluster, 1, 1).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(targetKafkaCluster, 1, 1).build());

    LOGGER.info("Getting IP of the source bootstrap service for consumer");
    String ipOfSourceBootstrapService = kubeClient(namespaceName).getService(namespaceName, KafkaResources.bootstrapServiceName(sourceKafkaCluster)).getSpec().getClusterIP();

    LOGGER.info("Getting IP of the target bootstrap service for producer");
    String ipOfTargetBootstrapService = kubeClient(namespaceName).getService(namespaceName, KafkaResources.bootstrapServiceName(targetKafkaCluster)).getSpec().getClusterIP();

    LOGGER.info("KafkaMirrorMaker without config {} will not connect to consumer with address {}:9093", "ssl.endpoint.identification.algorithm", ipOfSourceBootstrapService);
    LOGGER.info("KafkaMirrorMaker without config {} will not connect to producer with address {}:9093", "ssl.endpoint.identification.algorithm", ipOfTargetBootstrapService);

    resourceManager.createResource(extensionContext, false,
        KafkaMirrorMakerTemplates.kafkaMirrorMaker(clusterName, sourceKafkaCluster, targetKafkaCluster, ClientUtils.generateRandomConsumerGroup(), 1, true)
            .editSpec()
                .editConsumer()
                    .withNewTls()
                        .addNewTrustedCertificate()
                            .withSecretName(KafkaResources.clusterCaCertificateSecretName(sourceKafkaCluster))
                            .withCertificate("ca.crt")
                        .endTrustedCertificate()
                    .endTls()
                    .withBootstrapServers(ipOfSourceBootstrapService + ":9093")
                .endConsumer()
                .editProducer()
                    .withNewTls()
                        .addNewTrustedCertificate()
                            .withSecretName(KafkaResources.clusterCaCertificateSecretName(targetKafkaCluster))
                            .withCertificate("ca.crt")
                        .endTrustedCertificate()
                    .endTls()
                    .withBootstrapServers(ipOfTargetBootstrapService + ":9093")
                .endProducer()
            .endSpec()
            .build());

    PodUtils.waitUntilPodIsPresent(namespaceName, clusterName + "-mirror-maker");

    String kafkaMirrorMakerPodName = kubeClient(namespaceName).listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND).get(0).getMetadata().getName();

    PodUtils.waitUntilPodIsInCrashLoopBackOff(namespaceName, kafkaMirrorMakerPodName);
    assertThat(kubeClient(namespaceName).getPod(namespaceName, kafkaMirrorMakerPodName).getStatus().getContainerStatuses().get(0).getState().getWaiting().getReason(), is("CrashLoopBackOff"));

    LOGGER.info("KafkaMirrorMaker with config {} will connect to consumer with address {}:9093", "ssl.endpoint.identification.algorithm", ipOfSourceBootstrapService);
    LOGGER.info("KafkaMirrorMaker with config {} will connect to producer with address {}:9093", "ssl.endpoint.identification.algorithm", ipOfTargetBootstrapService);
    LOGGER.info("Adding configuration {} to the mirror maker...", "ssl.endpoint.identification.algorithm");

    KafkaMirrorMakerResource.replaceMirrorMakerResourceInSpecificNamespace(clusterName, mm -> {
        // disable hostname verification
        mm.getSpec().getConsumer().getConfig().put("ssl.endpoint.identification.algorithm", "");
        // disable hostname verification
        mm.getSpec().getProducer().getConfig().put("ssl.endpoint.identification.algorithm", "");
    }, namespaceName);

    KafkaMirrorMakerUtils.waitForKafkaMirrorMakerReady(namespaceName, clusterName);

    KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(namespaceName).withName(clusterName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
    DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName));
}
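The failure the test provokes is ordinary TLS hostname verification: the client checks the address it dialed against the SANs in the broker certificate, and Strimzi's broker certificates carry the Kubernetes service DNS names, not ClusterIPs, so bootstrapping over the IP fails until the check is disabled. A minimal sketch of the equivalent plain Kafka client setting; the helper name and the ClusterIP are illustrative, not from the test:

import java.util.Properties;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.config.SslConfigs;

static Properties tlsPropsWithoutHostnameVerification(String bootstrapIp) {
    Properties props = new Properties();
    // e.g. "10.96.12.34" -- a hypothetical ClusterIP, as used by the test
    props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapIp + ":9093");
    props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
    // The default is "https" (verification on); an empty string disables the
    // SAN check, so connecting by IP no longer breaks the TLS handshake.
    props.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
    return props;
}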
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
The class SecurityST, method testCustomClusterCAClientsCA.
@ParallelNamespaceTest
void testCustomClusterCAClientsCA(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());

    generateAndDeployCustomStrimziCA(namespaceName, clusterName);
    checkCustomCAsCorrectness(namespaceName, clusterName);

    LOGGER.info("Deploying Kafka with the new certs/secrets");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3)
        .editSpec()
            .withNewClusterCa()
                .withGenerateCertificateAuthority(false)
            .endClusterCa()
            .withNewClientsCa()
                .withGenerateCertificateAuthority(false)
            .endClientsCa()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withType(KafkaListenerType.INTERNAL)
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withTls(false)
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withType(KafkaListenerType.INTERNAL)
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withTls(true)
                        .withNewKafkaListenerAuthenticationTlsAuth()
                        .endKafkaListenerAuthenticationTlsAuth()
                        .build())
            .endKafka()
        .endSpec()
        .build());

    LOGGER.info("Checking Kafka and ZooKeeper certificates");
    X509Certificate kafkaCert = SecretUtils.getCertificateFromSecret(kubeClient(namespaceName).getSecret(namespaceName, clusterName + "-kafka-brokers"), clusterName + "-kafka-0.crt");
    assertThat("KafkaCert does not have the expected test Issuer: " + kafkaCert.getIssuerDN(),
        SystemTestCertManager.containsAllDN(kafkaCert.getIssuerX500Principal().getName(), STRIMZI_TEST_CLUSTER_CA));

    X509Certificate zookeeperCert = SecretUtils.getCertificateFromSecret(kubeClient(namespaceName).getSecret(namespaceName, clusterName + "-zookeeper-nodes"), clusterName + "-zookeeper-0.crt");
    assertThat("ZookeeperCert does not have the expected test Issuer: " + zookeeperCert.getIssuerDN(),
        SystemTestCertManager.containsAllDN(zookeeperCert.getIssuerX500Principal().getName(), STRIMZI_TEST_CLUSTER_CA));

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    LOGGER.info("Checking KafkaUser certificate");
    KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();
    resourceManager.createResource(extensionContext, user);
    X509Certificate userCert = SecretUtils.getCertificateFromSecret(kubeClient(namespaceName).getSecret(namespaceName, userName), "user.crt");
    // The user certificate's issuer DN must match the subject DN of the custom clients CA that signed it
    assertThat("Generated ClientsCA does not have the expected test Subject: " + userCert.getIssuerDN(),
        SystemTestCertManager.containsAllDN(userCert.getIssuerX500Principal().getName(), STRIMZI_TEST_CLIENTS_CA));

    LOGGER.info("Sending and receiving messages over TLS");
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, user).build());
    final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Checking the certificates used by the internal clients (producer/consumer) in the clients pod");
    List<VolumeMount> volumeMounts = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getSpec().getContainers().get(0).getVolumeMounts();
    for (VolumeMount vm : volumeMounts) {
        if (vm.getMountPath().contains("user-secret-" + internalKafkaClient.getKafkaUsername())) {
            assertThat("UserCert Issuer DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "issuer", STRIMZI_INTERMEDIATE_CA));
            assertThat("UserCert Subject DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "subject", STRIMZI_TEST_CLIENTS_CA));
        } else if (vm.getMountPath().contains("cluster-ca-" + internalKafkaClient.getKafkaUsername())) {
            assertThat("ClusterCA Issuer DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "issuer", STRIMZI_INTERMEDIATE_CA));
            assertThat("ClusterCA Subject DN in clients pod is incorrect!", checkMountVolumeSecret(namespaceName, kafkaClientsPodName, vm, "subject", STRIMZI_TEST_CLUSTER_CA));
        }
    }

    LOGGER.info("Checking produced and consumed messages via TLS on pod {}", kafkaClientsPodName);
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesTls(), internalKafkaClient.receiveMessagesTls());
}
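The issuer assertions rest on a basic X.509 fact: a leaf certificate's issuer DN equals the subject DN of the CA that signed it, so checking the broker/user certificate issuers proves the custom CAs were actually used. A minimal sketch of how a certificate can be pulled out of a Kubernetes Secret and inspected; this assumes the usual Secret layout and is not the SecretUtils API itself:

import java.io.ByteArrayInputStream;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;
import io.fabric8.kubernetes.api.model.Secret;

static X509Certificate certFromSecret(Secret secret, String key) throws Exception {
    // Secret.data values are base64-encoded; the decoded bytes are the PEM
    // certificate, which CertificateFactory accepts directly.
    byte[] pem = Base64.getDecoder().decode(secret.getData().get(key));
    return (X509Certificate) CertificateFactory.getInstance("X.509")
        .generateCertificate(new ByteArrayInputStream(pem));
}

// e.g. certFromSecret(brokersSecret, clusterName + "-kafka-0.crt")
//          .getIssuerX500Principal().getName()
// should contain the subject DN of the custom cluster CA.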
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
The class SecurityST, method testCaRenewalBreakInMiddle.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testCaRenewalBreakInMiddle(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
        .editSpec()
            .withNewClusterCa()
                .withRenewalDays(1)
                .withValidityDays(3)
            .endClusterCa()
        .endSpec()
        .build());

    KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();
    resourceManager.createResource(extensionContext, user);
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, clusterName + "-" + Constants.KAFKA_CLIENTS, user).build());

    String defaultKafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(defaultKafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    internalKafkaClient = internalKafkaClient.toBuilder().withUsingPodName(defaultKafkaClientsPodName).build();
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesTls(), internalKafkaClient.receiveMessagesTls());

    // Snapshot the current pods so the rolling update can be detected later
    Map<String, String> zkPods = PodUtils.podSnapshot(namespaceName, zkSelector);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));

    // Replace the cluster CA certificate with an already-expired one to force a renewal
    InputStream secretInputStream = getClass().getClassLoader().getResourceAsStream("security-st-certs/expired-cluster-ca.crt");
    String clusterCaCert = TestUtils.readResource(secretInputStream);
    SecretUtils.createSecret(namespaceName, clusterCaCertificateSecretName(clusterName), "ca.crt", clusterCaCert);

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        // An unsatisfiable CPU request keeps the ZooKeeper pods Pending, so the CA-renewal rolling update gets stuck in the middle
        k.getSpec().getZookeeper().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100000m"))
            .build());
        k.getSpec().setClusterCa(new CertificateAuthorityBuilder()
            .withRenewalDays(4)
            .withValidityDays(7)
            .build());
    }, namespaceName);

    TestUtils.waitFor("some ZooKeeper pod to be in the Pending phase because of the unsatisfiable CPU request", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> {
        List<Pod> pendingPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName)).stream()
            .filter(pod -> pod.getStatus().getPhase().equals("Pending"))
            .collect(Collectors.toList());
        if (pendingPods.isEmpty()) {
            LOGGER.info("No pods of {} are in the desired state", KafkaResources.zookeeperStatefulSetName(clusterName));
            return false;
        } else {
            LOGGER.info("Pod in 'Pending' state: {}", pendingPods.get(0).getMetadata().getName());
            return true;
        }
    });

    // Messages must still be consumable while the rolling update is stuck
    internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
    int received = internalKafkaClient.receiveMessagesTls();
    assertThat(received, is(MESSAGE_COUNT));

    // Restore a reasonable CPU request so the rolling update can continue
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getZookeeper().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("200m"))
            .build());
    }, namespaceName);

    // Wait until the certificates have been replaced
    SecretUtils.waitForCertToChange(namespaceName, clusterCaCert, KafkaResources.clusterCaCertificateSecretName(clusterName));
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(namespaceName, zkSelector, 3, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(namespaceName, kafkaSelector, 3, kafkaPods);
    DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);

    internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
    LOGGER.info("Checking produced and consumed messages on pod {}", internalKafkaClient.getPodName());
    received = internalKafkaClient.receiveMessagesTls();
    assertThat(received, is(MESSAGE_COUNT));

    // Try to send and receive messages with the new certificates
    topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    internalKafkaClient = internalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .withTopicName(topicName)
        .build();
    internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesTls(), internalKafkaClient.receiveMessagesTls());
}
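The "break in the middle" is engineered purely through scheduling: a 100000m CPU request can never be satisfied, so a ZooKeeper pod sticks in Pending and the CA-renewal roll stalls until the request is lowered again. The final wait can be pictured as polling the CA secret until ca.crt differs from the expired certificate that was injected; a rough sketch of that idea (not the actual SecretUtils implementation, which has its own poll interval and timeout):

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.client.KubernetesClient;

static void waitForCaCertToChange(KubernetesClient client, String ns, String secretName, String oldCaCrt) throws InterruptedException {
    while (true) {
        Secret s = client.secrets().inNamespace(ns).withName(secretName).get();
        if (s != null && s.getData() != null && s.getData().containsKey("ca.crt")) {
            // Secret data is base64-encoded; decode before comparing with the injected PEM
            String current = new String(Base64.getDecoder().decode(s.getData().get("ca.crt")), StandardCharsets.UTF_8);
            if (!current.equals(oldCaCrt)) {
                return; // the operator has issued a fresh cluster CA certificate
            }
        }
        Thread.sleep(5_000); // illustrative fixed poll interval
    }
}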
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
The class UserST, method testCreatingUsersWithSecretPrefix.
@ParallelNamespaceTest
void testCreatingUsersWithSecretPrefix(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String secretPrefix = "top-secret-";
    final String tlsUserName = "encrypted-leopold";
    final String scramShaUserName = "scramed-leopold";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationScramSha512Auth()
                        .endKafkaListenerAuthenticationScramSha512Auth()
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withNewKafkaListenerAuthenticationTlsAuth()
                        .endKafkaListenerAuthenticationTlsAuth()
                        .build())
            .endKafka()
            .editEntityOperator()
                .editUserOperator()
                    .withSecretPrefix(secretPrefix)
                .endUserOperator()
            .endEntityOperator()
        .endSpec()
        .build());

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    KafkaUser tlsUser = KafkaUserTemplates.tlsUser(clusterName, tlsUserName).build();
    KafkaUser scramShaUser = KafkaUserTemplates.scramShaUser(clusterName, scramShaUserName).build();
    resourceManager.createResource(extensionContext, tlsUser);
    resourceManager.createResource(extensionContext, scramShaUser);

    LOGGER.info("Deploying KafkaClients pod for the TLS listener");
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, true, clusterName + "-tls-" + Constants.KAFKA_CLIENTS, true, Constants.TLS_LISTENER_DEFAULT_NAME, secretPrefix, tlsUser).build());
    String tlsKafkaClientsName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-tls-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    LOGGER.info("Deploying KafkaClients pod for the PLAIN listener");
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, false, clusterName + "-plain-" + Constants.KAFKA_CLIENTS, true, Constants.PLAIN_LISTENER_DEFAULT_NAME, secretPrefix, scramShaUser).build());
    String plainKafkaClientsName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-plain-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    Secret tlsSecret = kubeClient(namespaceName).getSecret(secretPrefix + tlsUserName);
    Secret scramShaSecret = kubeClient(namespaceName).getSecret(secretPrefix + scramShaUserName);

    LOGGER.info("Checking that the user secrets with the secret prefix exist");
    assertNotNull(tlsSecret);
    assertNotNull(scramShaSecret);

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(tlsKafkaClientsName)
        .withNamespaceName(namespaceName)
        .withTopicName(TOPIC_NAME)
        .withKafkaUsername(tlsUserName)
        .withSecurityProtocol(SecurityProtocol.SASL_SSL)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withSecretPrefix(secretPrefix)
        .build();

    LOGGER.info("Checking that the TLS user is able to send messages");
    internalKafkaClient.assertSentAndReceivedMessages(internalKafkaClient.sendMessagesTls(), internalKafkaClient.receiveMessagesTls());

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withUsingPodName(plainKafkaClientsName)
        .withSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .withKafkaUsername(scramShaUserName)
        .build();

    LOGGER.info("Checking that the SCRAM-SHA user is able to send messages");
    internalKafkaClient.assertSentAndReceivedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());

    LOGGER.info("Checking the owner reference: the secret should be deleted when its KafkaUser is deleted");
    LOGGER.info("Deleting KafkaUser: {}", tlsUserName);
    KafkaUserResource.kafkaUserClient().inNamespace(namespaceName).withName(tlsUserName).delete();
    KafkaUserUtils.waitForKafkaUserDeletion(namespaceName, tlsUserName);
    LOGGER.info("Deleting KafkaUser: {}", scramShaUserName);
    KafkaUserResource.kafkaUserClient().inNamespace(namespaceName).withName(scramShaUserName).delete();
    KafkaUserUtils.waitForKafkaUserDeletion(namespaceName, scramShaUserName);

    LOGGER.info("Checking that the secrets are deleted");
    SecretUtils.waitForSecretDeletion(namespaceName, tlsSecret.getMetadata().getName());
    SecretUtils.waitForSecretDeletion(namespaceName, scramShaSecret.getMetadata().getName());
    assertNull(kubeClient(namespaceName).getSecret(namespaceName, tlsSecret.getMetadata().getName()));
    assertNull(kubeClient(namespaceName).getSecret(namespaceName, scramShaSecret.getMetadata().getName()));
}
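The naming contract under test: with spec.entityOperator.userOperator.secretPrefix set, the KafkaUser resource keeps its plain name, but the credentials Secret the User Operator generates is named with the prefix prepended. A small sketch of that lookup with the fabric8 client; the helper name is illustrative:

import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.client.KubernetesClient;

static Secret credentialsFor(KubernetesClient client, String ns, String secretPrefix, String userName) {
    // e.g. "top-secret-" + "encrypted-leopold" -> Secret "top-secret-encrypted-leopold",
    // while the KafkaUser itself is still named "encrypted-leopold"
    return client.secrets().inNamespace(ns).withName(secretPrefix + userName).get();
}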
Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.
The class AlternativeReconcileTriggersST, method testManualTriggeringRollingUpdate.
@ParallelNamespaceTest
@SuppressWarnings("checkstyle:MethodLength")
void testManualTriggeringRollingUpdate(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String continuousTopicName = "continuous-topic";
    // With a 1000 ms delay per message, 500 messages take roughly 500 seconds
    final int continuousClientsMessageCount = 500;
    final String producerName = "hello-world-producer";
    final String consumerName = "hello-world-consumer";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).build());

    String kafkaName = KafkaResources.kafkaStatefulSetName(clusterName);
    String zkName = KafkaResources.zookeeperStatefulSetName(clusterName);
    LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaName);
    LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, zkName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(namespaceName, zkSelector);

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, namespaceName).build());

    // ##############################
    // Attach clients which will continuously produce/consume messages to/from Kafka brokers during the rolling update
    // ##############################
    // Set up a topic with 3 replicas and min.insync.replicas=2 so the producer can keep working during the rolling update
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, continuousTopicName, 3, 3, 2).build());

    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    // Add a transactional id to make the producer transactional
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
    producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");

    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(continuousTopicName)
        .withMessageCount(continuousClientsMessageCount)
        .withAdditionalConfig(producerAdditionConfiguration)
        .withDelayMs(1000)
        .withNamespaceName(namespaceName)
        .build();

    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################

    String userName = KafkaUserUtils.generateRandomNameOfKafkaUser();
    KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();

    resourceManager.createResource(extensionContext, user);
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());

    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(userName)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    internalKafkaClient.produceTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    // Rolling update for Kafka:
    // set the annotation that triggers a Kafka rolling update
    LOGGER.info("Annotate Kafka {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, kafkaName);
    StUtils.annotateStatefulSetOrStrimziPodSet(namespaceName, kafkaName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));

    // check that the annotation that triggers the rolling update is present
    assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(namespaceName, kafkaName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));

    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);

    // wait until the Cluster Operator removes the annotation
    TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(namespaceName, kafkaName) == null
            || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(namespaceName, kafkaName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));

    int received = internalKafkaClient.receiveMessagesTls();
    assertThat(received, is(MESSAGE_COUNT));

    // Rolling update for ZooKeeper:
    // set the annotation that triggers a ZooKeeper rolling update
    LOGGER.info("Annotate Zookeeper {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, zkName);
    StUtils.annotateStatefulSetOrStrimziPodSet(namespaceName, zkName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));

    // check that the annotation that triggers the rolling update is present
    assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(namespaceName, zkName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));

    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, zkSelector, 3, zkPods);

    // wait until the Cluster Operator removes the annotation
    TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(namespaceName, zkName) == null
            || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(namespaceName, zkName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));

    internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
    received = internalKafkaClient.receiveMessagesTls();
    assertThat(received, is(MESSAGE_COUNT));

    // Create a new topic to ensure that ZooKeeper is working properly
    String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, newTopicName, 1, 1).build());

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withTopicName(newTopicName)
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();

    int sent = internalKafkaClient.sendMessagesTls();
    assertThat(sent, is(MESSAGE_COUNT));
    received = internalKafkaClient.receiveMessagesTls();
    assertThat(received, is(sent));

    // ##############################
    // Validate that continuous clients finished successfully
    // ##############################
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, namespaceName, continuousClientsMessageCount);
    // ##############################
}
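Both rolls are driven by the same mechanism: adding strimzi.io/manual-rolling-update=true to the StatefulSet (or StrimziPodSet), after which the Cluster Operator restarts the pods on its next reconciliation and strips the annotation once it has acted. A sketch of the StatefulSet case with the fabric8 client; the helper is illustrative, not the StUtils implementation, and assumes a fabric8 version whose edit() accepts a function:

import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;

static void requestManualRollingUpdate(KubernetesClient client, String ns, String stsName) {
    // The Cluster Operator watches for this annotation, rolls the pods,
    // and then removes the annotation again (which is what the test waits for)
    client.apps().statefulSets().inNamespace(ns).withName(stsName)
        .edit(sts -> new StatefulSetBuilder(sts)
            .editMetadata()
                .addToAnnotations("strimzi.io/manual-rolling-update", "true")
            .endMetadata()
            .build());
}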