Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class MirrorMakerIsolatedST, method testMirrorMakerTlsScramSha.
/**
* Test mirroring messages by Mirror Maker over a TLS transport using SCRAM-SHA authentication.
*/
@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
@SuppressWarnings("checkstyle:methodlength")
void testMirrorMakerTlsScramSha(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
String kafkaClusterTargetName = testStorage.getClusterName() + "-target";
String kafkaSourceUserName = testStorage.getUserName() + "-source";
String kafkaTargetUserName = testStorage.getUserName() + "-target";
// Deploy source kafka with tls listener and SCRAM-SHA authentication
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationScramSha512())
                .build())
        .endKafka()
    .endSpec()
    .build());
// Deploy target kafka with tls listener and SCRAM-SHA authentication
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withAuth(new KafkaListenerAuthenticationScramSha512())
                .build())
        .endKafka()
    .endSpec()
    .build());
// Deploy topic and SCRAM-SHA users for the source and target clusters
resourceManager.createResource(extensionContext,
    KafkaTopicTemplates.topic(kafkaClusterSourceName, testStorage.getTopicName()).build(),
    KafkaUserTemplates.scramShaUser(kafkaClusterSourceName, kafkaSourceUserName).build(),
    KafkaUserTemplates.scramShaUser(kafkaClusterTargetName, kafkaTargetUserName).build());
// Initialize PasswordSecretSource to set this as PasswordSecret in Mirror Maker spec
PasswordSecretSource passwordSecretSource = new PasswordSecretSource();
passwordSecretSource.setSecretName(kafkaSourceUserName);
passwordSecretSource.setPassword("password");
// Initialize PasswordSecretSource to set this as PasswordSecret in Mirror Maker spec
PasswordSecretSource passwordSecretTarget = new PasswordSecretSource();
passwordSecretTarget.setSecretName(kafkaTargetUserName);
passwordSecretTarget.setPassword("password");
// Initialize CertSecretSource with certificate and secret names for consumer
CertSecretSource certSecretSource = new CertSecretSource();
certSecretSource.setCertificate("ca.crt");
certSecretSource.setSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterSourceName));
// Initialize CertSecretSource with certificate and secret names for producer
CertSecretSource certSecretTarget = new CertSecretSource();
certSecretTarget.setCertificate("ca.crt");
certSecretTarget.setSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterTargetName));
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(kafkaClusterSourceName))
    .withNamespaceName(testStorage.getNamespaceName())
    .withUserName(kafkaSourceUserName)
    .withTopicName(testStorage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .build();
resourceManager.createResource(extensionContext, clients.producerScramShaTlsStrimzi(kafkaClusterSourceName), clients.consumerScramShaTlsStrimzi(kafkaClusterSourceName));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Deploy Mirror Maker with TLS and ScramSha512
resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), kafkaClusterSourceName, kafkaClusterTargetName, ClientUtils.generateRandomConsumerGroup(), 1, true)
    .editSpec()
        .editConsumer()
            .withNewKafkaClientAuthenticationScramSha512()
                .withUsername(kafkaSourceUserName)
                .withPasswordSecret(passwordSecretSource)
            .endKafkaClientAuthenticationScramSha512()
            .withNewTls()
                .withTrustedCertificates(certSecretSource)
            .endTls()
        .endConsumer()
        .editProducer()
            .withNewKafkaClientAuthenticationScramSha512()
                .withUsername(kafkaTargetUserName)
                .withPasswordSecret(passwordSecretTarget)
            .endKafkaClientAuthenticationScramSha512()
            .withNewTls()
                .withTrustedCertificates(certSecretTarget)
            .endTls()
        .endProducer()
    .endSpec()
    .build());
clients = new KafkaClientsBuilder(clients).withBootstrapAddress(KafkaResources.tlsBootstrapAddress(kafkaClusterTargetName)).withUserName(kafkaTargetUserName).build();
resourceManager.createResource(extensionContext, clients.consumerScramShaTlsStrimzi(kafkaClusterTargetName));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
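The consumer and producer credentials above are wired up symmetrically for the source and target clusters: each side gets a PasswordSecretSource pointing at its SCRAM user secret and a CertSecretSource pointing at its cluster CA certificate. A minimal sketch of that symmetry factored into helpers (the helper names are hypothetical; every call they make appears in the test above):

private static PasswordSecretSource scramPasswordSecret(String userSecretName) {
    // Assumption carried over from the test: the password is stored under the "password" key of the user secret.
    PasswordSecretSource passwordSecret = new PasswordSecretSource();
    passwordSecret.setSecretName(userSecretName);
    passwordSecret.setPassword("password");
    return passwordSecret;
}

private static CertSecretSource clusterCaCert(String clusterName) {
    // The cluster CA certificate secret name comes from the Strimzi naming convention exposed via KafkaResources.
    CertSecretSource certSecret = new CertSecretSource();
    certSecret.setCertificate("ca.crt");
    certSecret.setSecretName(KafkaResources.clusterCaCertificateSecretName(clusterName));
    return certSecret;
}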
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class MirrorMakerIsolatedST, method testScaleMirrorMakerUpAndDownToZero.
@ParallelNamespaceTest
@Tag(SCALABILITY)
void testScaleMirrorMakerUpAndDownToZero(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
String kafkaClusterTargetName = testStorage.getClusterName() + "-target";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build(), KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), kafkaClusterTargetName, kafkaClusterSourceName, ClientUtils.generateRandomConsumerGroup(), 1, false).build());
int scaleTo = 2;
long mmObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration();
String mmDepName = KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName());
String mmGenName = kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND).get(0).getMetadata().getGenerateName();
LOGGER.info("-------> Scaling KafkaMirrorMaker up <-------");
LOGGER.info("Scaling subresource replicas to {}", scaleTo);
cmdKubeClient().namespace(testStorage.getNamespaceName()).scaleByName(KafkaMirrorMaker.RESOURCE_KIND, testStorage.getClusterName(), scaleTo);
DeploymentUtils.waitForDeploymentAndPodsReady(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), scaleTo);
LOGGER.info("Check if replicas is set to {}, naming prefix should be same and observed generation higher", scaleTo);
List<String> mmPods = kubeClient().listPodNames(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND);
assertThat(mmPods.size(), is(scaleTo));
assertThat(KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getSpec().getReplicas(), is(scaleTo));
assertThat(KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getReplicas(), is(scaleTo));
/*
observed generation should be higher than before scaling -> after change of spec and successful reconciliation,
the observed generation is increased
*/
long actualObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration();
assertTrue(mmObsGen < actualObsGen);
for (String pod : mmPods) {
assertTrue(pod.contains(mmGenName));
}
mmObsGen = actualObsGen;
LOGGER.info("-------> Scaling KafkaMirrorMaker down <-------");
LOGGER.info("Scaling MirrorMaker to zero");
KafkaMirrorMakerResource.replaceMirrorMakerResourceInSpecificNamespace(testStorage.getClusterName(), mm -> mm.getSpec().setReplicas(0), testStorage.getNamespaceName());
PodUtils.waitForPodsReady(testStorage.getNamespaceName(), kubeClient().getDeploymentSelectors(testStorage.getNamespaceName(), mmDepName), 0, true);
mmPods = kubeClient().listPodNames(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND);
KafkaMirrorMakerStatus mmStatus = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus();
actualObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration();
assertThat(mmPods.size(), is(0));
assertThat(mmStatus.getConditions().get(0).getType(), is(Ready.toString()));
assertThat(actualObsGen, is(not(mmObsGen)));
}
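The scale-up branch above drives the change through the scale subresource (cmdKubeClient().scaleByName) and then waits for the Deployment, while the scale-to-zero branch patches spec.replicas directly; both end by re-reading status.observedGeneration and the pod list. A condensed sketch of the shared scale-and-wait step (the wrapper method is hypothetical; the calls inside it are the ones used above):

private static void scaleMirrorMakerAndWait(String namespaceName, String clusterName, int replicas) {
    // Scale the KafkaMirrorMaker via its scale subresource, then wait until the Deployment reports the new replica count.
    cmdKubeClient().namespace(namespaceName).scaleByName(KafkaMirrorMaker.RESOURCE_KIND, clusterName, replicas);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), replicas);
}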
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class MirrorMakerIsolatedST, method testMirrorMaker.
@ParallelNamespaceTest
void testMirrorMaker(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
String kafkaClusterTargetName = testStorage.getClusterName() + "-target";
Map<String, String> jvmOptionsXX = new HashMap<>();
jvmOptionsXX.put("UseG1GC", "true");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build(), KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, testStorage.getTopicName()).build());
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterSourceName))
    .withNamespaceName(testStorage.getNamespaceName())
    .withTopicName(testStorage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .build();
resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Deploy Mirror Maker
resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(testStorage.getClusterName(), kafkaClusterSourceName, kafkaClusterTargetName, ClientUtils.generateRandomConsumerGroup(), 1, false)
    .editSpec()
        .withResources(new ResourceRequirementsBuilder()
            .addToLimits("memory", new Quantity("400M"))
            .addToLimits("cpu", new Quantity("2"))
            .addToRequests("memory", new Quantity("300M"))
            .addToRequests("cpu", new Quantity("1"))
            .build())
        .withNewJvmOptions()
            .withXmx("200m")
            .withXms("200m")
            .withXx(jvmOptionsXX)
        .endJvmOptions()
    .endSpec()
    .build());
verifyLabelsOnPods(testStorage.getNamespaceName(), testStorage.getClusterName(), "mirror-maker", KafkaMirrorMaker.RESOURCE_KIND);
verifyLabelsForService(testStorage.getNamespaceName(), testStorage.getClusterName(), "mirror-maker", KafkaMirrorMaker.RESOURCE_KIND);
verifyLabelsForConfigMaps(testStorage.getNamespaceName(), kafkaClusterSourceName, null, kafkaClusterTargetName);
verifyLabelsForServiceAccounts(testStorage.getNamespaceName(), kafkaClusterSourceName, null);
String mmDepName = KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName());
String mirrorMakerPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(mmDepName).get(0).getMetadata().getName();
String kafkaMirrorMakerLogs = kubeClient(testStorage.getNamespaceName()).logs(mirrorMakerPodName);
assertThat(kafkaMirrorMakerLogs, not(containsString("keytool error: java.io.FileNotFoundException: /opt/kafka/consumer-oauth-certs/**/* (No such file or directory)")));
String podName = kubeClient(testStorage.getNamespaceName()).listPodsByNamespace(testStorage.getNamespaceName(), testStorage.getClusterName()).stream()
    .filter(n -> n.getMetadata().getName().startsWith(KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName())))
    .findFirst()
    .orElseThrow()
    .getMetadata().getName();
assertResources(testStorage.getNamespaceName(), podName, mmDepName, "400M", "2", "300M", "1");
assertExpectedJavaOpts(testStorage.getNamespaceName(), podName, KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), "-Xmx200m", "-Xms200m", "-XX:+UseG1GC");
clients = new KafkaClientsBuilder(clients).withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName)).build();
resourceManager.createResource(extensionContext, clients.consumerStrimzi());
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
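The JVM assertions rely on the usual mapping between the -XX options map and the flags rendered on the command line: a boolean entry such as "UseG1GC" -> "true" is expected to show up as "-XX:+UseG1GC", which is what assertExpectedJavaOpts checks alongside -Xmx200m and -Xms200m. A small illustrative fragment of that mapping (an assumption about the rendering convention, not code from the operator):

// "true" renders as "+<name>"; "false" would render as "-<name>".
Map<String, String> xxOptions = Map.of("UseG1GC", "true");
String renderedFlag = "-XX:" + ("true".equals(xxOptions.get("UseG1GC")) ? "+" : "-") + "UseG1GC";
// renderedFlag is "-XX:+UseG1GC", matching the assertExpectedJavaOpts expectation above.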
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class KafkaST, method testAppDomainLabels.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("TopicOperator is not supported by KRaft mode and is used in this test class")
void testAppDomainLabels(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3, 1).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withTopicName(testStorage.getTopicName())
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withNamespaceName(testStorage.getNamespaceName())
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withMessageCount(MESSAGE_COUNT)
    .build();
LOGGER.info("---> PODS <---");
List<Pod> pods = kubeClient(testStorage.getNamespaceName()).listPods(testStorage.getNamespaceName(), testStorage.getClusterName()).stream().filter(pod -> pod.getMetadata().getName().startsWith(testStorage.getClusterName())).collect(Collectors.toList());
for (Pod pod : pods) {
LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName());
verifyAppLabels(pod.getMetadata().getLabels());
}
LOGGER.info("---> STATEFUL SETS <---");
Map<String, String> kafkaLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()));
LOGGER.info("Getting labels from stateful set of kafka resource");
verifyAppLabels(kafkaLabels);
Map<String, String> zooLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName()));
LOGGER.info("Getting labels from stateful set of zookeeper resource");
verifyAppLabels(zooLabels);
LOGGER.info("---> SERVICES <---");
List<Service> services = kubeClient(testStorage.getNamespaceName()).listServices(testStorage.getNamespaceName()).stream().filter(service -> service.getMetadata().getName().startsWith(testStorage.getClusterName())).collect(Collectors.toList());
for (Service service : services) {
LOGGER.info("Getting labels from {} service", service.getMetadata().getName());
verifyAppLabels(service.getMetadata().getLabels());
}
LOGGER.info("---> SECRETS <---");
List<Secret> secrets = kubeClient(testStorage.getNamespaceName()).listSecrets(testStorage.getNamespaceName()).stream().filter(secret -> secret.getMetadata().getName().startsWith(testStorage.getClusterName()) && secret.getType().equals("Opaque")).collect(Collectors.toList());
for (Secret secret : secrets) {
LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels());
}
LOGGER.info("---> CONFIG MAPS <---");
List<ConfigMap> configMaps = kubeClient(testStorage.getNamespaceName()).listConfigMapsInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName());
for (ConfigMap configMap : configMaps) {
LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels());
}
resourceManager.createResource(extensionContext, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
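verifyAppLabels and verifyAppLabelsForSecretsAndConfigMaps are helpers from the test class whose bodies are not shown in this excerpt. A hypothetical sketch of the kind of check they presumably perform, assuming the operator stamps the app.kubernetes.io/* label domain on its resources (the exact keys asserted by the real helpers may differ):

private static void verifyAppLabelsSketch(Map<String, String> labels) {
    // Hamcrest hasKey matchers; the chosen keys are assumptions, not taken from the helper's source.
    assertThat(labels, hasKey("app.kubernetes.io/name"));
    assertThat(labels, hasKey("app.kubernetes.io/instance"));
    assertThat(labels, hasKey("app.kubernetes.io/managed-by"));
}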
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi by strimzi.
The class ListenersST, method testCustomCertLoadBalancerAndTlsRollingUpdate.
@ParallelNamespaceTest
@Tag(LOADBALANCER_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
@Tag(INTERNAL_CLIENTS_USED)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
@SuppressWarnings({ "checkstyle:MethodLength" })
void testCustomCertLoadBalancerAndTlsRollingUpdate(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext);
final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" + customCertServer1;
final String clusterCustomCertServer2 = testStorage.getClusterName() + "-" + customCertServer2;
SecretUtils.createCustomSecret(clusterCustomCertServer1, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_1);
SecretUtils.createCustomSecret(clusterCustomCertServer2, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_2);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9113)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9114)
                    .withType(KafkaListenerType.LOADBALANCER)
                    .withTls(true)
                    .withNewConfiguration()
                        .withFinalizers(LB_FINALIZERS)
                    .endConfiguration()
                    .build())
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
String externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
String externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), testStorage.getClusterName() + "-cluster-ca-cert", "ca.crt");
String internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
LOGGER.info("Check if KafkaStatus certificates from external listeners are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
// External secret cert is same as internal in this case
assertThat(externalSecretCerts, is(internalCerts));
ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
    .withTopicName(testStorage.getTopicName())
    .withNamespaceName(testStorage.getNamespaceName())
    .withClusterName(testStorage.getClusterName())
    .withKafkaUsername(testStorage.getUserName())
    .withMessageCount(MESSAGE_COUNT)
    .withSecurityProtocol(SecurityProtocol.SSL)
    .withCertificateAuthorityCertificateName(null)
    .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
    .build();
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
kafka.getSpec().getKafka().setListeners(asList(
    new GenericKafkaListenerBuilder()
        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .withPort(9113)
        .withType(KafkaListenerType.INTERNAL)
        .withTls(true)
        .withNewConfiguration()
            .withNewBrokerCertChainAndKey()
                .withSecretName(clusterCustomCertServer2)
                .withKey("ca.key")
                .withCertificate("ca.crt")
            .endBrokerCertChainAndKey()
        .endConfiguration()
        .build(),
    new GenericKafkaListenerBuilder()
        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .withPort(9114)
        .withType(KafkaListenerType.LOADBALANCER)
        .withTls(true)
        .withNewConfiguration()
            .withNewBrokerCertChainAndKey()
                .withSecretName(clusterCustomCertServer1)
                .withKey("ca.key")
                .withCertificate("ca.crt")
            .endBrokerCertChainAndKey()
            .withFinalizers(LB_FINALIZERS)
        .endConfiguration()
        .build()));
}, testStorage.getNamespaceName());
kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaSnapshot);
KafkaUtils.waitForKafkaStatusUpdate(testStorage.getNamespaceName(), testStorage.getClusterName());
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer1, "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
String internalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(clusterCustomCertServer1).build();
externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesTls(), externalKafkaClient.receiveMessagesTls());
KafkaClients kafkaClients = new KafkaClientsBuilder()
    .withNamespaceName(testStorage.getNamespaceName())
    .withTopicName(testStorage.getTopicName())
    .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9113")
    .withMessageCount(MESSAGE_COUNT)
    .withUserName(testStorage.getUserName())
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withConsumerGroup("consumer-group-certs-6")
    .withCaCertSecretName(clusterCustomCertServer2)
    .build();
resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
kafkaClients = new KafkaClientsBuilder(kafkaClients).withMessageCount(3 * MESSAGE_COUNT).build();
resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT * 3);
SecretUtils.createCustomSecret(clusterCustomCertServer1, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_2);
SecretUtils.createCustomSecret(clusterCustomCertServer2, testStorage.getClusterName(), testStorage.getNamespaceName(), STRIMZI_CERT_AND_KEY_1);
kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaSnapshot);
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer1, "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
internalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
int sent = externalKafkaClient.sendMessagesTls() + MESSAGE_COUNT;
externalKafkaClient.setMessageCount(2 * MESSAGE_COUNT);
externalKafkaClient.verifyProducedAndConsumedMessages(sent, externalKafkaClient.receiveMessagesTls());
kafkaClients = new KafkaClientsBuilder(kafkaClients).withConsumerGroup("consumer-group-certs-71").withMessageCount(MESSAGE_COUNT).build();
resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
kafkaClients = new KafkaClientsBuilder(kafkaClients).withMessageCount(MESSAGE_COUNT * 5).build();
resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT * 5);
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
kafka.getSpec().getKafka().setListeners(asList(
    new GenericKafkaListenerBuilder()
        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .withPort(9113)
        .withType(KafkaListenerType.INTERNAL)
        .withTls(true)
        .withNewConfiguration()
            .withNewBrokerCertChainAndKey()
                .withSecretName(clusterCustomCertServer2)
                .withKey("ca.key")
                .withCertificate("ca.crt")
            .endBrokerCertChainAndKey()
        .endConfiguration()
        .build(),
    new GenericKafkaListenerBuilder()
        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .withPort(9114)
        .withType(KafkaListenerType.LOADBALANCER)
        .withNewConfiguration()
            .withFinalizers(LB_FINALIZERS)
        .endConfiguration()
        .withTls(true)
        .build()));
}, testStorage.getNamespaceName());
RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaSnapshot);
KafkaUtils.waitForKafkaStatusUpdate(testStorage.getNamespaceName(), testStorage.getClusterName());
externalCerts = getKafkaStatusCertificates(Constants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), testStorage.getClusterName() + "-cluster-ca-cert", "ca.crt");
internalCerts = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, testStorage.getNamespaceName(), testStorage.getClusterName());
internalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer2, "ca.crt");
LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
assertThat(externalSecretCerts, is(externalCerts));
LOGGER.info("Check if KafkaStatus certificates from internal TLS listener are the same as secret certificates");
assertThat(internalSecretCerts, is(internalCerts));
externalKafkaClient = externalKafkaClient.toBuilder().withCertificateAuthorityCertificateName(null).withMessageCount(MESSAGE_COUNT).build();
sent = externalKafkaClient.sendMessagesTls() + MESSAGE_COUNT;
externalKafkaClient.setMessageCount(2 * MESSAGE_COUNT);
externalKafkaClient.verifyProducedAndConsumedMessages(sent, externalKafkaClient.receiveMessagesTls());
kafkaClients = new KafkaClientsBuilder(kafkaClients).withConsumerGroup("consumer-group-certs-83").withMessageCount(MESSAGE_COUNT * 6).build();
resourceManager.createResource(extensionContext, kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT * 6);
}
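Every certificate change in this test is verified with the same rolling-update idiom: take a pod snapshot for the Kafka selector, mutate the Kafka custom resource, then wait until every pod behind that selector has rolled, reusing the returned snapshot for the next round. Condensed below (all calls appear above; only the arrangement as a standalone fragment is new):

Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
// ... mutate the Kafka CR, e.g. via KafkaResource.replaceKafkaResourceInSpecificNamespace(...) ...
kafkaSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaSnapshot);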