Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class OauthAuthorizationIsolatedST, method testKeycloakAuthorizerToDelegateToSimpleAuthorizer.
@ParallelNamespaceTest
@Order(10)
void testKeycloakAuthorizerToDelegateToSimpleAuthorizer(ExtensionContext extensionContext) {
TestStorage testStorage = new TestStorage(extensionContext);
// We have to copy the Keycloak, team-a-client, and team-b-client Secrets from the `infra-namespace` into the new namespace
resourceManager.createResource(extensionContext, kubeClient().getSecret(clusterOperator.getDeploymentNamespace(), KeycloakInstance.KEYCLOAK_SECRET_NAME));
resourceManager.createResource(extensionContext, kubeClient().getSecret(clusterOperator.getDeploymentNamespace(), TEAM_A_CLIENT_SECRET));
resourceManager.createResource(extensionContext, kubeClient().getSecret(clusterOperator.getDeploymentNamespace(), TEAM_B_CLIENT_SECRET));
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 1, 1)
    .editSpec()
        .editKafka()
            .withListeners(OauthAbstractST.BUILD_OAUTH_TLS_LISTENER.apply(keycloakInstance))
            .withNewKafkaAuthorizationKeycloak()
                .withClientId(KAFKA_CLIENT_ID)
                .withDisableTlsHostnameVerification(true)
                .withDelegateToKafkaAcls(true)
                .withTlsTrustedCertificates(new CertSecretSourceBuilder()
                    .withSecretName(KeycloakInstance.KEYCLOAK_SECRET_NAME)
                    .withCertificate(KeycloakInstance.KEYCLOAK_SECRET_CERT)
                    .build())
                .withTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
            .endKafkaAuthorizationKeycloak()
        .endKafka()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), TEAM_A_CLIENT).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), TEAM_B_CLIENT).build());
final String teamAProducerName = TEAM_A_PRODUCER_NAME + "-" + testStorage.getClusterName();
final String teamAConsumerName = TEAM_A_CONSUMER_NAME + "-" + testStorage.getClusterName();
final String topicName = TOPIC_A + "-" + testStorage.getTopicName();
final String consumerGroup = "a-consumer_group-" + testStorage.getConsumerName();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), topicName, testStorage.getNamespaceName()).build());
KafkaOauthClients teamAOauthClientJob = new KafkaOauthClientsBuilder()
    .withNamespaceName(testStorage.getNamespaceName())
    .withProducerName(teamAProducerName)
    .withConsumerName(teamAConsumerName)
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
    .withTopicName(topicName)
    .withMessageCount(MESSAGE_COUNT)
    .withConsumerGroup(consumerGroup)
    .withOauthClientId(TEAM_A_CLIENT)
    .withOauthClientSecret(TEAM_A_CLIENT_SECRET)
    .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
    .build();
resourceManager.createResource(extensionContext, teamAOauthClientJob.producerStrimziOauthTls(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(teamAProducerName, testStorage.getNamespaceName(), MESSAGE_COUNT);
resourceManager.createResource(extensionContext, teamAOauthClientJob.consumerStrimziOauthTls(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(teamAConsumerName, testStorage.getNamespaceName(), MESSAGE_COUNT);
}
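Because withDelegateToKafkaAcls(true) is set above, any request that the Keycloak authorizer cannot match against a Keycloak grant falls through to plain Kafka ACLs. Below is a minimal, hypothetical sketch of declaring such a fallback ACL on a KafkaUser; the withNewKafkaUserAuthorizationSimple/addNewAcl builder names and the AclOperation import (io.strimzi.api.kafka.model.AclOperation) are assumed from the with/New/end convention of the Strimzi builders used in this test, not verified against a specific Strimzi version.
// Hypothetical sketch: give TEAM_B_CLIENT read access via plain Kafka ACLs, which the
// Keycloak authorizer falls back to when delegateToKafkaAcls is true. Builder method
// names are assumptions based on the with/New/end pattern seen above.
resourceManager.createResource(extensionContext,
    KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), testStorage.getClusterName(), TEAM_B_CLIENT)
        .editSpec()
            .withNewKafkaUserAuthorizationSimple()
                .addNewAcl()
                    .withNewAclRuleTopicResource()
                        .withName(topicName)
                    .endAclRuleTopicResource()
                    .withOperation(AclOperation.READ)
                .endAcl()
            .endKafkaUserAuthorizationSimple()
        .endSpec()
        .build());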
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class UserST, method testCreatingUsersWithSecretPrefix.
@ParallelNamespaceTest
void testCreatingUsersWithSecretPrefix(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, namespace);
final String secretPrefix = "top-secret-";
final String tlsUserName = "encrypted-leopold";
final String scramShaUserName = "scramed-leopold";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getClusterName(), 3)
    .editSpec()
        .editKafka()
            .withListeners(
                new GenericKafkaListenerBuilder()
                    .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                    .withPort(9092)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(false)
                    .withNewKafkaListenerAuthenticationScramSha512Auth()
                    .endKafkaListenerAuthenticationScramSha512Auth()
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9093)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withNewKafkaListenerAuthenticationTlsAuth()
                    .endKafkaListenerAuthenticationTlsAuth()
                    .build())
        .endKafka()
        .editEntityOperator()
            .editUserOperator()
                .withSecretPrefix(secretPrefix)
            .endUserOperator()
        .endEntityOperator()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext,
    KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build(),
    KafkaUserTemplates.tlsUser(testStorage.getClusterName(), tlsUserName).build(),
    KafkaUserTemplates.scramShaUser(testStorage.getClusterName(), scramShaUserName).build());
Secret tlsSecret = kubeClient().getSecret(testStorage.getNamespaceName(), secretPrefix + tlsUserName);
Secret scramShaSecret = kubeClient().getSecret(testStorage.getNamespaceName(), secretPrefix + scramShaUserName);
LOGGER.info("Checking if user secrets with secret prefixes exists");
assertNotNull(tlsSecret);
assertNotNull(scramShaSecret);
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
    .withNamespaceName(testStorage.getNamespaceName())
    .withTopicName(testStorage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .withUserName(secretPrefix + tlsUserName)
    .build();
LOGGER.info("Checking if TLS user is able to send messages");
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
clients = new KafkaClientsBuilder(clients)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withUserName(secretPrefix + scramShaUserName)
    .build();
LOGGER.info("Checking if SCRAM-SHA user is able to send messages");
resourceManager.createResource(extensionContext, clients.producerScramShaPlainStrimzi(), clients.consumerScramShaPlainStrimzi());
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
LOGGER.info("Checking owner reference - if the secret will be deleted when we delete KafkaUser");
LOGGER.info("Deleting KafkaUser:{}", tlsUserName);
KafkaUserResource.kafkaUserClient().inNamespace(testStorage.getNamespaceName()).withName(tlsUserName).delete();
KafkaUserUtils.waitForKafkaUserDeletion(testStorage.getNamespaceName(), tlsUserName);
LOGGER.info("Deleting KafkaUser:{}", scramShaUserName);
KafkaUserResource.kafkaUserClient().inNamespace(testStorage.getNamespaceName()).withName(scramShaUserName).delete();
KafkaUserUtils.waitForKafkaUserDeletion(testStorage.getNamespaceName(), scramShaUserName);
LOGGER.info("Checking if secrets are deleted");
SecretUtils.waitForSecretDeletion(testStorage.getNamespaceName(), tlsSecret.getMetadata().getName());
SecretUtils.waitForSecretDeletion(testStorage.getNamespaceName(), scramShaSecret.getMetadata().getName());
assertNull(kubeClient().getSecret(testStorage.getNamespaceName(), tlsSecret.getMetadata().getName()));
assertNull(kubeClient().getSecret(testStorage.getNamespaceName(), scramShaSecret.getMetadata().getName()));
}
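The cascade delete verified at the end of this test works because the User Operator sets an ownerReference on each generated Secret pointing back at its KafkaUser. Below is a short assertion sketch using the fabric8 Secret model (io.fabric8.kubernetes.api.model.OwnerReference); the expected kind and name values are assumptions about the operator's behavior, not documented API guarantees.
// Hypothetical sketch: the prefixed Secret should reference its KafkaUser as owner,
// which is what makes Kubernetes garbage-collect it when the KafkaUser is deleted.
OwnerReference owner = tlsSecret.getMetadata().getOwnerReferences().get(0);
assertThat(owner.getKind(), is("KafkaUser"));
// The owner is the KafkaUser itself, so its name carries no secret prefix (assumption).
assertThat(owner.getName(), is(tlsUserName));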
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class AlternativeReconcileTriggersST, method testManualTriggeringRollingUpdate.
@ParallelNamespaceTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
@SuppressWarnings("checkstyle:MethodLength")
void testManualTriggeringRollingUpdate(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, namespace);
final String continuousTopicName = "continuous-topic";
final String continuousProducerName = "continuous-" + testStorage.getProducerName();
final String continuousConsumerName = "continuous-" + testStorage.getConsumerName();
// With a 1000 ms delay per message, 500 messages take roughly 500 seconds
final int continuousClientsMessageCount = 500;
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3).build());
String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
Map<String, String> zkPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build());
// ##############################
// Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update
// ##############################
// Set up a topic with 3 replicas and min.insync.replicas=2 to check that the producer keeps working during the rolling update
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), continuousTopicName, 3, 3, 2).build());
String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
// Add transactional id to make producer transactional
producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");
KafkaClients continuousClients = new KafkaClientsBuilder()
    .withProducerName(continuousProducerName)
    .withConsumerName(continuousConsumerName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withTopicName(continuousTopicName)
    .withMessageCount(continuousClientsMessageCount)
    .withAdditionalConfig(producerAdditionConfiguration)
    .withDelayMs(1000)
    .withNamespaceName(testStorage.getNamespaceName())
    .build();
resourceManager.createResource(extensionContext, continuousClients.producerStrimzi(), continuousClients.consumerStrimzi());
// ##############################
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
    .withTopicName(testStorage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .withNamespaceName(testStorage.getNamespaceName())
    .withUserName(testStorage.getUserName())
    .build();
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getProducerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Rolling update for Kafka: set the annotation that triggers a manual rolling update
LOGGER.info("Annotate Kafka {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, kafkaName);
StUtils.annotateStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
// Check that the rolling-update annotation is present on the Kafka resource
assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));
RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), 3, kafkaPods);
// Wait until the Cluster Operator removes the rolling-update annotation from the Kafka resource
// (the original checked zkName here, which is a copy-paste slip - the Kafka resource is the one annotated above)
TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT,
    () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName) == null
        || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), kafkaName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Rolling update for ZooKeeper: set the annotation that triggers a manual rolling update
LOGGER.info("Annotate Zookeeper {} {} with manual rolling update annotation", Environment.isStrimziPodSetEnabled() ? StrimziPodSet.RESOURCE_KIND : Constants.STATEFUL_SET, zkName);
StUtils.annotateStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName, Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"));
// Check that the rolling-update annotation is present on the ZooKeeper resource
assertThat(Boolean.parseBoolean(StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName).get(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)), is(true));
RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), 3, zkPods);
// Wait until the Cluster Operator removes the rolling-update annotation from the ZooKeeper resource
TestUtils.waitFor("CO removes rolling update annotation", Constants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, Constants.GLOBAL_TIMEOUT,
    () -> StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName) == null
        || !StUtils.getAnnotationsOfStatefulSetOrStrimziPodSet(testStorage.getNamespaceName(), zkName).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE));
clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Create a new topic to verify that ZooKeeper is working properly
String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), newTopicName, 1, 1).build());
clients = new KafkaClientsBuilder(clients).withTopicName(newTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// ##############################
// Validate that continuous clients finished successfully
// ##############################
ClientUtils.waitForClientsSuccess(continuousProducerName, continuousConsumerName, testStorage.getNamespaceName(), continuousClientsMessageCount);
// ##############################
}
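StUtils.annotateStatefulSetOrStrimziPodSet hides the actual API call. As a rough sketch of the StatefulSet branch only, assuming nothing beyond the standard fabric8 client (io.fabric8.kubernetes.client.KubernetesClient, io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder), the manual-roll annotation could be applied as follows; strimzi.io/manual-rolling-update is the key behind Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE.
// Sketch only, not the real StUtils implementation: annotate a StatefulSet so that the
// Cluster Operator rolls its pods on the next reconciliation, then removes the annotation.
static void annotateForManualRoll(KubernetesClient client, String namespace, String name) {
    client.apps().statefulSets().inNamespace(namespace).withName(name)
        .edit(sts -> new StatefulSetBuilder(sts)
            .editMetadata()
                .addToAnnotations("strimzi.io/manual-rolling-update", "true")
            .endMetadata()
            .build());
}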
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class RollingUpdateST, method testKafkaAndZookeeperScaleUpScaleDown.
@ParallelNamespaceTest
@Tag(ACCEPTANCE)
@Tag(SCALABILITY)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testKafkaAndZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, namespace);
resourceManager.createResource(extensionContext,
    KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3)
        .editSpec()
            .editKafka()
                .addToConfig(singletonMap("default.replication.factor", 1))
                .addToConfig("auto.create.topics.enable", "false")
            .endKafka()
        .endSpec()
        .build(),
    KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
testDockerImagesForKafkaCluster(testStorage.getClusterName(), clusterOperator.getDeploymentNamespace(), testStorage.getNamespaceName(), 3, 3, false);
LOGGER.info("Running kafkaScaleUpScaleDown {}", testStorage.getClusterName());
final int initialReplicas = kubeClient().getClient().pods().inNamespace(testStorage.getNamespaceName()).withLabelSelector(testStorage.getKafkaSelector()).list().getItems().size();
assertEquals(3, initialReplicas);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName(), 3, initialReplicas, initialReplicas).build());
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
    .withNamespaceName(testStorage.getNamespaceName())
    .withTopicName(testStorage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .withUserName(testStorage.getUserName())
    .build();
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// scale up
final int scaleTo = initialReplicas + 4;
LOGGER.info("Scale up Kafka to {}", scaleTo);
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), kafka -> {
kafka.getSpec().getKafka().setReplicas(scaleTo);
}, testStorage.getNamespaceName());
kafkaPods = RollingUpdateUtils.waitForComponentScaleUpOrDown(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), scaleTo, kafkaPods);
LOGGER.info("Kafka scale up to {} finished", scaleTo);
clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
assertThat((int) kubeClient().listPersistentVolumeClaims().stream()
    .filter(pvc -> pvc.getMetadata().getName().contains(KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())))
    .count(), is(scaleTo));
final int zookeeperScaleTo = initialReplicas + 2;
Map<String, String> zooKeeperPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector());
LOGGER.info("Scale up Zookeeper to {}", zookeeperScaleTo);
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(zookeeperScaleTo), testStorage.getNamespaceName());
zooKeeperPods = RollingUpdateUtils.waitForComponentScaleUpOrDown(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), zookeeperScaleTo, zooKeeperPods);
LOGGER.info("Kafka scale up to {} finished", zookeeperScaleTo);
clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// scale down
LOGGER.info("Scale down Kafka to {}", initialReplicas);
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getKafka().setReplicas(initialReplicas), testStorage.getNamespaceName());
RollingUpdateUtils.waitForComponentScaleUpOrDown(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), initialReplicas, kafkaPods);
LOGGER.info("Kafka scale down to {} finished", initialReplicas);
clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
assertThat((int) kubeClient().listPersistentVolumeClaims(testStorage.getNamespaceName(), testStorage.getClusterName()).stream()
    .filter(pvc -> pvc.getMetadata().getName().contains("data-" + KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())))
    .count(), is(initialReplicas));
// Create a new topic to verify that ZooKeeper is working properly
String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), newTopicName).build());
clients = new KafkaClientsBuilder(clients).withTopicName(newTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
}
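KafkaResource.replaceKafkaResourceInSpecificNamespace is a test-framework helper. A plausible minimal implementation on top of the typed client from the Strimzi api module is sketched below; Crds.kafkaOperation (io.strimzi.api.kafka.Crds) is a published entry point, while the helper's overall shape, and the imports for Kafka, KubernetesClient, and java.util.function.Consumer, are assumptions.
// Hedged sketch: fetch the Kafka custom resource, apply the mutation, and replace it.
static void editKafka(KubernetesClient client, String namespace, String clusterName, Consumer<Kafka> editor) {
    Kafka kafka = Crds.kafkaOperation(client).inNamespace(namespace).withName(clusterName).get();
    editor.accept(kafka); // e.g. k -> k.getSpec().getKafka().setReplicas(5)
    Crds.kafkaOperation(client).inNamespace(namespace).replace(kafka);
}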
Use of io.strimzi.systemtest.storage.TestStorage in project strimzi-kafka-operator by strimzi.
Class RollingUpdateST, method testZookeeperScaleUpScaleDown.
@ParallelNamespaceTest
@Tag(SCALABILITY)
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, namespace);
resourceManager.createResource(extensionContext,
    KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), 3, 3).build(),
    KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName()).build(),
    KafkaUserTemplates.tlsUser(testStorage.getClusterName(), testStorage.getUserName()).build());
// Kafka cluster is already deployed
LOGGER.info("Running zookeeperScaleUpScaleDown with cluster {}", testStorage.getClusterName());
final int initialZkReplicas = kubeClient().getClient().pods().inNamespace(testStorage.getNamespaceName()).withLabelSelector(testStorage.getZookeeperSelector()).list().getItems().size();
assertThat(initialZkReplicas, is(3));
KafkaClients clients = new KafkaClientsBuilder()
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName()))
    .withNamespaceName(testStorage.getNamespaceName())
    .withTopicName(testStorage.getTopicName())
    .withMessageCount(MESSAGE_COUNT)
    .withUserName(testStorage.getUserName())
    .build();
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
final int scaleZkTo = initialZkReplicas + 4;
final List<String> newZkPodNames = new ArrayList<String>() {
{
for (int i = initialZkReplicas; i < scaleZkTo; i++) {
add(KafkaResources.zookeeperPodName(testStorage.getClusterName(), i));
}
}
};
LOGGER.info("Scale up Zookeeper to {}", scaleZkTo);
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(scaleZkTo), testStorage.getNamespaceName());
clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
RollingUpdateUtils.waitForComponentAndPodsReady(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), scaleZkTo);
// Check that each ZooKeeper node is in either leader or follower state
KafkaUtils.waitForZkMntr(testStorage.getNamespaceName(), testStorage.getClusterName(), ZK_SERVER_STATE, 0, 1, 2, 3, 4, 5, 6);
clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Create a new topic to verify that ZooKeeper is working properly
String scaleUpTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), scaleUpTopicName, 1, 1).build());
clients = new KafkaClientsBuilder(clients).withTopicName(scaleUpTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// scale down
LOGGER.info("Scale down Zookeeper to {}", initialZkReplicas);
// Capture the UID of the highest-numbered ZooKeeper pod (the last one added, index 3 of newZkPodNames) before the scale-down deletes it
String uid = kubeClient(testStorage.getNamespaceName()).getPodUid(newZkPodNames.get(3));
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().getZookeeper().setReplicas(initialZkReplicas), testStorage.getNamespaceName());
RollingUpdateUtils.waitForComponentAndPodsReady(testStorage.getNamespaceName(), testStorage.getZookeeperSelector(), initialZkReplicas);
clients = new KafkaClientsBuilder(clients).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
// Wait until one ZooKeeper pod becomes the leader and the others become followers
KafkaUtils.waitForZkMntr(testStorage.getNamespaceName(), testStorage.getClusterName(), ZK_SERVER_STATE, 0, 1, 2);
resourceManager.createResource(extensionContext, clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Create a new topic to verify that ZooKeeper is working properly
String scaleDownTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), scaleDownTopicName, 1, 1).build());
clients = new KafkaClientsBuilder(clients).withTopicName(scaleDownTopicName).withConsumerGroup(ClientUtils.generateRandomConsumerGroup()).build();
resourceManager.createResource(extensionContext, clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName()));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
// Verify that the removed pod recorded a 'Killing' event
assertThat(kubeClient(testStorage.getNamespaceName()).listEventsByResourceUid(uid), hasAllOfReasons(Killing));
}
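The final assertion relies on kubeClient().listEventsByResourceUid to map a pod UID to its events. A self-contained approximation with the plain fabric8 client (io.fabric8.kubernetes.api.model.Event, io.fabric8.kubernetes.client.KubernetesClient, java.util.List, java.util.stream.Collectors) might look like the following; the exact semantics of the real helper are an assumption.
// Hypothetical sketch: collect core/v1 Events whose involvedObject matches the pod UID;
// a matcher such as hasAllOfReasons(Killing) can then be applied to the result.
static List<Event> eventsForUid(KubernetesClient client, String namespace, String uid) {
    return client.v1().events().inNamespace(namespace).list().getItems().stream()
        .filter(e -> uid.equals(e.getInvolvedObject().getUid()))
        .collect(Collectors.toList());
}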