Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.
The class DrainCleanerIsolatedST, method testDrainCleanerWithComponents.
@IsolatedTest
@RequiredMinKubeApiVersion(version = 1.17)
void testDrainCleanerWithComponents(ExtensionContext extensionContext) {
TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
final int replicas = 3;
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
    .editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata()
    .editSpec()
        .editKafka()
            .editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().endTemplate()
        .endKafka()
        .editZookeeper()
            .editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().endTemplate()
        .endZookeeper()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName())
    .editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata()
    .build());
drainCleaner.createDrainCleaner(extensionContext);
String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
KafkaClients kafkaBasicExampleClients = new KafkaClientsBuilder()
    .withMessageCount(300)
    .withTopicName(testStorage.getTopicName())
    .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withDelayMs(1000)
    .build();
resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
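// with a 1000 ms delay per message, the 300-message producer and consumer jobs keep
// roughly five minutes of continuous traffic flowing while the pods below are evicted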
for (int i = 0; i < replicas; i++) {
String zkPodName = KafkaResources.zookeeperPodName(testStorage.getClusterName(), i);
String kafkaPodName = KafkaResources.kafkaPodName(testStorage.getClusterName(), i);
Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
    .entrySet().stream()
    .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector())
    .entrySet().stream()
    .filter(snapshot -> snapshot.getKey().equals(zkPodName))
    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
LOGGER.info("Evicting pods: {} and {}", zkPodName, kafkaPodName);
kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(zkPodName).evict();
kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(kafkaPodName).evict();
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
}
ClientUtils.waitTillContinuousClientsFinish(testStorage.getProducerName(), testStorage.getConsumerName(), Constants.DRAIN_CLEANER_NAMESPACE, 300);
}
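The eviction calls above go through the Kubernetes Eviction API, which honours PodDisruptionBudgets; with maxUnavailable set to 0 a plain eviction would normally be refused, and the Drain Cleaner's admission webhook is what turns it into a controlled roll by the Cluster Operator. A minimal standalone sketch of the same eviction call, assuming a plain fabric8 KubernetesClient; the namespace and pod name are illustrative:

import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClient;

public class EvictionSketch {
    public static void main(String[] args) {
        try (KubernetesClient client = new DefaultKubernetesClient()) {
            // evict() posts an Eviction object for the pod; a PodDisruptionBudget with
            // maxUnavailable: 0 normally blocks it, and the Drain Cleaner webhook instead
            // marks the pod so the Cluster Operator restarts it in a later reconciliation
            boolean evicted = client.pods()
                .inNamespace("strimzi-drain-cleaner")
                .withName("my-cluster-kafka-0")
                .evict();
            System.out.println("Eviction accepted by the API server: " + evicted);
        }
    }
}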
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.
The class DrainCleanerIsolatedST, method testDrainCleanerWithComponentsDuringNodeDraining.
@IsolatedTest
@MultiNodeClusterOnly
void testDrainCleanerWithComponentsDuringNodeDraining(ExtensionContext extensionContext) {
TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
String rackKey = "rack-key";
final int replicas = 3;
int size = 5;
List<String> topicNames = IntStream.range(0, size).boxed().map(i -> testStorage.getTopicName() + "-" + i).collect(Collectors.toList());
List<String> producerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getProducerName() + "-" + i).collect(Collectors.toList());
List<String> consumerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getConsumerName() + "-" + i).collect(Collectors.toList());
List<String> continuousConsumerGroups = IntStream.range(0, size).boxed().map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
    .editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata()
    .editSpec()
        .editKafka()
            .withNewRack().withTopologyKey(rackKey).endRack()
            .editOrNewTemplate()
                .editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget()
                .withNewPod()
                    .withAffinity(new AffinityBuilder().withNewPodAntiAffinity()
                        .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                            .editOrNewLabelSelector()
                                .addNewMatchExpression().withKey(rackKey).withOperator("In").withValues("zone").endMatchExpression()
                            .endLabelSelector()
                            .withTopologyKey(rackKey)
                        .endRequiredDuringSchedulingIgnoredDuringExecution()
                        .endPodAntiAffinity().build())
                .endPod()
            .endTemplate()
        .endKafka()
        .editZookeeper()
            .editOrNewTemplate()
                .editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget()
                .withNewPod()
                    .withAffinity(new AffinityBuilder().withNewPodAntiAffinity()
                        .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                            .editOrNewLabelSelector()
                                .addNewMatchExpression().withKey(rackKey).withOperator("In").withValues("zone").endMatchExpression()
                            .endLabelSelector()
                            .withTopologyKey(rackKey)
                        .endRequiredDuringSchedulingIgnoredDuringExecution()
                        .endPodAntiAffinity().build())
                .endPod()
            .endTemplate()
        .endZookeeper()
    .endSpec()
    .build());
topicNames.forEach(topic -> resourceManager.createResource(extensionContext,
    KafkaTopicTemplates.topic(testStorage.getClusterName(), topic, 3, 3, 2)
        .editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata()
        .build()));
drainCleaner.createDrainCleaner(extensionContext);
String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
Map<String, List<String>> nodesWithPods = NodeUtils.getPodsForEachNodeInNamespace(Constants.DRAIN_CLEANER_NAMESPACE);
// keep only the pods whose names contain "kafka" or "zookeeper"
nodesWithPods.forEach((node, podlist) -> podlist.retainAll(podlist.stream()
    .filter(podName -> podName.contains("kafka") || podName.contains("zookeeper"))
    .collect(Collectors.toList())));
String producerAdditionConfiguration = "delivery.timeout.ms=30000\nrequest.timeout.ms=30000";
KafkaClients kafkaBasicExampleClients;
for (int i = 0; i < size; i++) {
kafkaBasicExampleClients = new KafkaClientsBuilder()
    .withProducerName(producerNames.get(i)).withConsumerName(consumerNames.get(i))
    .withTopicName(topicNames.get(i)).withConsumerGroup(continuousConsumerGroups.get(i))
    .withMessageCount(300).withDelayMs(1000)
    .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withAdditionalConfig(producerAdditionConfiguration)
    .build();
resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
}
LOGGER.info("Starting Node drain");
nodesWithPods.forEach((nodeName, podList) -> {
String zkPodName = podList.stream().filter(podName -> podName.contains("zookeeper")).findFirst().get();
String kafkaPodName = podList.stream().filter(podName -> podName.contains("kafka")).findFirst().get();
Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(kafkaPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(zkPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
NodeUtils.drainNode(nodeName);
NodeUtils.cordonNode(nodeName, true);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
});
producerNames.forEach(producer -> ClientUtils.waitTillContinuousClientsFinish(producer, consumerNames.get(producerNames.indexOf(producer)), Constants.DRAIN_CLEANER_NAMESPACE, 300));
producerNames.forEach(producer -> KubeClusterResource.kubeClient().deleteJob(producer));
consumerNames.forEach(consumer -> KubeClusterResource.kubeClient().deleteJob(consumer));
}
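NodeUtils.drainNode and NodeUtils.cordonNode are test helpers whose implementations are not shown here. A hedged sketch of what a cordon/uncordon helper could look like with the fabric8 client (the real helper may instead shell out to kubectl drain/cordon):

import io.fabric8.kubernetes.api.model.NodeBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;

public class CordonSketch {
    // schedulable = false cordons the node, schedulable = true makes it schedulable again
    static void setSchedulable(KubernetesClient client, String nodeName, boolean schedulable) {
        client.nodes().withName(nodeName).edit(node -> new NodeBuilder(node)
            .editOrNewSpec()
                .withUnschedulable(!schedulable)
            .endSpec()
            .build());
    }
}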
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.
The class AlternativeReconcileTriggersST, method testAddingAndRemovingJbodVolumes.
/**
 * Adding and removing JBOD volumes requires rolling updates in sequential order; otherwise the StatefulSet
 * does not accept the change. This test adds a volume to, and then removes a volume from, JBOD storage
 * to exercise both situations.
 */
@ParallelNamespaceTest
void testAddingAndRemovingJbodVolumes(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
final String continuousTopicName = "continuous-topic";
// with the 1000 ms delay per message configured below, 500 messages take about 500 seconds
final int continuousClientsMessageCount = 500;
final String producerName = "hello-world-producer";
final String consumerName = "hello-world-consumer";
PersistentClaimStorage vol0 = new PersistentClaimStorageBuilder().withId(0).withSize("1Gi").withDeleteClaim(true).build();
PersistentClaimStorage vol1 = new PersistentClaimStorageBuilder().withId(1).withSize("1Gi").withDeleteClaim(true).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, 3, 3,
    new JbodStorageBuilder().addToVolumes(vol0).build()).build());
final String kafkaName = KafkaResources.kafkaStatefulSetName(clusterName);
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaName);
Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
// ##############################
// Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update
// ##############################
// Set up a topic with 3 replicas and min.insync.replicas=2 to check that the producer keeps working during the rolling update
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, continuousTopicName, 3, 3, 2).build());
String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
// Add transactional id to make producer transactional
producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + continuousTopicName + ".1");
producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true");
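// With transactional.id set, the producer wraps sends in transactions, and
// enable.idempotence=true (required for transactions) prevents duplicates on retries,
// so a broker roll during the update shows up as delay rather than data loss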
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
    .withProducerName(producerName)
    .withConsumerName(consumerName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
    .withTopicName(continuousTopicName)
    .withMessageCount(continuousClientsMessageCount)
    .withAdditionalConfig(producerAdditionConfiguration)
    .withDelayMs(1000)
    .withNamespaceName(namespaceName)
    .build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
// ##############################
String userName = KafkaUserUtils.generateRandomNameOfKafkaUser();
KafkaUser user = KafkaUserTemplates.tlsUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, user);
resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());
final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kafkaClientsPodName)
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withKafkaUsername(userName)
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
internalKafkaClient.produceTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);
// Add a JBOD volume to Kafka => triggers a rolling update
LOGGER.info("Add JBOD volume to the Kafka cluster {}", kafkaName);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
storage.getVolumes().add(vol1);
}, namespaceName);
// Wait until it rolls
kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);
// Remove a JBOD volume from Kafka => triggers a rolling update
LOGGER.info("Remove JBOD volume from the Kafka cluster {}", kafkaName);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
JbodStorage storage = (JbodStorage) kafka.getSpec().getKafka().getStorage();
storage.getVolumes().remove(vol1);
}, namespaceName);
// Wait until it rolls
RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, 3, kafkaPods);
// ##############################
// Validate that continuous clients finished successfully
// ##############################
ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, namespaceName, continuousClientsMessageCount);
// ##############################
}
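For comparison, the same JBOD layout can be declared with both volumes up front instead of mutating the storage afterwards. A minimal sketch using the Strimzi API model builders; the import paths follow the package layout these tests use and may differ in newer Strimzi releases:

import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;

public class JbodSketch {
    // both 1Gi persistent-claim volumes declared at creation time, so no later rolling update is needed
    static JbodStorage twoVolumeJbod() {
        return new JbodStorageBuilder()
            .addToVolumes(
                new PersistentClaimStorageBuilder().withId(0).withSize("1Gi").withDeleteClaim(true).build(),
                new PersistentClaimStorageBuilder().withId(1).withSize("1Gi").withDeleteClaim(true).build())
            .build();
    }
}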
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.
The class AbstractUpgradeST, method setupEnvAndUpgradeClusterOperator.
protected void setupEnvAndUpgradeClusterOperator(ExtensionContext extensionContext, JsonObject testParameters, String producerName, String consumerName, String continuousTopicName, String continuousConsumerGroup, String kafkaVersion, String namespace) throws IOException {
int continuousClientsMessageCount = testParameters.getJsonObject("client").getInteger("continuousClientsMessages");
LOGGER.info("Test upgrade of ClusterOperator from version {} to version {}", testParameters.getString("fromVersion"), testParameters.getString("toVersion"));
cluster.setNamespace(namespace);
String operatorVersion = testParameters.getString("fromVersion");
String url = null;
File dir = null;
if ("HEAD".equals(testParameters.getString("fromVersion"))) {
coDir = new File(TestUtils.USER_PATH + "/../packaging/install/cluster-operator");
} else {
url = testParameters.getString("urlFrom");
dir = FileUtils.downloadAndUnzip(url);
coDir = new File(dir, testParameters.getString("fromExamples") + "/install/cluster-operator/");
}
// Modify + apply installation files
copyModifyApply(coDir, namespace, extensionContext, testParameters.getString("strimziFeatureGatesFlagsBefore"));
LOGGER.info("Waiting for {} deployment", ResourceManager.getCoDeploymentName());
DeploymentUtils.waitForDeploymentAndPodsReady(ResourceManager.getCoDeploymentName(), 1);
LOGGER.info("{} is ready", ResourceManager.getCoDeploymentName());
if (!cmdKubeClient().getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL, operatorVersion)).contains(clusterName)) {
// Deploy a Kafka cluster
if ("HEAD".equals(testParameters.getString("fromVersion"))) {
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
    .editSpec()
        .editKafka()
            .withVersion(kafkaVersion)
            .addToConfig("log.message.format.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).messageVersion())
            .addToConfig("inter.broker.protocol.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).protocolVersion())
        .endKafka()
    .endSpec()
    .build());
} else {
kafkaYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/kafka/kafka-persistent.yaml");
LOGGER.info("Deploy Kafka from: {}", kafkaYaml.getPath());
// Change the Kafka version; passing null removes the version from the YAML
cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaVersion(kafkaYaml, kafkaVersion));
// Wait for readiness
waitForReadinessOfKafkaCluster();
}
}
if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion)).contains(userName)) {
if ("HEAD".equals(testParameters.getString("fromVersion"))) {
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName).build());
} else {
kafkaUserYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/user/kafka-user.yaml");
LOGGER.info("Deploy KafkaUser from: {}", kafkaUserYaml.getPath());
cmdKubeClient().applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization"));
ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion), userName);
}
}
if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(topicName)) {
if ("HEAD".equals(testParameters.getString("fromVersion"))) {
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
} else {
kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
LOGGER.info("Deploy KafkaTopic from: {}", kafkaTopicYaml.getPath());
cmdKubeClient().create(kafkaTopicYaml);
ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), topicName);
}
}
// Create a batch of topics for the upgrade if specified in the configuration
if (testParameters.getBoolean("generateTopics")) {
for (int x = 0; x < upgradeTopicCount; x++) {
if ("HEAD".equals(testParameters.getString("fromVersion"))) {
resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, topicName + "-" + x, 1, 1, 1).editSpec().withTopicName(topicName + "-" + x).endSpec().build());
} else {
kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace("name: \"my-topic\"", "name: \"" + topicName + "-" + x + "\""));
}
}
}
if (continuousClientsMessageCount != 0) {
// Set up a topic with 3 replicas and min.insync.replicas=2 to check that the producer keeps working during the rolling update
if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(continuousTopicName)) {
String pathToTopicExamples = testParameters.getString("fromExamples").equals("HEAD") ? PATH_TO_KAFKA_TOPIC_CONFIG : testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml";
kafkaTopicYaml = new File(dir, pathToTopicExamples);
cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString)
    .replace("name: \"my-topic\"", "name: \"" + continuousTopicName + "\"")
    .replace("partitions: 1", "partitions: 3")
    .replace("replicas: 1", "replicas: 3") + " min.insync.replicas: 2");
ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), continuousTopicName);
}
String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
    .withProducerName(producerName)
    .withConsumerName(consumerName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
    .withTopicName(continuousTopicName)
    .withMessageCount(continuousClientsMessageCount)
    .withAdditionalConfig(producerAdditionConfiguration)
    .withConsumerGroup(continuousConsumerGroup)
    .withDelayMs(1000)
    .build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
// ##############################
}
makeSnapshots();
logPodImages(clusterName);
}
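The getResourceApiVersion helper used throughout this method is not shown above; conceptually it turns a resource plural and an operator version into the fully qualified form that kubectl get accepts. A hypothetical sketch, not the real implementation; the group constant and the caller-supplied apiVersion are assumptions:

public class ApiVersionSketch {
    // hypothetical: composes the fully qualified resource type for `kubectl get`,
    // e.g. "kafkas.kafka.strimzi.io/v1beta2"; the real helper derives the API
    // version from the operator version being tested
    static String getResourceApiVersion(String resourcePlural, String apiVersion) {
        return resourcePlural + ".kafka.strimzi.io/" + apiVersion;
    }
}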
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.
The class OauthScopeIsolatedST, method testClientScopeKafkaSetCorrectly.
@ParallelTest
void testClientScopeKafkaSetCorrectly(ExtensionContext extensionContext) throws UnexpectedException {
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String producerName = OAUTH_PRODUCER_NAME + "-" + clusterName;
final String consumerName = OAUTH_CONSUMER_NAME + "-" + clusterName;
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
KafkaClients oauthInternalClientChecksJob = new KafkaClientsBuilder()
    .withNamespaceName(INFRA_NAMESPACE)
    .withProducerName(producerName)
    .withConsumerName(consumerName)
    .withBootstrapAddress(KafkaResources.bootstrapServiceName(oauthClusterName) + ":" + scopeListenerPort)
    .withTopicName(topicName)
    .withMessageCount(MESSAGE_COUNT)
    .withAdditionalConfig(additionalOauthConfig)
    .build();
// clientScope is set to 'test' by default
// verification phase: check that the KafkaClient is able to authenticate
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicName, INFRA_NAMESPACE).build());
resourceManager.createResource(extensionContext, oauthInternalClientChecksJob.producerStrimzi());
// the client should succeed because we set `clientScope=test` and Kafka also expects `scope=test`
ClientUtils.waitForClientSuccess(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
JobUtils.deleteJobWithWait(INFRA_NAMESPACE, producerName);
}
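The additionalOauthConfig string is defined elsewhere in the test class. With Strimzi's kafka-oauth client library, a scope-carrying client configuration typically looks roughly like the following hedged, hypothetical example; every client ID, secret, and URL below is a placeholder:

// hypothetical contents for additionalOauthConfig; the property names come from the
// strimzi-kafka-oauth client library, and all values below are placeholders
String additionalOauthConfig =
    "sasl.mechanism=OAUTHBEARER\n"
    + "security.protocol=SASL_PLAINTEXT\n"
    + "sasl.login.callback.handler.class=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler\n"
    + "sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required "
    + "oauth.client.id=\"kafka-client\" "
    + "oauth.client.secret=\"kafka-client-secret\" "
    + "oauth.scope=\"test\" "
    + "oauth.token.endpoint.uri=\"https://keycloak:8443/auth/realms/test/protocol/openid-connect/token\";";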