Usage of io.strimzi.systemtest.annotations.MultiNodeClusterOnly in the strimzi-kafka-operator project by strimzi — class ClusterOperationIsolatedST, method testAvailabilityDuringNodeDrain:
@IsolatedTest
@MultiNodeClusterOnly
@RequiredMinKubeApiVersion(version = 1.15)
void testAvailabilityDuringNodeDrain(ExtensionContext extensionContext) {
    // Verifies message availability: continuous producers/consumers must finish
    // successfully while every worker node is drained and then cordoned.
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int size = 5;
    // Index-aligned name lists: producer i writes topic i, consumer i reads it in group i.
    List<String> topicNames = IntStream.range(0, size).boxed()
        .map(i -> "test-topic-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed()
        .map(i -> "hello-world-producer-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed()
        .map(i -> "hello-world-consumer-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed()
        .map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
    int continuousClientsMessageCount = 300;

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
            .editOrNewSpec()
                .editEntityOperator()
                    .editUserOperator()
                        .withReconciliationIntervalSeconds(30)
                    .endUserOperator()
                .endEntityOperator()
            .endSpec()
            .build());

    // Replication factor 3 / min ISR 2 so topics survive a single node going away.
    topicNames.forEach(topicName -> resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(clusterName, topicName, 3, 3, 2).build()));

    // Short timeouts so a stuck broker surfaces as a client failure instead of a hang.
    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";

    KafkaClients kafkaBasicClientResource;
    for (int i = 0; i < size; i++) {
        kafkaBasicClientResource = new KafkaClientsBuilder()
            .withProducerName(producerNames.get(i))
            .withConsumerName(consumerNames.get(i))
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
            .withTopicName(topicNames.get(i))
            .withMessageCount(continuousClientsMessageCount)
            .withAdditionalConfig(producerAdditionConfiguration)
            .withConsumerGroup(continuousConsumerGroups.get(i))
            .withDelayMs(1000)
            .build();
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.producerStrimzi());
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.consumerStrimzi());
    }

    // ##############################
    // Nodes draining
    // ##############################
    kubeClient().getClusterWorkers().forEach(node -> {
        NodeUtils.drainNode(node.getMetadata().getName());
        NodeUtils.cordonNode(node.getMetadata().getName(), true);
    });

    // BUG FIX: the original used producerName.indexOf(producerName) — that is
    // String#indexOf of the string within itself, which is always 0, so every
    // producer was paired with consumerNames.get(0). Use the LIST index so
    // producer i is matched with consumer i.
    producerNames.forEach(producerName -> ClientUtils.waitTillContinuousClientsFinish(
        producerName,
        consumerNames.get(producerNames.indexOf(producerName)),
        NAMESPACE,
        continuousClientsMessageCount));

    // Clean up the client Jobs once all messages have been exchanged.
    producerNames.forEach(producerName -> kubeClient().deleteJob(producerName));
    consumerNames.forEach(consumerName -> kubeClient().deleteJob(consumerName));
}
Usage of io.strimzi.systemtest.annotations.MultiNodeClusterOnly in the strimzi project by strimzi — class DrainCleanerIsolatedST, method testDrainCleanerWithComponentsDuringNodeDraining:
@IsolatedTest
@MultiNodeClusterOnly
void testDrainCleanerWithComponentsDuringNodeDraining(ExtensionContext extensionContext) {
    // Verifies that the Drain Cleaner rolls Kafka/ZooKeeper pods safely while their
    // nodes are drained, and that continuous clients keep exchanging messages.
    TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
    String rackKey = "rack-key";
    final int replicas = 3;
    int size = 5;

    // Index-aligned name lists: producer i writes topic i, consumer i reads it in group i.
    List<String> topicNames = IntStream.range(0, size).boxed()
        .map(i -> testStorage.getTopicName() + "-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed()
        .map(i -> testStorage.getProducerName() + "-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed()
        .map(i -> testStorage.getConsumerName() + "-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed()
        .map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());

    // PDB maxUnavailable=0 hands eviction handling over to the Drain Cleaner;
    // the anti-affinity spreads Kafka and ZooKeeper pods across nodes via rackKey.
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
            .editMetadata()
                .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
            .endMetadata()
            .editSpec()
                .editKafka()
                    .withNewRack()
                        .withTopologyKey(rackKey)
                    .endRack()
                    .editOrNewTemplate()
                        .editOrNewPodDisruptionBudget()
                            .withMaxUnavailable(0)
                        .endPodDisruptionBudget()
                        .withNewPod()
                            .withAffinity(new AffinityBuilder()
                                .withNewPodAntiAffinity()
                                    .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                        .editOrNewLabelSelector()
                                            .addNewMatchExpression()
                                                .withKey(rackKey)
                                                .withOperator("In")
                                                .withValues("zone")
                                            .endMatchExpression()
                                        .endLabelSelector()
                                        .withTopologyKey(rackKey)
                                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                                .endPodAntiAffinity()
                                .build())
                        .endPod()
                    .endTemplate()
                .endKafka()
                .editZookeeper()
                    .editOrNewTemplate()
                        .editOrNewPodDisruptionBudget()
                            .withMaxUnavailable(0)
                        .endPodDisruptionBudget()
                        .withNewPod()
                            .withAffinity(new AffinityBuilder()
                                .withNewPodAntiAffinity()
                                    .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                        .editOrNewLabelSelector()
                                            .addNewMatchExpression()
                                                .withKey(rackKey)
                                                .withOperator("In")
                                                .withValues("zone")
                                            .endMatchExpression()
                                        .endLabelSelector()
                                        .withTopologyKey(rackKey)
                                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                                .endPodAntiAffinity()
                                .build())
                        .endPod()
                    .endTemplate()
                .endZookeeper()
            .endSpec()
            .build());

    topicNames.forEach(topic -> resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(testStorage.getClusterName(), topic, 3, 3, 2)
            .editMetadata()
                .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
            .endMetadata()
            .build()));

    drainCleaner.createDrainCleaner(extensionContext);

    // CLEANUP: removed unused locals kafkaName/zkName — they were computed from
    // KafkaResources.kafkaStatefulSetName/zookeeperStatefulSetName but never read.

    Map<String, List<String>> nodesWithPods =
        NodeUtils.getPodsForEachNodeInNamespace(Constants.DRAIN_CLEANER_NAMESPACE);
    // Keep only pods whose names contain "kafka" or "zookeeper"; drop everything else.
    nodesWithPods.forEach((node, podlist) -> podlist.retainAll(podlist.stream()
        .filter(podName -> (podName.contains("kafka") || podName.contains("zookeeper")))
        .collect(Collectors.toList())));

    // Short timeouts so a stuck broker surfaces as a client failure instead of a hang.
    String producerAdditionConfiguration = "delivery.timeout.ms=30000\nrequest.timeout.ms=30000";

    KafkaClients kafkaBasicExampleClients;
    for (int i = 0; i < size; i++) {
        kafkaBasicExampleClients = new KafkaClientsBuilder()
            .withProducerName(producerNames.get(i))
            .withConsumerName(consumerNames.get(i))
            .withTopicName(topicNames.get(i))
            .withConsumerGroup(continuousConsumerGroups.get(i))
            .withMessageCount(300)
            .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
            .withDelayMs(1000)
            .withAdditionalConfig(producerAdditionConfiguration)
            .build();
        resourceManager.createResource(extensionContext,
            kafkaBasicExampleClients.producerStrimzi(),
            kafkaBasicExampleClients.consumerStrimzi());
    }

    LOGGER.info("Starting Node drain");
    nodesWithPods.forEach((nodeName, podList) -> {
        String zkPodName = podList.stream()
            .filter(podName -> podName.contains("zookeeper")).findFirst().get();
        String kafkaPodName = podList.stream()
            .filter(podName -> podName.contains("kafka")).findFirst().get();
        // Snapshot only the pods on this node so the rolling check targets them.
        Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
            .entrySet().stream()
            .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector())
            .entrySet().stream()
            .filter(snapshot -> snapshot.getKey().equals(zkPodName))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        NodeUtils.drainNode(nodeName);
        NodeUtils.cordonNode(nodeName, true);

        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(
            Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(
            Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
    });

    // Pair each producer with its index-aligned consumer and wait for completion.
    producerNames.forEach(producer -> ClientUtils.waitTillContinuousClientsFinish(
        producer,
        consumerNames.get(producerNames.indexOf(producer)),
        Constants.DRAIN_CLEANER_NAMESPACE,
        300));

    // Clean up the client Jobs once all messages have been exchanged.
    producerNames.forEach(producer -> KubeClusterResource.kubeClient().deleteJob(producer));
    consumerNames.forEach(consumer -> KubeClusterResource.kubeClient().deleteJob(consumer));
}
Usage of io.strimzi.systemtest.annotations.MultiNodeClusterOnly in the strimzi project by strimzi — class ClusterOperationIsolatedST, method testAvailabilityDuringNodeDrain:
@IsolatedTest
@MultiNodeClusterOnly
@RequiredMinKubeApiVersion(version = 1.15)
void testAvailabilityDuringNodeDrain(ExtensionContext extensionContext) {
    // Verifies message availability: continuous producers/consumers must finish
    // successfully while every worker node is drained and then cordoned.
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int size = 5;
    // Index-aligned name lists: producer i writes topic i, consumer i reads it in group i.
    List<String> topicNames = IntStream.range(0, size).boxed()
        .map(i -> "test-topic-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed()
        .map(i -> "hello-world-producer-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed()
        .map(i -> "hello-world-consumer-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed()
        .map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
    int continuousClientsMessageCount = 300;

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
            .editOrNewSpec()
                .editEntityOperator()
                    .editUserOperator()
                        .withReconciliationIntervalSeconds(30)
                    .endUserOperator()
                .endEntityOperator()
            .endSpec()
            .build());

    // Replication factor 3 / min ISR 2 so topics survive a single node going away.
    topicNames.forEach(topicName -> resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(clusterName, topicName, 3, 3, 2).build()));

    // Short timeouts so a stuck broker surfaces as a client failure instead of a hang.
    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";

    KafkaClients kafkaBasicClientResource;
    for (int i = 0; i < size; i++) {
        kafkaBasicClientResource = new KafkaClientsBuilder()
            .withProducerName(producerNames.get(i))
            .withConsumerName(consumerNames.get(i))
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
            .withTopicName(topicNames.get(i))
            .withMessageCount(continuousClientsMessageCount)
            .withAdditionalConfig(producerAdditionConfiguration)
            .withConsumerGroup(continuousConsumerGroups.get(i))
            .withDelayMs(1000)
            .build();
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.producerStrimzi());
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.consumerStrimzi());
    }

    // ##############################
    // Nodes draining
    // ##############################
    kubeClient().getClusterWorkers().forEach(node -> {
        NodeUtils.drainNode(node.getMetadata().getName());
        NodeUtils.cordonNode(node.getMetadata().getName(), true);
    });

    // BUG FIX: the original used producerName.indexOf(producerName) — that is
    // String#indexOf of the string within itself, which is always 0, so every
    // producer was paired with consumerNames.get(0). Use the LIST index so
    // producer i is matched with consumer i.
    producerNames.forEach(producerName -> ClientUtils.waitTillContinuousClientsFinish(
        producerName,
        consumerNames.get(producerNames.indexOf(producerName)),
        NAMESPACE,
        continuousClientsMessageCount));

    // Clean up the client Jobs once all messages have been exchanged.
    producerNames.forEach(producerName -> kubeClient().deleteJob(producerName));
    consumerNames.forEach(consumerName -> kubeClient().deleteJob(consumerName));
}
Usage of io.strimzi.systemtest.annotations.MultiNodeClusterOnly in the strimzi-kafka-operator project by strimzi — class DrainCleanerIsolatedST, method testDrainCleanerWithComponentsDuringNodeDraining:
@IsolatedTest
@MultiNodeClusterOnly
void testDrainCleanerWithComponentsDuringNodeDraining(ExtensionContext extensionContext) {
    // Verifies that the Drain Cleaner rolls Kafka/ZooKeeper pods safely while their
    // nodes are drained, and that continuous clients keep exchanging messages.
    TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
    String rackKey = "rack-key";
    final int replicas = 3;
    int size = 5;

    // Index-aligned name lists: producer i writes topic i, consumer i reads it in group i.
    List<String> topicNames = IntStream.range(0, size).boxed()
        .map(i -> testStorage.getTopicName() + "-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed()
        .map(i -> testStorage.getProducerName() + "-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed()
        .map(i -> testStorage.getConsumerName() + "-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed()
        .map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());

    // PDB maxUnavailable=0 hands eviction handling over to the Drain Cleaner;
    // the anti-affinity spreads Kafka and ZooKeeper pods across nodes via rackKey.
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
            .editMetadata()
                .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
            .endMetadata()
            .editSpec()
                .editKafka()
                    .withNewRack()
                        .withTopologyKey(rackKey)
                    .endRack()
                    .editOrNewTemplate()
                        .editOrNewPodDisruptionBudget()
                            .withMaxUnavailable(0)
                        .endPodDisruptionBudget()
                        .withNewPod()
                            .withAffinity(new AffinityBuilder()
                                .withNewPodAntiAffinity()
                                    .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                        .editOrNewLabelSelector()
                                            .addNewMatchExpression()
                                                .withKey(rackKey)
                                                .withOperator("In")
                                                .withValues("zone")
                                            .endMatchExpression()
                                        .endLabelSelector()
                                        .withTopologyKey(rackKey)
                                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                                .endPodAntiAffinity()
                                .build())
                        .endPod()
                    .endTemplate()
                .endKafka()
                .editZookeeper()
                    .editOrNewTemplate()
                        .editOrNewPodDisruptionBudget()
                            .withMaxUnavailable(0)
                        .endPodDisruptionBudget()
                        .withNewPod()
                            .withAffinity(new AffinityBuilder()
                                .withNewPodAntiAffinity()
                                    .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                        .editOrNewLabelSelector()
                                            .addNewMatchExpression()
                                                .withKey(rackKey)
                                                .withOperator("In")
                                                .withValues("zone")
                                            .endMatchExpression()
                                        .endLabelSelector()
                                        .withTopologyKey(rackKey)
                                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                                .endPodAntiAffinity()
                                .build())
                        .endPod()
                    .endTemplate()
                .endZookeeper()
            .endSpec()
            .build());

    topicNames.forEach(topic -> resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(testStorage.getClusterName(), topic, 3, 3, 2)
            .editMetadata()
                .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
            .endMetadata()
            .build()));

    drainCleaner.createDrainCleaner(extensionContext);

    // CLEANUP: removed unused locals kafkaName/zkName — they were computed from
    // KafkaResources.kafkaStatefulSetName/zookeeperStatefulSetName but never read.

    Map<String, List<String>> nodesWithPods =
        NodeUtils.getPodsForEachNodeInNamespace(Constants.DRAIN_CLEANER_NAMESPACE);
    // Keep only pods whose names contain "kafka" or "zookeeper"; drop everything else.
    nodesWithPods.forEach((node, podlist) -> podlist.retainAll(podlist.stream()
        .filter(podName -> (podName.contains("kafka") || podName.contains("zookeeper")))
        .collect(Collectors.toList())));

    // Short timeouts so a stuck broker surfaces as a client failure instead of a hang.
    String producerAdditionConfiguration = "delivery.timeout.ms=30000\nrequest.timeout.ms=30000";

    KafkaClients kafkaBasicExampleClients;
    for (int i = 0; i < size; i++) {
        kafkaBasicExampleClients = new KafkaClientsBuilder()
            .withProducerName(producerNames.get(i))
            .withConsumerName(consumerNames.get(i))
            .withTopicName(topicNames.get(i))
            .withConsumerGroup(continuousConsumerGroups.get(i))
            .withMessageCount(300)
            .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
            .withDelayMs(1000)
            .withAdditionalConfig(producerAdditionConfiguration)
            .build();
        resourceManager.createResource(extensionContext,
            kafkaBasicExampleClients.producerStrimzi(),
            kafkaBasicExampleClients.consumerStrimzi());
    }

    LOGGER.info("Starting Node drain");
    nodesWithPods.forEach((nodeName, podList) -> {
        String zkPodName = podList.stream()
            .filter(podName -> podName.contains("zookeeper")).findFirst().get();
        String kafkaPodName = podList.stream()
            .filter(podName -> podName.contains("kafka")).findFirst().get();
        // Snapshot only the pods on this node so the rolling check targets them.
        Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
            .entrySet().stream()
            .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector())
            .entrySet().stream()
            .filter(snapshot -> snapshot.getKey().equals(zkPodName))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        NodeUtils.drainNode(nodeName);
        NodeUtils.cordonNode(nodeName, true);

        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(
            Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(
            Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
    });

    // Pair each producer with its index-aligned consumer and wait for completion.
    producerNames.forEach(producer -> ClientUtils.waitTillContinuousClientsFinish(
        producer,
        consumerNames.get(producerNames.indexOf(producer)),
        Constants.DRAIN_CLEANER_NAMESPACE,
        300));

    // Clean up the client Jobs once all messages have been exchanged.
    producerNames.forEach(producer -> KubeClusterResource.kubeClient().deleteJob(producer));
    consumerNames.forEach(consumer -> KubeClusterResource.kubeClient().deleteJob(consumer));
}
Aggregations