Example 21 with AffinityBuilder

Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi-kafka-operator by strimzi.

From the class KafkaExporterTest, method testTemplate.

@ParallelTest
public void testTemplate() {
    Map<String, String> depLabels = TestUtils.map("l1", "v1", "l2", "v2", Labels.KUBERNETES_PART_OF_LABEL, "custom-part", Labels.KUBERNETES_MANAGED_BY_LABEL, "custom-managed-by");
    Map<String, String> expectedDepLabels = new HashMap<>(depLabels);
    expectedDepLabels.remove(Labels.KUBERNETES_MANAGED_BY_LABEL);
    Map<String, String> depAnots = TestUtils.map("a1", "v1", "a2", "v2");
    Map<String, String> podLabels = TestUtils.map("l3", "v3", "l4", "v4");
    Map<String, String> podAnots = TestUtils.map("a3", "v3", "a4", "v4");
    Map<String, String> saLabels = TestUtils.map("l5", "v5", "l6", "v6");
    Map<String, String> saAnots = TestUtils.map("a5", "v5", "a6", "v6");
    Affinity affinity = new AffinityBuilder().withNewNodeAffinity().withNewRequiredDuringSchedulingIgnoredDuringExecution().withNodeSelectorTerms(new NodeSelectorTermBuilder().addNewMatchExpression().withKey("key1").withOperator("In").withValues("value1", "value2").endMatchExpression().build()).endRequiredDuringSchedulingIgnoredDuringExecution().endNodeAffinity().build();
    List<Toleration> tolerations = singletonList(new TolerationBuilder().withEffect("NoExecute").withKey("key1").withOperator("Equal").withValue("value1").build());
    TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder().withTopologyKey("kubernetes.io/zone").withMaxSkew(1).withWhenUnsatisfiable("DoNotSchedule").withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()).build();
    TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder().withTopologyKey("kubernetes.io/hostname").withMaxSkew(2).withWhenUnsatisfiable("ScheduleAnyway").withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()).build();
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout)).editSpec().withNewKafkaExporter().withNewTemplate().withNewDeployment().withNewMetadata().withLabels(depLabels).withAnnotations(depAnots).endMetadata().endDeployment().withNewPod().withNewMetadata().withLabels(podLabels).withAnnotations(podAnots).endMetadata().withPriorityClassName("top-priority").withSchedulerName("my-scheduler").withAffinity(affinity).withTolerations(tolerations).withTopologySpreadConstraints(tsc1, tsc2).withEnableServiceLinks(false).endPod().withNewServiceAccount().withNewMetadata().withLabels(saLabels).withAnnotations(saAnots).endMetadata().endServiceAccount().endTemplate().endKafkaExporter().endSpec().build();
    KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS);
    // Check Deployment
    Deployment dep = ke.generateDeployment(true, null, null);
    assertThat(dep.getMetadata().getLabels().entrySet().containsAll(expectedDepLabels.entrySet()), is(true));
    assertThat(dep.getMetadata().getAnnotations().entrySet().containsAll(depAnots.entrySet()), is(true));
    // Check Pods
    assertThat(dep.getSpec().getTemplate().getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true));
    assertThat(dep.getSpec().getTemplate().getMetadata().getAnnotations().entrySet().containsAll(podAnots.entrySet()), is(true));
    assertThat(dep.getSpec().getTemplate().getSpec().getPriorityClassName(), is("top-priority"));
    assertThat(dep.getSpec().getTemplate().getSpec().getSchedulerName(), is("my-scheduler"));
    assertThat(dep.getSpec().getTemplate().getSpec().getAffinity(), is(affinity));
    assertThat(dep.getSpec().getTemplate().getSpec().getTolerations(), is(tolerations));
    assertThat(dep.getSpec().getTemplate().getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, tsc2));
    assertThat(dep.getSpec().getTemplate().getSpec().getEnableServiceLinks(), is(false));
    // Check Service Account
    ServiceAccount sa = ke.generateServiceAccount();
    assertThat(sa.getMetadata().getLabels().entrySet().containsAll(saLabels.entrySet()), is(true));
    assertThat(sa.getMetadata().getAnnotations().entrySet().containsAll(saAnots.entrySet()), is(true));
}
Also used : LabelSelectorBuilder(io.fabric8.kubernetes.api.model.LabelSelectorBuilder) ServiceAccount(io.fabric8.kubernetes.api.model.ServiceAccount) TolerationBuilder(io.fabric8.kubernetes.api.model.TolerationBuilder) HashMap(java.util.HashMap) AffinityBuilder(io.fabric8.kubernetes.api.model.AffinityBuilder) TopologySpreadConstraint(io.fabric8.kubernetes.api.model.TopologySpreadConstraint) TopologySpreadConstraintBuilder(io.fabric8.kubernetes.api.model.TopologySpreadConstraintBuilder) Kafka(io.strimzi.api.kafka.model.Kafka) Deployment(io.fabric8.kubernetes.api.model.apps.Deployment) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) IntOrString(io.fabric8.kubernetes.api.model.IntOrString) Reconciliation(io.strimzi.operator.common.Reconciliation) Toleration(io.fabric8.kubernetes.api.model.Toleration) Affinity(io.fabric8.kubernetes.api.model.Affinity) NodeSelectorTermBuilder(io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder) ParallelTest(io.strimzi.test.annotations.ParallelTest)
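
The chained builder calls above are dense, so here is the node affinity from this test pulled out into a standalone helper. This is a readability sketch only: the class and method names (NodeAffinityExample, requiredNodeAffinity) are invented for illustration, while the builder calls and values are copied verbatim from the test.

import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;
import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder;

public class NodeAffinityExample {
    // Require scheduling only on nodes whose label "key1" has value "value1" or "value2",
    // exactly as the affinity built inline in testTemplate above.
    public static Affinity requiredNodeAffinity() {
        return new AffinityBuilder()
            .withNewNodeAffinity()
                .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                    .withNodeSelectorTerms(new NodeSelectorTermBuilder()
                        .addNewMatchExpression()
                            .withKey("key1")
                            .withOperator("In")
                            .withValues("value1", "value2")
                        .endMatchExpression()
                        .build())
                .endRequiredDuringSchedulingIgnoredDuringExecution()
            .endNodeAffinity()
            .build();
    }
}

The withNewX()/endX() pairs step into and out of nested objects, which is why the inline version in the test reads as one long chain.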

Example 22 with AffinityBuilder

Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi-kafka-operator by strimzi.

From the class ModelUtilsTest, method testParsePodTemplate.

@Test
public void testParsePodTemplate() {
    Kafka kafka = new KafkaBuilder().withNewMetadata().withName("my-cluster").withNamespace("my-namespace").endMetadata().build();
    LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");
    Affinity affinity = new AffinityBuilder().withNewNodeAffinity().withNewRequiredDuringSchedulingIgnoredDuringExecution().withNodeSelectorTerms(new NodeSelectorTermBuilder().addNewMatchExpression().withKey("key1").withOperator("In").withValues("value1", "value2").endMatchExpression().build()).endRequiredDuringSchedulingIgnoredDuringExecution().endNodeAffinity().build();
    List<Toleration> tolerations = singletonList(new TolerationBuilder().withEffect("NoExecute").withKey("key1").withOperator("Equal").withValue("value1").build());
    PodTemplate template = new PodTemplateBuilder().withNewMetadata().withAnnotations(Collections.singletonMap("annoKey", "annoValue")).withLabels(Collections.singletonMap("labelKey", "labelValue")).endMetadata().withSecurityContext(new PodSecurityContextBuilder().withFsGroup(123L).withRunAsGroup(456L).withRunAsUser(789L).build()).withImagePullSecrets(secret1, secret2).withTerminationGracePeriodSeconds(123).withAffinity(affinity).withTolerations(tolerations).build();
    Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka);
    ModelUtils.parsePodTemplate(model, template);
    assertThat(model.templatePodLabels, is(Collections.singletonMap("labelKey", "labelValue")));
    assertThat(model.templatePodAnnotations, is(Collections.singletonMap("annoKey", "annoValue")));
    assertThat(model.templateTerminationGracePeriodSeconds, is(123));
    assertThat(model.templateImagePullSecrets.size(), is(2));
    assertThat(model.templateImagePullSecrets.contains(secret1), is(true));
    assertThat(model.templateImagePullSecrets.contains(secret2), is(true));
    assertThat(model.templateSecurityContext, is(notNullValue()));
    assertThat(model.templateSecurityContext.getFsGroup(), is(Long.valueOf(123)));
    assertThat(model.templateSecurityContext.getRunAsGroup(), is(Long.valueOf(456)));
    assertThat(model.templateSecurityContext.getRunAsUser(), is(Long.valueOf(789)));
    assertThat(model.getUserAffinity(), is(affinity));
    assertThat(model.getTolerations(), is(tolerations));
}
Also used : PodTemplateBuilder(io.strimzi.api.kafka.model.template.PodTemplateBuilder) TolerationBuilder(io.fabric8.kubernetes.api.model.TolerationBuilder) AffinityBuilder(io.fabric8.kubernetes.api.model.AffinityBuilder) Kafka(io.strimzi.api.kafka.model.Kafka) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) PodSecurityContextBuilder(io.fabric8.kubernetes.api.model.PodSecurityContextBuilder) LocalObjectReference(io.fabric8.kubernetes.api.model.LocalObjectReference) Toleration(io.fabric8.kubernetes.api.model.Toleration) Affinity(io.fabric8.kubernetes.api.model.Affinity) NodeSelectorTermBuilder(io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder) PodTemplate(io.strimzi.api.kafka.model.template.PodTemplate) ParallelTest(io.strimzi.test.annotations.ParallelTest) Test(org.junit.jupiter.api.Test)
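
For readability, the PodTemplate built inline above can be pulled apart into a helper. This is a sketch only: the class and method names (PodTemplateExample, podTemplate) are invented for illustration, while every builder call and value is taken from the test.

import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.LocalObjectReference;
import io.fabric8.kubernetes.api.model.PodSecurityContextBuilder;
import io.fabric8.kubernetes.api.model.Toleration;
import io.strimzi.api.kafka.model.template.PodTemplate;
import io.strimzi.api.kafka.model.template.PodTemplateBuilder;
import java.util.Collections;
import java.util.List;

public class PodTemplateExample {
    // The same Strimzi PodTemplate that testParsePodTemplate builds inline:
    // custom labels and annotations, a pod security context, two image pull
    // secrets, a termination grace period, plus the affinity and tolerations.
    public static PodTemplate podTemplate(Affinity affinity, List<Toleration> tolerations) {
        return new PodTemplateBuilder()
            .withNewMetadata()
                .withAnnotations(Collections.singletonMap("annoKey", "annoValue"))
                .withLabels(Collections.singletonMap("labelKey", "labelValue"))
            .endMetadata()
            .withSecurityContext(new PodSecurityContextBuilder()
                .withFsGroup(123L)
                .withRunAsGroup(456L)
                .withRunAsUser(789L)
                .build())
            .withImagePullSecrets(new LocalObjectReference("some-pull-secret"),
                    new LocalObjectReference("some-other-pull-secret"))
            .withTerminationGracePeriodSeconds(123)
            .withAffinity(affinity)
            .withTolerations(tolerations)
            .build();
    }
}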

Example 23 with AffinityBuilder

Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi-kafka-operator by strimzi.

From the class KafkaRollerIsolatedST, method testKafkaPodPendingDueToRack.

@ParallelNamespaceTest
void testKafkaPodPendingDueToRack(ExtensionContext extensionContext) {
    // Testing this scenario:
    // 1. deploy Kafka with a wrong pod template (looking for a nonexistent node); Kafka pods should not exist
    // 2. wait for Kafka to become NotReady; Kafka pods should be in the Pending state
    // 3. fix the Kafka CR; Kafka pods should be in the Pending state
    // 4. wait for Kafka to become Ready; Kafka pods should NOT be in the Pending state
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    NodeSelectorRequirement nsr = new NodeSelectorRequirementBuilder().withKey("dedicated_test").withOperator("In").withValues("Kafka").build();
    NodeSelectorTerm nst = new NodeSelectorTermBuilder().withMatchExpressions(nsr).build();
    Affinity affinity = new AffinityBuilder().withNewNodeAffinity().withNewRequiredDuringSchedulingIgnoredDuringExecution().withNodeSelectorTerms(nst).endRequiredDuringSchedulingIgnoredDuringExecution().endNodeAffinity().build();
    PodTemplate pt = new PodTemplate();
    pt.setAffinity(affinity);
    KafkaClusterTemplate kct = new KafkaClusterTemplateBuilder().withPod(pt).build();
    resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3).editSpec().editKafka().withTemplate(kct).endKafka().endSpec().build());
    // pods are stable in the Pending state
    PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), 3);
    LOGGER.info("Removing requirement for the affinity");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().getKafka().getTemplate().getPod().setAffinity(null), namespaceName);
    // Kafka should get back to Ready within a reasonable time frame
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
    KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
    KafkaUtils.waitForKafkaDeletion(namespaceName, clusterName);
}
Also used : NodeSelectorRequirementBuilder(io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder) AffinityBuilder(io.fabric8.kubernetes.api.model.AffinityBuilder) KafkaClusterTemplateBuilder(io.strimzi.api.kafka.model.template.KafkaClusterTemplateBuilder) NodeSelectorRequirement(io.fabric8.kubernetes.api.model.NodeSelectorRequirement) Affinity(io.fabric8.kubernetes.api.model.Affinity) KafkaClusterTemplate(io.strimzi.api.kafka.model.template.KafkaClusterTemplate) NodeSelectorTerm(io.fabric8.kubernetes.api.model.NodeSelectorTerm) NodeSelectorTermBuilder(io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder) PodTemplate(io.strimzi.api.kafka.model.template.PodTemplate) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
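
The interesting part of this test is the template that keeps the Kafka pods Pending. Here is that construction pulled out of the test body as a minimal sketch; the class and method names (PendingAffinityExample, unsatisfiableTemplate) are illustrative, the calls and values are the ones used above.

import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;
import io.fabric8.kubernetes.api.model.NodeSelectorRequirement;
import io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder;
import io.fabric8.kubernetes.api.model.NodeSelectorTerm;
import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder;
import io.strimzi.api.kafka.model.template.KafkaClusterTemplate;
import io.strimzi.api.kafka.model.template.KafkaClusterTemplateBuilder;
import io.strimzi.api.kafka.model.template.PodTemplate;

public class PendingAffinityExample {
    // Same template the test injects into spec.kafka.template: a required node
    // affinity on a node label (dedicated_test in Kafka) that no node carries,
    // so the Kafka pods stay Pending until the affinity is removed again.
    public static KafkaClusterTemplate unsatisfiableTemplate() {
        NodeSelectorRequirement nsr = new NodeSelectorRequirementBuilder()
            .withKey("dedicated_test")
            .withOperator("In")
            .withValues("Kafka")
            .build();
        NodeSelectorTerm nst = new NodeSelectorTermBuilder()
            .withMatchExpressions(nsr)
            .build();
        Affinity affinity = new AffinityBuilder()
            .withNewNodeAffinity()
                .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                    .withNodeSelectorTerms(nst)
                .endRequiredDuringSchedulingIgnoredDuringExecution()
            .endNodeAffinity()
            .build();
        PodTemplate pt = new PodTemplate();
        pt.setAffinity(affinity);
        return new KafkaClusterTemplateBuilder().withPod(pt).build();
    }
}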

Example 24 with AffinityBuilder

Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi-kafka-operator by strimzi.

From the class DrainCleanerIsolatedST, method testDrainCleanerWithComponentsDuringNodeDraining.

@IsolatedTest
@MultiNodeClusterOnly
void testDrainCleanerWithComponentsDuringNodeDraining(ExtensionContext extensionContext) {
    TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
    String rackKey = "rack-key";
    final int replicas = 3;
    int size = 5;
    List<String> topicNames = IntStream.range(0, size).boxed().map(i -> testStorage.getTopicName() + "-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getProducerName() + "-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getConsumerName() + "-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed().map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas).editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata().editSpec().editKafka().withNewRack().withTopologyKey(rackKey).endRack().editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().withNewPod().withAffinity(new AffinityBuilder().withNewPodAntiAffinity().addNewRequiredDuringSchedulingIgnoredDuringExecution().editOrNewLabelSelector().addNewMatchExpression().withKey(rackKey).withOperator("In").withValues("zone").endMatchExpression().endLabelSelector().withTopologyKey(rackKey).endRequiredDuringSchedulingIgnoredDuringExecution().endPodAntiAffinity().build()).endPod().endTemplate().endKafka().editZookeeper().editOrNewTemplate().editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget().withNewPod().withAffinity(new AffinityBuilder().withNewPodAntiAffinity().addNewRequiredDuringSchedulingIgnoredDuringExecution().editOrNewLabelSelector().addNewMatchExpression().withKey(rackKey).withOperator("In").withValues("zone").endMatchExpression().endLabelSelector().withTopologyKey(rackKey).endRequiredDuringSchedulingIgnoredDuringExecution().endPodAntiAffinity().build()).endPod().endTemplate().endZookeeper().endSpec().build());
    topicNames.forEach(topic -> resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), topic, 3, 3, 2).editMetadata().withNamespace(Constants.DRAIN_CLEANER_NAMESPACE).endMetadata().build()));
    drainCleaner.createDrainCleaner(extensionContext);
    String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
    String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
    Map<String, List<String>> nodesWithPods = NodeUtils.getPodsForEachNodeInNamespace(Constants.DRAIN_CLEANER_NAMESPACE);
    // remove from the map all pods whose names do not contain "kafka" or "zookeeper"
    nodesWithPods.forEach((node, podlist) -> podlist.retainAll(podlist.stream().filter(podName -> (podName.contains("kafka") || podName.contains("zookeeper"))).collect(Collectors.toList())));
    String producerAdditionConfiguration = "delivery.timeout.ms=30000\nrequest.timeout.ms=30000";
    KafkaClients kafkaBasicExampleClients;
    for (int i = 0; i < size; i++) {
        kafkaBasicExampleClients = new KafkaClientsBuilder().withProducerName(producerNames.get(i)).withConsumerName(consumerNames.get(i)).withTopicName(topicNames.get(i)).withConsumerGroup(continuousConsumerGroups.get(i)).withMessageCount(300).withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE).withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())).withDelayMs(1000).withAdditionalConfig(producerAdditionConfiguration).build();
        resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
    }
    LOGGER.info("Starting Node drain");
    nodesWithPods.forEach((nodeName, podList) -> {
        String zkPodName = podList.stream().filter(podName -> podName.contains("zookeeper")).findFirst().get();
        String kafkaPodName = podList.stream().filter(podName -> podName.contains("kafka")).findFirst().get();
        Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(kafkaPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector()).entrySet().stream().filter(snapshot -> snapshot.getKey().equals(zkPodName)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        NodeUtils.drainNode(nodeName);
        NodeUtils.cordonNode(nodeName, true);
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
    });
    producerNames.forEach(producer -> ClientUtils.waitTillContinuousClientsFinish(producer, consumerNames.get(producerNames.indexOf(producer)), Constants.DRAIN_CLEANER_NAMESPACE, 300));
    producerNames.forEach(producer -> KubeClusterResource.kubeClient().deleteJob(producer));
    consumerNames.forEach(consumer -> KubeClusterResource.kubeClient().deleteJob(consumer));
}
Also used : AbstractST(io.strimzi.systemtest.AbstractST) IntStream(java.util.stream.IntStream) ResourceManager.kubeClient(io.strimzi.systemtest.resources.ResourceManager.kubeClient) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) TestStorage(io.strimzi.systemtest.storage.TestStorage) NodeUtils(io.strimzi.systemtest.utils.kubeUtils.objects.NodeUtils) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) KubeClusterResource(io.strimzi.test.k8s.KubeClusterResource) BeforeAll(org.junit.jupiter.api.BeforeAll) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) MultiNodeClusterOnly(io.strimzi.systemtest.annotations.MultiNodeClusterOnly) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) BeforeAllOnce(io.strimzi.systemtest.BeforeAllOnce) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) IsolatedSuite(io.strimzi.systemtest.annotations.IsolatedSuite) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Constants(io.strimzi.systemtest.Constants) KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) Collectors(java.util.stream.Collectors) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) AffinityBuilder(io.fabric8.kubernetes.api.model.AffinityBuilder) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) RequiredMinKubeApiVersion(io.strimzi.systemtest.annotations.RequiredMinKubeApiVersion) AfterEach(org.junit.jupiter.api.AfterEach) List(java.util.List) SetupDrainCleaner(io.strimzi.systemtest.resources.draincleaner.SetupDrainCleaner) Logger(org.apache.logging.log4j.Logger) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) LogManager(org.apache.logging.log4j.LogManager) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION)
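
The affinity argument in the long Kafka builder chain above is a required pod anti-affinity, built twice with identical settings (once for the Kafka pod template, once for the ZooKeeper one). Here it is factored into a helper as a minimal sketch; the class and method names (ZoneAntiAffinityExample, zoneAntiAffinity) are illustrative, the builder calls are copied from the test.

import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;

public class ZoneAntiAffinityExample {
    // Required pod anti-affinity: pods carrying the label rackKey=zone must not
    // share the topology domain identified by the rackKey node label, which
    // spreads the brokers and ZooKeeper nodes across zones/nodes.
    public static Affinity zoneAntiAffinity(String rackKey) {
        return new AffinityBuilder()
            .withNewPodAntiAffinity()
                .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                    .editOrNewLabelSelector()
                        .addNewMatchExpression()
                            .withKey(rackKey)
                            .withOperator("In")
                            .withValues("zone")
                        .endMatchExpression()
                    .endLabelSelector()
                    .withTopologyKey(rackKey)
                .endRequiredDuringSchedulingIgnoredDuringExecution()
            .endPodAntiAffinity()
            .build();
    }
}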

Aggregations

AffinityBuilder (io.fabric8.kubernetes.api.model.AffinityBuilder): 22 usages
Affinity (io.fabric8.kubernetes.api.model.Affinity): 20 usages
NodeSelectorTermBuilder (io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder): 18 usages
Toleration (io.fabric8.kubernetes.api.model.Toleration): 14 usages
TolerationBuilder (io.fabric8.kubernetes.api.model.TolerationBuilder): 14 usages
ParallelTest (io.strimzi.test.annotations.ParallelTest): 14 usages
Kafka (io.strimzi.api.kafka.model.Kafka): 12 usages
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder): 10 usages
LabelSelectorBuilder (io.fabric8.kubernetes.api.model.LabelSelectorBuilder): 8 usages
ServiceAccount (io.fabric8.kubernetes.api.model.ServiceAccount): 8 usages
TopologySpreadConstraint (io.fabric8.kubernetes.api.model.TopologySpreadConstraint): 8 usages
TopologySpreadConstraintBuilder (io.fabric8.kubernetes.api.model.TopologySpreadConstraintBuilder): 8 usages
Deployment (io.fabric8.kubernetes.api.model.apps.Deployment): 8 usages
LocalObjectReference (io.fabric8.kubernetes.api.model.LocalObjectReference): 6 usages
PodSecurityContextBuilder (io.fabric8.kubernetes.api.model.PodSecurityContextBuilder): 6 usages
Quantity (io.fabric8.kubernetes.api.model.Quantity): 6 usages
PodDisruptionBudget (io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget): 6 usages
HostAlias (io.fabric8.kubernetes.api.model.HostAlias): 5 usages
HostAliasBuilder (io.fabric8.kubernetes.api.model.HostAliasBuilder): 5 usages
Reconciliation (io.strimzi.operator.common.Reconciliation): 5 usages