Search in sources :

Example 1 with PodAffinity

use of io.fabric8.kubernetes.api.model.PodAffinity in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class KafkaCluster method buildKafkaTemplate.

private KafkaClusterTemplate buildKafkaTemplate(ManagedKafka managedKafka) {
    AffinityBuilder affinityBuilder = new AffinityBuilder();
    // ensures even distribution of the Kafka pods in a given cluster across the availability zones
    // the previous affinity make sure single per node or not
    // this only comes into picture when there are more number of nodes than the brokers
    PodTemplateBuilder podTemplateBuilder = new PodTemplateBuilder().withImagePullSecrets(imagePullSecretManager.getOperatorImagePullSecrets(managedKafka)).withTopologySpreadConstraints(azAwareTopologySpreadConstraint(managedKafka.getMetadata().getName() + "-kafka", DO_NOT_SCHEDULE));
    if (this.config.getKafka().isColocateWithZookeeper()) {
        // adds preference to co-locate Kafka broker pods with ZK pods with same cluster label
        PodAffinity zkPodAffinity = OperandUtils.buildZookeeperPodAffinity(managedKafka).getPodAffinity();
        affinityBuilder.withPodAffinity(zkPodAffinity);
        podTemplateBuilder.withAffinity(affinityBuilder.build());
    }
    // add toleration on broker pod such that it can be placed on specific worker nodes
    // note that the affinity/topology stuff make sure they are evenly spread across
    // the availability zone and worker nodes, but all worker nodes are same as
    // some of them will have ZK, admin-server, canary and broker needs to be on its own
    podTemplateBuilder.withTolerations(buildKafkaBrokerToleration());
    KafkaClusterTemplateBuilder templateBuilder = new KafkaClusterTemplateBuilder().withPod(podTemplateBuilder.build());
    if (drainCleanerManager.isDrainCleanerWebhookFound()) {
        templateBuilder.withPodDisruptionBudget(new PodDisruptionBudgetTemplateBuilder().withMaxUnavailable(0).build());
    }
    return templateBuilder.build();
}
Also used : PodAffinity(io.fabric8.kubernetes.api.model.PodAffinity) PodTemplateBuilder(io.strimzi.api.kafka.model.template.PodTemplateBuilder) PodAntiAffinityBuilder(io.fabric8.kubernetes.api.model.PodAntiAffinityBuilder) AffinityBuilder(io.fabric8.kubernetes.api.model.AffinityBuilder) KafkaClusterTemplateBuilder(io.strimzi.api.kafka.model.template.KafkaClusterTemplateBuilder) PodDisruptionBudgetTemplateBuilder(io.strimzi.api.kafka.model.template.PodDisruptionBudgetTemplateBuilder)

Example 2 with PodAffinity

use of io.fabric8.kubernetes.api.model.PodAffinity in project strimzi by strimzi.

the class RackAwarenessST method testConnectRackAwareness.

@Tag(CONNECT)
@ParallelNamespaceTest
void testConnectRackAwareness(ExtensionContext extensionContext) {
    Assumptions.assumeFalse(Environment.isNamespaceRbacScope());
    TestStorage storage = storageMap.get(extensionContext);
    String invalidTopologyKey = "invalid-topology-key";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(storage.getClusterName(), 1, 1).build());
    LOGGER.info("Deploy KafkaConnect with an invalid topology key: {}", invalidTopologyKey);
    resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnect(storage.getClusterName(), 1).editSpec().withNewRack(invalidTopologyKey).endSpec().build());
    LOGGER.info("Check that KafkaConnect pod is unschedulable");
    KafkaConnectUtils.waitForConnectPodCondition("Unschedulable", storage.getNamespaceName(), storage.getClusterName(), 30_000);
    LOGGER.info("Fix KafkaConnect with a valid topology key: {}", TOPOLOGY_KEY);
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(storage.getClusterName(), kc -> kc.getSpec().setRack(new Rack(TOPOLOGY_KEY)), storage.getNamespaceName());
    KafkaConnectUtils.waitForConnectReady(storage.getNamespaceName(), storage.getClusterName());
    LOGGER.info("KafkaConnect cluster deployed successfully");
    String deployName = KafkaConnectResources.deploymentName(storage.getClusterName());
    String podName = PodUtils.getPodNameByPrefix(storage.getNamespaceName(), deployName);
    Pod pod = kubeClient().getPod(storage.getNamespaceName(), podName);
    // check that spec matches the actual pod configuration
    Affinity specAffinity = kubeClient().getDeployment(storage.getNamespaceName(), deployName).getSpec().getTemplate().getSpec().getAffinity();
    NodeSelectorRequirement specNodeRequirement = specAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    NodeAffinity podAffinity = pod.getSpec().getAffinity().getNodeAffinity();
    NodeSelectorRequirement podNodeRequirement = podAffinity.getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(podNodeRequirement, is(specNodeRequirement));
    assertThat(podNodeRequirement.getKey(), is(TOPOLOGY_KEY));
    assertThat(podNodeRequirement.getOperator(), is("Exists"));
    // check Kafka client rack awareness configuration
    String podNodeName = pod.getSpec().getNodeName();
    String hostname = podNodeName.contains(".") ? podNodeName.substring(0, podNodeName.indexOf(".")) : podNodeName;
    String commandOut = cmdKubeClient(storage.getNamespaceName()).execInPod(podName, "/bin/bash", "-c", "cat /tmp/strimzi-connect.properties | grep consumer.client.rack").out().trim();
    assertThat(commandOut.equals("consumer.client.rack=" + hostname), is(true));
}
Also used : Rack(io.strimzi.api.kafka.model.Rack) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) Pod(io.fabric8.kubernetes.api.model.Pod) Affinity(io.fabric8.kubernetes.api.model.Affinity) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) NodeSelectorRequirement(io.fabric8.kubernetes.api.model.NodeSelectorRequirement) TestStorage(io.strimzi.systemtest.storage.TestStorage) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)

Example 3 with PodAffinity

use of io.fabric8.kubernetes.api.model.PodAffinity in project strimzi-kafka-operator by strimzi.

the class RackAwarenessST method testConnectRackAwareness.

@Tag(CONNECT)
@ParallelNamespaceTest
void testConnectRackAwareness(ExtensionContext extensionContext) {
    Assumptions.assumeFalse(Environment.isNamespaceRbacScope());
    TestStorage storage = storageMap.get(extensionContext);
    String invalidTopologyKey = "invalid-topology-key";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(storage.getClusterName(), 1, 1).build());
    LOGGER.info("Deploy KafkaConnect with an invalid topology key: {}", invalidTopologyKey);
    resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnect(storage.getClusterName(), 1).editSpec().withNewRack(invalidTopologyKey).endSpec().build());
    LOGGER.info("Check that KafkaConnect pod is unschedulable");
    KafkaConnectUtils.waitForConnectPodCondition("Unschedulable", storage.getNamespaceName(), storage.getClusterName(), 30_000);
    LOGGER.info("Fix KafkaConnect with a valid topology key: {}", TOPOLOGY_KEY);
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(storage.getClusterName(), kc -> kc.getSpec().setRack(new Rack(TOPOLOGY_KEY)), storage.getNamespaceName());
    KafkaConnectUtils.waitForConnectReady(storage.getNamespaceName(), storage.getClusterName());
    LOGGER.info("KafkaConnect cluster deployed successfully");
    String deployName = KafkaConnectResources.deploymentName(storage.getClusterName());
    String podName = PodUtils.getPodNameByPrefix(storage.getNamespaceName(), deployName);
    Pod pod = kubeClient().getPod(storage.getNamespaceName(), podName);
    // check that spec matches the actual pod configuration
    Affinity specAffinity = kubeClient().getDeployment(storage.getNamespaceName(), deployName).getSpec().getTemplate().getSpec().getAffinity();
    NodeSelectorRequirement specNodeRequirement = specAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    NodeAffinity podAffinity = pod.getSpec().getAffinity().getNodeAffinity();
    NodeSelectorRequirement podNodeRequirement = podAffinity.getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(podNodeRequirement, is(specNodeRequirement));
    assertThat(podNodeRequirement.getKey(), is(TOPOLOGY_KEY));
    assertThat(podNodeRequirement.getOperator(), is("Exists"));
    // check Kafka client rack awareness configuration
    String podNodeName = pod.getSpec().getNodeName();
    String hostname = podNodeName.contains(".") ? podNodeName.substring(0, podNodeName.indexOf(".")) : podNodeName;
    String commandOut = cmdKubeClient(storage.getNamespaceName()).execInPod(podName, "/bin/bash", "-c", "cat /tmp/strimzi-connect.properties | grep consumer.client.rack").out().trim();
    assertThat(commandOut.equals("consumer.client.rack=" + hostname), is(true));
}
Also used : Rack(io.strimzi.api.kafka.model.Rack) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) Pod(io.fabric8.kubernetes.api.model.Pod) Affinity(io.fabric8.kubernetes.api.model.Affinity) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) NodeSelectorRequirement(io.fabric8.kubernetes.api.model.NodeSelectorRequirement) TestStorage(io.strimzi.systemtest.storage.TestStorage) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)

Example 4 with PodAffinity

use of io.fabric8.kubernetes.api.model.PodAffinity in project strimzi-kafka-operator by strimzi.

the class RackAwarenessST method testKafkaRackAwareness.

@ParallelNamespaceTest
void testKafkaRackAwareness(ExtensionContext extensionContext) {
    Assumptions.assumeFalse(Environment.isNamespaceRbacScope());
    TestStorage storage = storageMap.get(extensionContext);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(storage.getClusterName(), 1, 1).editSpec().editKafka().withNewRack(TOPOLOGY_KEY).addToConfig("replica.selector.class", "org.apache.kafka.common.replica.RackAwareReplicaSelector").endKafka().endSpec().build());
    LOGGER.info("Kafka cluster deployed successfully");
    String ssName = KafkaResources.kafkaStatefulSetName(storage.getClusterName());
    String podName = PodUtils.getPodNameByPrefix(storage.getNamespaceName(), ssName);
    Pod pod = kubeClient().getPod(storage.getNamespaceName(), podName);
    // check that spec matches the actual pod configuration
    Affinity specAffinity = StUtils.getStatefulSetOrStrimziPodSetAffinity(storage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(storage.getClusterName()));
    NodeSelectorRequirement specNodeRequirement = specAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    NodeAffinity podAffinity = pod.getSpec().getAffinity().getNodeAffinity();
    NodeSelectorRequirement podNodeRequirement = podAffinity.getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(podNodeRequirement, is(specNodeRequirement));
    assertThat(specNodeRequirement.getKey(), is(TOPOLOGY_KEY));
    assertThat(specNodeRequirement.getOperator(), is("Exists"));
    PodAffinityTerm specPodAntiAffinityTerm = specAffinity.getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution().get(0).getPodAffinityTerm();
    PodAffinityTerm podAntiAffinityTerm = pod.getSpec().getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution().get(0).getPodAffinityTerm();
    assertThat(podAntiAffinityTerm, is(specPodAntiAffinityTerm));
    assertThat(specPodAntiAffinityTerm.getTopologyKey(), is(TOPOLOGY_KEY));
    assertThat(specPodAntiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/cluster", storage.getClusterName()));
    assertThat(specPodAntiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/name", KafkaResources.kafkaStatefulSetName(storage.getClusterName())));
    // check Kafka rack awareness configuration
    String podNodeName = pod.getSpec().getNodeName();
    String hostname = podNodeName.contains(".") ? podNodeName.substring(0, podNodeName.indexOf(".")) : podNodeName;
    String rackIdOut = cmdKubeClient(storage.getNamespaceName()).execInPod(KafkaResources.kafkaPodName(storage.getClusterName(), 0), "/bin/bash", "-c", "cat /opt/kafka/init/rack.id").out().trim();
    String brokerRackOut = cmdKubeClient(storage.getNamespaceName()).execInPod(KafkaResources.kafkaPodName(storage.getClusterName(), 0), "/bin/bash", "-c", "cat /tmp/strimzi.properties | grep broker.rack").out().trim();
    assertThat(rackIdOut.trim(), is(hostname));
    assertThat(brokerRackOut.contains("broker.rack=" + hostname), is(true));
}
Also used : NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) PodAffinityTerm(io.fabric8.kubernetes.api.model.PodAffinityTerm) Pod(io.fabric8.kubernetes.api.model.Pod) Affinity(io.fabric8.kubernetes.api.model.Affinity) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) NodeSelectorRequirement(io.fabric8.kubernetes.api.model.NodeSelectorRequirement) TestStorage(io.strimzi.systemtest.storage.TestStorage) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)

Example 5 with PodAffinity

use of io.fabric8.kubernetes.api.model.PodAffinity in project syndesis-qe by syndesisio.

the class OperatorValidationSteps method checkAffinity.

@When("^check (affinity|tolerations)( not set)? for (infra|integration) pods$")
public void checkAffinity(String test, String notSet, String method) {
    List<Pod> pods = "infra".equals(method) ? ComponentUtils.getComponentPods().stream().filter(p -> !p.getMetadata().getName().contains("operator")).collect(Collectors.toList()) : OpenShiftUtils.findPodsByPredicates(p -> "integration".equals(p.getMetadata().getLabels().get("syndesis.io/type")));
    for (Pod p : pods) {
        String name = p.getMetadata().getName();
        if ("affinity".equals(test)) {
            Affinity podAffinity = p.getSpec().getAffinity();
            if (notSet == null) {
                assertThat(podAffinity).as(name + ": affinity is null").isNotNull();
                NodeAffinity nodeAffinity = podAffinity.getNodeAffinity();
                assertThat(nodeAffinity).as(name + ": node affinity is null").isNotNull();
                NodeSelector selector = nodeAffinity.getRequiredDuringSchedulingIgnoredDuringExecution();
                assertThat(selector).as(name + ": required is null").isNotNull();
                List<NodeSelectorTerm> terms = selector.getNodeSelectorTerms();
                assertThat(terms).as(name + ": node selector is null").isNotNull();
                assertThat(terms).as(name + ": node selector size isn't 1").hasSize(1);
            } else {
                assertThat(podAffinity).isNull();
            }
        } else {
            Optional<Toleration> toleration = p.getSpec().getTolerations().stream().filter(t -> "node.kubernetes.io/network-unavailable".equals(t.getKey())).findAny();
            if (notSet == null) {
                assertThat(toleration).as(name + ": Expected toleration setting is not present").isPresent();
            } else {
                assertThat(toleration).as(name + ": Toleration shouldn't be present").isNotPresent();
            }
        }
    }
}
Also used : Quantity(io.fabric8.kubernetes.api.model.Quantity) Arrays(java.util.Arrays) Enumeration(java.util.Enumeration) Date(java.util.Date) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Autowired(org.springframework.beans.factory.annotation.Autowired) TimeoutException(java.util.concurrent.TimeoutException) Random(java.util.Random) StringUtils(org.apache.commons.lang3.StringUtils) JSONObject(org.json.JSONObject) Matcher(java.util.regex.Matcher) IntegrationsEndpoint(io.syndesis.qe.endpoint.IntegrationsEndpoint) Map(java.util.Map) Addon(io.syndesis.qe.addon.Addon) ZipFile(java.util.zip.ZipFile) Syndesis(io.syndesis.qe.resource.impl.Syndesis) Path(java.nio.file.Path) ZipEntry(java.util.zip.ZipEntry) Affinity(io.fabric8.kubernetes.api.model.Affinity) AccountsDirectory(io.syndesis.qe.account.AccountsDirectory) Set(java.util.Set) DeploymentConfig(io.fabric8.openshift.api.model.DeploymentConfig) Jaeger(io.syndesis.qe.resource.impl.Jaeger) DoneablePersistentVolume(io.fabric8.kubernetes.api.model.DoneablePersistentVolume) Collectors(java.util.stream.Collectors) IOUtils(org.apache.commons.io.IOUtils) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) Assertions.fail(org.assertj.core.api.Assertions.fail) S3BucketNameBuilder(io.syndesis.qe.utils.aws.S3BucketNameBuilder) ResourceFactory(io.syndesis.qe.resource.ResourceFactory) Optional(java.util.Optional) Lazy(org.springframework.context.annotation.Lazy) Pattern(java.util.regex.Pattern) Component(io.syndesis.qe.component.Component) OpenShiftWaitUtils(io.syndesis.qe.wait.OpenShiftWaitUtils) SoftAssertions(org.assertj.core.api.SoftAssertions) NodeSelectorTerm(io.fabric8.kubernetes.api.model.NodeSelectorTerm) Then(io.cucumber.java.en.Then) ComponentUtils(io.syndesis.qe.component.ComponentUtils) LocalPortForward(io.fabric8.kubernetes.client.LocalPortForward) HashMap(java.util.HashMap) Yaml(org.yaml.snakeyaml.Yaml) Given(io.cucumber.java.en.Given) DataTable(io.cucumber.datatable.DataTable) Node(io.fabric8.kubernetes.api.model.Node) OutputStream(java.io.OutputStream) PersistentVolumeFluent(io.fabric8.kubernetes.api.model.PersistentVolumeFluent) Endpoints(io.fabric8.kubernetes.api.model.Endpoints) Files(java.nio.file.Files) When(io.cucumber.java.en.When) S3Utils(io.syndesis.qe.utils.aws.S3Utils) FileOutputStream(java.io.FileOutputStream) Pod(io.fabric8.kubernetes.api.model.Pod) FileUtils(org.apache.commons.io.FileUtils) IOException(java.io.IOException) NodeSelector(io.fabric8.kubernetes.api.model.NodeSelector) Toleration(io.fabric8.kubernetes.api.model.Toleration) OpenShiftUtils(io.syndesis.qe.utils.OpenShiftUtils) FileInputStream(java.io.FileInputStream) Account(io.syndesis.qe.account.Account) File(java.io.File) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) HTTPUtils(io.syndesis.qe.utils.http.HTTPUtils) TestUtils(io.syndesis.qe.utils.TestUtils) Paths(java.nio.file.Paths) SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) ExternalDatabase(io.syndesis.qe.resource.impl.ExternalDatabase) JSONArray(org.json.JSONArray) InputStream(java.io.InputStream) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) Pod(io.fabric8.kubernetes.api.model.Pod) Toleration(io.fabric8.kubernetes.api.model.Toleration) Affinity(io.fabric8.kubernetes.api.model.Affinity) NodeAffinity(io.fabric8.kubernetes.api.model.NodeAffinity) NodeSelector(io.fabric8.kubernetes.api.model.NodeSelector) NodeSelectorTerm(io.fabric8.kubernetes.api.model.NodeSelectorTerm) When(io.cucumber.java.en.When)

Aggregations

Affinity (io.fabric8.kubernetes.api.model.Affinity)7 NodeAffinity (io.fabric8.kubernetes.api.model.NodeAffinity)7 Pod (io.fabric8.kubernetes.api.model.Pod)7 NodeSelectorRequirement (io.fabric8.kubernetes.api.model.NodeSelectorRequirement)6 ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)6 TestStorage (io.strimzi.systemtest.storage.TestStorage)6 Tag (org.junit.jupiter.api.Tag)4 PodAffinityTerm (io.fabric8.kubernetes.api.model.PodAffinityTerm)2 Rack (io.strimzi.api.kafka.model.Rack)2 DataTable (io.cucumber.datatable.DataTable)1 Given (io.cucumber.java.en.Given)1 Then (io.cucumber.java.en.Then)1 When (io.cucumber.java.en.When)1 AffinityBuilder (io.fabric8.kubernetes.api.model.AffinityBuilder)1 DoneablePersistentVolume (io.fabric8.kubernetes.api.model.DoneablePersistentVolume)1 Endpoints (io.fabric8.kubernetes.api.model.Endpoints)1 Node (io.fabric8.kubernetes.api.model.Node)1 NodeSelector (io.fabric8.kubernetes.api.model.NodeSelector)1 NodeSelectorTerm (io.fabric8.kubernetes.api.model.NodeSelectorTerm)1 PersistentVolumeFluent (io.fabric8.kubernetes.api.model.PersistentVolumeFluent)1