Use of io.fabric8.kubernetes.api.model.PodAffinity in the project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaCluster, method buildKafkaTemplate:
private KafkaClusterTemplate buildKafkaTemplate(ManagedKafka managedKafka) {
    AffinityBuilder affinityBuilder = new AffinityBuilder();
    // the topology spread constraint ensures even distribution of the Kafka pods of a given
    // cluster across the availability zones; the affinity rules only guarantee at most one
    // broker per node, so the spread constraint only comes into play when there are more
    // nodes than brokers
    PodTemplateBuilder podTemplateBuilder = new PodTemplateBuilder()
            .withImagePullSecrets(imagePullSecretManager.getOperatorImagePullSecrets(managedKafka))
            .withTopologySpreadConstraints(azAwareTopologySpreadConstraint(
                    managedKafka.getMetadata().getName() + "-kafka", DO_NOT_SCHEDULE));
    if (this.config.getKafka().isColocateWithZookeeper()) {
        // adds a preference to co-locate Kafka broker pods with ZooKeeper pods carrying the same cluster label
        PodAffinity zkPodAffinity = OperandUtils.buildZookeeperPodAffinity(managedKafka).getPodAffinity();
        affinityBuilder.withPodAffinity(zkPodAffinity);
        podTemplateBuilder.withAffinity(affinityBuilder.build());
    }
    // add a toleration to the broker pods so that they can be placed on dedicated worker nodes;
    // the affinity/topology constraints spread the brokers evenly across availability zones and
    // worker nodes, but not all worker nodes are equal: some host ZooKeeper, the admin server,
    // and the canary, while the brokers need nodes of their own
    podTemplateBuilder.withTolerations(buildKafkaBrokerToleration());
    KafkaClusterTemplateBuilder templateBuilder = new KafkaClusterTemplateBuilder()
            .withPod(podTemplateBuilder.build());
    if (drainCleanerManager.isDrainCleanerWebhookFound()) {
        templateBuilder.withPodDisruptionBudget(
                new PodDisruptionBudgetTemplateBuilder().withMaxUnavailable(0).build());
    }
    return templateBuilder.build();
}
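The helper OperandUtils.buildZookeeperPodAffinity is not shown above. As a rough guide, the sketch below shows how such a PodAffinity can be expressed with the fabric8 builders, assuming the ZooKeeper pods are selected by a strimzi.io/cluster label and co-location is a weighted preference on the node hostname; the label key, topology key, and weight of 100 are illustrative assumptions, not the actual kas-fleetshard values.

import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;

// hypothetical sketch: build an Affinity whose PodAffinity prefers scheduling a broker
// onto the same node (kubernetes.io/hostname) as the ZooKeeper pods of the same cluster
static Affinity buildZookeeperPodAffinityExample(String clusterName) {
    return new AffinityBuilder()
            .withNewPodAffinity()
                .addNewPreferredDuringSchedulingIgnoredDuringExecution()
                    .withWeight(100) // assumed weight; a soft preference, not a hard requirement
                    .withNewPodAffinityTerm()
                        .withTopologyKey("kubernetes.io/hostname") // assumed topology key
                        .withNewLabelSelector()
                            // assumed label; selects the ZooKeeper pods of this cluster
                            .addToMatchLabels("strimzi.io/cluster", clusterName)
                        .endLabelSelector()
                    .endPodAffinityTerm()
                .endPreferredDuringSchedulingIgnoredDuringExecution()
            .endPodAffinity()
            .build();
}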
Use of io.fabric8.kubernetes.api.model.PodAffinity in the projects strimzi and strimzi-kafka-operator by strimzi.
The class RackAwarenessST, method testConnectRackAwareness:
@Tag(CONNECT)
@ParallelNamespaceTest
void testConnectRackAwareness(ExtensionContext extensionContext) {
    Assumptions.assumeFalse(Environment.isNamespaceRbacScope());
    TestStorage storage = storageMap.get(extensionContext);
    String invalidTopologyKey = "invalid-topology-key";

    resourceManager.createResource(extensionContext,
            KafkaTemplates.kafkaEphemeral(storage.getClusterName(), 1, 1).build());

    LOGGER.info("Deploy KafkaConnect with an invalid topology key: {}", invalidTopologyKey);
    resourceManager.createResource(extensionContext, false,
            KafkaConnectTemplates.kafkaConnect(storage.getClusterName(), 1)
                    .editSpec()
                        .withNewRack(invalidTopologyKey)
                    .endSpec()
                    .build());

    LOGGER.info("Check that KafkaConnect pod is unschedulable");
    KafkaConnectUtils.waitForConnectPodCondition("Unschedulable", storage.getNamespaceName(), storage.getClusterName(), 30_000);

    LOGGER.info("Fix KafkaConnect with a valid topology key: {}", TOPOLOGY_KEY);
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(storage.getClusterName(),
            kc -> kc.getSpec().setRack(new Rack(TOPOLOGY_KEY)), storage.getNamespaceName());
    KafkaConnectUtils.waitForConnectReady(storage.getNamespaceName(), storage.getClusterName());
    LOGGER.info("KafkaConnect cluster deployed successfully");

    String deployName = KafkaConnectResources.deploymentName(storage.getClusterName());
    String podName = PodUtils.getPodNameByPrefix(storage.getNamespaceName(), deployName);
    Pod pod = kubeClient().getPod(storage.getNamespaceName(), podName);

    // check that spec matches the actual pod configuration
    Affinity specAffinity = kubeClient().getDeployment(storage.getNamespaceName(), deployName)
            .getSpec().getTemplate().getSpec().getAffinity();
    NodeSelectorRequirement specNodeRequirement = specAffinity.getNodeAffinity()
            .getRequiredDuringSchedulingIgnoredDuringExecution()
            .getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    NodeAffinity podAffinity = pod.getSpec().getAffinity().getNodeAffinity();
    NodeSelectorRequirement podNodeRequirement = podAffinity
            .getRequiredDuringSchedulingIgnoredDuringExecution()
            .getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(podNodeRequirement, is(specNodeRequirement));
    assertThat(podNodeRequirement.getKey(), is(TOPOLOGY_KEY));
    assertThat(podNodeRequirement.getOperator(), is("Exists"));

    // check Kafka client rack awareness configuration
    String podNodeName = pod.getSpec().getNodeName();
    String hostname = podNodeName.contains(".") ? podNodeName.substring(0, podNodeName.indexOf(".")) : podNodeName;
    String commandOut = cmdKubeClient(storage.getNamespaceName()).execInPod(podName, "/bin/bash", "-c",
            "cat /tmp/strimzi-connect.properties | grep consumer.client.rack").out().trim();
    assertThat(commandOut.equals("consumer.client.rack=" + hostname), is(true));
}
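For reference, the node affinity that those getter chains walk through boils down to a single required node selector term with one Exists match expression on the rack topology key. A minimal fabric8 sketch, assuming TOPOLOGY_KEY is a zone label such as topology.kubernetes.io/zone (an assumption, not a value taken from the test class):

import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;

// hypothetical sketch of the node affinity shape the assertions navigate
Affinity expectedAffinity = new AffinityBuilder()
        .withNewNodeAffinity()
            .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                .addNewNodeSelectorTerm()
                    .addNewMatchExpression()
                        .withKey("topology.kubernetes.io/zone") // assumed value of TOPOLOGY_KEY
                        .withOperator("Exists")
                    .endMatchExpression()
                .endNodeSelectorTerm()
            .endRequiredDuringSchedulingIgnoredDuringExecution()
        .endNodeAffinity()
        .build();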
Use of io.fabric8.kubernetes.api.model.PodAffinity in the project strimzi-kafka-operator by strimzi.
The class RackAwarenessST, method testKafkaRackAwareness:
@ParallelNamespaceTest
void testKafkaRackAwareness(ExtensionContext extensionContext) {
    Assumptions.assumeFalse(Environment.isNamespaceRbacScope());
    TestStorage storage = storageMap.get(extensionContext);

    resourceManager.createResource(extensionContext,
            KafkaTemplates.kafkaEphemeral(storage.getClusterName(), 1, 1)
                    .editSpec()
                        .editKafka()
                            .withNewRack(TOPOLOGY_KEY)
                            .addToConfig("replica.selector.class", "org.apache.kafka.common.replica.RackAwareReplicaSelector")
                        .endKafka()
                    .endSpec()
                    .build());
    LOGGER.info("Kafka cluster deployed successfully");

    String ssName = KafkaResources.kafkaStatefulSetName(storage.getClusterName());
    String podName = PodUtils.getPodNameByPrefix(storage.getNamespaceName(), ssName);
    Pod pod = kubeClient().getPod(storage.getNamespaceName(), podName);

    // check that spec matches the actual pod configuration
    Affinity specAffinity = StUtils.getStatefulSetOrStrimziPodSetAffinity(storage.getNamespaceName(),
            KafkaResources.kafkaStatefulSetName(storage.getClusterName()));
    NodeSelectorRequirement specNodeRequirement = specAffinity.getNodeAffinity()
            .getRequiredDuringSchedulingIgnoredDuringExecution()
            .getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    NodeAffinity podAffinity = pod.getSpec().getAffinity().getNodeAffinity();
    NodeSelectorRequirement podNodeRequirement = podAffinity
            .getRequiredDuringSchedulingIgnoredDuringExecution()
            .getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(podNodeRequirement, is(specNodeRequirement));
    assertThat(specNodeRequirement.getKey(), is(TOPOLOGY_KEY));
    assertThat(specNodeRequirement.getOperator(), is("Exists"));

    PodAffinityTerm specPodAntiAffinityTerm = specAffinity.getPodAntiAffinity()
            .getPreferredDuringSchedulingIgnoredDuringExecution().get(0).getPodAffinityTerm();
    PodAffinityTerm podAntiAffinityTerm = pod.getSpec().getAffinity().getPodAntiAffinity()
            .getPreferredDuringSchedulingIgnoredDuringExecution().get(0).getPodAffinityTerm();
    assertThat(podAntiAffinityTerm, is(specPodAntiAffinityTerm));
    assertThat(specPodAntiAffinityTerm.getTopologyKey(), is(TOPOLOGY_KEY));
    assertThat(specPodAntiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/cluster", storage.getClusterName()));
    assertThat(specPodAntiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/name", KafkaResources.kafkaStatefulSetName(storage.getClusterName())));

    // check Kafka rack awareness configuration
    String podNodeName = pod.getSpec().getNodeName();
    String hostname = podNodeName.contains(".") ? podNodeName.substring(0, podNodeName.indexOf(".")) : podNodeName;
    String rackIdOut = cmdKubeClient(storage.getNamespaceName()).execInPod(KafkaResources.kafkaPodName(storage.getClusterName(), 0),
            "/bin/bash", "-c", "cat /opt/kafka/init/rack.id").out().trim();
    String brokerRackOut = cmdKubeClient(storage.getNamespaceName()).execInPod(KafkaResources.kafkaPodName(storage.getClusterName(), 0),
            "/bin/bash", "-c", "cat /tmp/strimzi.properties | grep broker.rack").out().trim();
    assertThat(rackIdOut.trim(), is(hostname));
    assertThat(brokerRackOut.contains("broker.rack=" + hostname), is(true));
}
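The anti-affinity assertions inspect a preferred PodAffinityTerm that spreads the brokers of one cluster across the rack topology key. A minimal sketch of an equivalent term built with the fabric8 PodAffinityTermBuilder, assuming an illustrative cluster name my-cluster and the same zone topology key as above (both assumptions):

import io.fabric8.kubernetes.api.model.PodAffinityTerm;
import io.fabric8.kubernetes.api.model.PodAffinityTermBuilder;

// hypothetical sketch of the anti-affinity term the assertions compare against
PodAffinityTerm expectedAntiAffinityTerm = new PodAffinityTermBuilder()
        .withTopologyKey("topology.kubernetes.io/zone") // assumed value of TOPOLOGY_KEY
        .withNewLabelSelector()
            .addToMatchLabels("strimzi.io/cluster", "my-cluster")       // assumed cluster name
            .addToMatchLabels("strimzi.io/name", "my-cluster-kafka")    // Kafka StatefulSet/PodSet name
        .endLabelSelector()
        .build();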
Use of io.fabric8.kubernetes.api.model.PodAffinity in the project syndesis-qe by syndesisio.
The class OperatorValidationSteps, method checkAffinity:
@When("^check (affinity|tolerations)( not set)? for (infra|integration) pods$")
public void checkAffinity(String test, String notSet, String method) {
    List<Pod> pods = "infra".equals(method)
            ? ComponentUtils.getComponentPods().stream()
                    .filter(p -> !p.getMetadata().getName().contains("operator"))
                    .collect(Collectors.toList())
            : OpenShiftUtils.findPodsByPredicates(p -> "integration".equals(p.getMetadata().getLabels().get("syndesis.io/type")));
    for (Pod p : pods) {
        String name = p.getMetadata().getName();
        if ("affinity".equals(test)) {
            Affinity podAffinity = p.getSpec().getAffinity();
            if (notSet == null) {
                assertThat(podAffinity).as(name + ": affinity is null").isNotNull();
                NodeAffinity nodeAffinity = podAffinity.getNodeAffinity();
                assertThat(nodeAffinity).as(name + ": node affinity is null").isNotNull();
                NodeSelector selector = nodeAffinity.getRequiredDuringSchedulingIgnoredDuringExecution();
                assertThat(selector).as(name + ": required is null").isNotNull();
                List<NodeSelectorTerm> terms = selector.getNodeSelectorTerms();
                assertThat(terms).as(name + ": node selector is null").isNotNull();
                assertThat(terms).as(name + ": node selector size isn't 1").hasSize(1);
            } else {
                assertThat(podAffinity).isNull();
            }
        } else {
            Optional<Toleration> toleration = p.getSpec().getTolerations().stream()
                    .filter(t -> "node.kubernetes.io/network-unavailable".equals(t.getKey()))
                    .findAny();
            if (notSet == null) {
                assertThat(toleration).as(name + ": Expected toleration setting is not present").isPresent();
            } else {
                assertThat(toleration).as(name + ": Toleration shouldn't be present").isNotPresent();
            }
        }
    }
}
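For illustration only, a pod spec that would pass the positive branches of this check needs a required node affinity with exactly one node selector term plus a toleration keyed on node.kubernetes.io/network-unavailable. A minimal sketch with the fabric8 PodSpecBuilder; the hostname match expression is an assumption, since the step only checks that a single term exists:

import io.fabric8.kubernetes.api.model.PodSpec;
import io.fabric8.kubernetes.api.model.PodSpecBuilder;

// hypothetical pod spec satisfying both the affinity and the toleration checks above
PodSpec spec = new PodSpecBuilder()
        .withNewAffinity()
            .withNewNodeAffinity()
                .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                    .addNewNodeSelectorTerm()
                        .addNewMatchExpression()
                            .withKey("kubernetes.io/hostname") // assumed key; any single term passes the size check
                            .withOperator("Exists")
                        .endMatchExpression()
                    .endNodeSelectorTerm()
                .endRequiredDuringSchedulingIgnoredDuringExecution()
            .endNodeAffinity()
        .endAffinity()
        .addNewToleration()
            .withKey("node.kubernetes.io/network-unavailable") // the key the step filters for
            .withOperator("Exists")
        .endToleration()
        .build();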