Use of io.fabric8.kubernetes.api.model.NodeSelectorRequirement in project strimzi-kafka-operator by strimzi.
The class ModelUtils, method populateAffinityBuilderWithRackLabelSelector:
/**
 * @param builder      the builder which is used to populate the node affinity
 * @param userAffinity the affinity defined by the user
 * @param topologyKey  the topology key which is used to select the node
 * @return the AffinityBuilder with a node selector on the topology key, which ensures
 *         that the pods are scheduled only on nodes with the rack label
 */
public static AffinityBuilder populateAffinityBuilderWithRackLabelSelector(AffinityBuilder builder, Affinity userAffinity, String topologyKey) {
    // We need to add node affinity to make sure the pods are scheduled only on nodes with the rack label
    NodeSelectorRequirement selector = new NodeSelectorRequirementBuilder()
            .withOperator("Exists").withKey(topologyKey).build();

    if (userAffinity != null
            && userAffinity.getNodeAffinity() != null
            && userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution() != null
            && userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms() != null) {
        // The user has specified some node selector terms => enhance each of them with the rack selector
        List<NodeSelectorTerm> oldTerms = userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms();
        List<NodeSelectorTerm> enhancedTerms = new ArrayList<>(oldTerms.size());

        for (NodeSelectorTerm term : oldTerms) {
            NodeSelectorTerm enhancedTerm = new NodeSelectorTermBuilder(term)
                    .addToMatchExpressions(selector)
                    .build();
            enhancedTerms.add(enhancedTerm);
        }

        builder = builder
                .editOrNewNodeAffinity()
                    .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                        .withNodeSelectorTerms(enhancedTerms)
                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                .endNodeAffinity();
    } else {
        // The user has not specified any selector terms => add our own term with the rack selector
        builder = builder
                .editOrNewNodeAffinity()
                    .editOrNewRequiredDuringSchedulingIgnoredDuringExecution()
                        .addNewNodeSelectorTerm()
                            .withMatchExpressions(selector)
                        .endNodeSelectorTerm()
                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                .endNodeAffinity();
    }

    return builder;
}
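As a rough usage sketch (not taken from the Strimzi sources; the user-defined affinity, the variable names, and the topology key "topology.kubernetes.io/zone" are illustrative assumptions), the helper could be called like this to merge the rack requirement into a user-supplied node affinity:

import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;
import io.fabric8.kubernetes.api.model.NodeSelectorRequirement;
import io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder;
import io.fabric8.kubernetes.api.model.NodeSelectorTerm;
import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder;

// Hypothetical user-defined affinity: pods may only run on nodes labelled disktype=ssd
NodeSelectorRequirement diskType = new NodeSelectorRequirementBuilder()
        .withKey("disktype").withOperator("In").withValues("ssd").build();
NodeSelectorTerm userTerm = new NodeSelectorTermBuilder().withMatchExpressions(diskType).build();
Affinity userAffinity = new AffinityBuilder()
        .withNewNodeAffinity()
            .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                .withNodeSelectorTerms(userTerm)
            .endRequiredDuringSchedulingIgnoredDuringExecution()
        .endNodeAffinity()
        .build();

// Merge in the rack requirement; "topology.kubernetes.io/zone" is just an example topology key
AffinityBuilder builder = new AffinityBuilder(userAffinity);
builder = ModelUtils.populateAffinityBuilderWithRackLabelSelector(builder, userAffinity, "topology.kubernetes.io/zone");
Affinity merged = builder.build();
// The user's term now additionally requires the "topology.kubernetes.io/zone" label to exist on the node

Passing the user affinity both through the builder and as the second argument mirrors how the method works: it inspects the existing node selector terms to decide whether to enhance them or to add a fresh term of its own.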
Use of io.fabric8.kubernetes.api.model.NodeSelectorRequirement in project strimzi-kafka-operator by strimzi.
The class KafkaRollerIsolatedST, method testKafkaPodPendingDueToRack:
@ParallelNamespaceTest
void testKafkaPodPendingDueToRack(ExtensionContext extensionContext) {
    // Testing this scenario:
    // 1. deploy Kafka with a wrong pod template (looking for a nonexistent node) => Kafka pods should not exist
    // 2. wait for Kafka not ready => Kafka pods should be in the Pending state
    // 3. fix the Kafka CR => Kafka pods are still in the Pending state
    // 4. wait for Kafka ready => Kafka pods should NOT be in the Pending state anymore
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    // Node affinity requiring a label that no node in the cluster carries => pods cannot be scheduled
    NodeSelectorRequirement nsr = new NodeSelectorRequirementBuilder()
            .withKey("dedicated_test").withOperator("In").withValues("Kafka").build();
    NodeSelectorTerm nst = new NodeSelectorTermBuilder().withMatchExpressions(nsr).build();
    Affinity affinity = new AffinityBuilder()
            .withNewNodeAffinity()
                .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                    .withNodeSelectorTerms(nst)
                .endRequiredDuringSchedulingIgnoredDuringExecution()
            .endNodeAffinity()
            .build();

    PodTemplate pt = new PodTemplate();
    pt.setAffinity(affinity);
    KafkaClusterTemplate kct = new KafkaClusterTemplateBuilder().withPod(pt).build();

    resourceManager.createResource(extensionContext, false,
            KafkaTemplates.kafkaEphemeral(clusterName, 3, 3)
                    .editSpec()
                        .editKafka()
                            .withTemplate(kct)
                        .endKafka()
                    .endSpec()
                    .build());

    // pods are stable in the Pending state
    PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), 3);

    LOGGER.info("Removing requirement for the affinity");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
            kafka -> kafka.getSpec().getKafka().getTemplate().getPod().setAffinity(null), namespaceName);

    // Kafka should get back to ready within a reasonable time frame
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName)
            .withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
    KafkaUtils.waitForKafkaDeletion(namespaceName, clusterName);
}
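The waiting and replacement helpers above come from the Strimzi systemtest framework. As a rough illustration of what "pods stuck in the Pending state" looks like through the plain fabric8 client, the sketch below checks the pod phase and its PodScheduled condition; the client construction (fabric8 6.x KubernetesClientBuilder), the namespace, and the pod name are assumptions for the example, not values used by the test.

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

// Illustrative only: namespace and pod name are made-up examples
try (KubernetesClient client = new KubernetesClientBuilder().build()) {
    Pod pod = client.pods().inNamespace("my-namespace").withName("my-cluster-kafka-0").get();

    boolean pending = pod != null && "Pending".equals(pod.getStatus().getPhase());
    boolean unschedulable = pod != null && pod.getStatus().getConditions().stream()
            .anyMatch(c -> "PodScheduled".equals(c.getType())
                    && "False".equals(c.getStatus())
                    && "Unschedulable".equals(c.getReason()));

    System.out.println("Pending: " + pending + ", unschedulable: " + unschedulable);
}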