Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi by strimzi.
Class ModelUtilsTest, method testParsePodTemplate:
@Test
public void testParsePodTemplate() {
Kafka kafka = new KafkaBuilder().withNewMetadata().withName("my-cluster").withNamespace("my-namespace").endMetadata().build();
LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");
Affinity affinity = new AffinityBuilder()
        .withNewNodeAffinity()
            .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                .withNodeSelectorTerms(new NodeSelectorTermBuilder()
                        .addNewMatchExpression()
                            .withKey("key1")
                            .withOperator("In")
                            .withValues("value1", "value2")
                        .endMatchExpression()
                        .build())
            .endRequiredDuringSchedulingIgnoredDuringExecution()
        .endNodeAffinity()
        .build();
List<Toleration> tolerations = singletonList(new TolerationBuilder()
        .withEffect("NoExecute")
        .withKey("key1")
        .withOperator("Equal")
        .withValue("value1")
        .build());
PodTemplate template = new PodTemplateBuilder()
        .withNewMetadata()
            .withAnnotations(Collections.singletonMap("annoKey", "annoValue"))
            .withLabels(Collections.singletonMap("labelKey", "labelValue"))
        .endMetadata()
        .withSecurityContext(new PodSecurityContextBuilder()
                .withFsGroup(123L)
                .withRunAsGroup(456L)
                .withRunAsUser(789L)
                .build())
        .withImagePullSecrets(secret1, secret2)
        .withTerminationGracePeriodSeconds(123)
        .withAffinity(affinity)
        .withTolerations(tolerations)
        .build();
Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka);
ModelUtils.parsePodTemplate(model, template);
assertThat(model.templatePodLabels, is(Collections.singletonMap("labelKey", "labelValue")));
assertThat(model.templatePodAnnotations, is(Collections.singletonMap("annoKey", "annoValue")));
assertThat(model.templateTerminationGracePeriodSeconds, is(123));
assertThat(model.templateImagePullSecrets.size(), is(2));
assertThat(model.templateImagePullSecrets.contains(secret1), is(true));
assertThat(model.templateImagePullSecrets.contains(secret2), is(true));
assertThat(model.templateSecurityContext, is(notNullValue()));
assertThat(model.templateSecurityContext.getFsGroup(), is(Long.valueOf(123)));
assertThat(model.templateSecurityContext.getRunAsGroup(), is(Long.valueOf(456)));
assertThat(model.templateSecurityContext.getRunAsUser(), is(Long.valueOf(789)));
assertThat(model.getUserAffinity(), is(affinity));
assertThat(model.getTolerations(), is(tolerations));
}
Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi by strimzi.
Class ModelUtils, method populateAffinityBuilderWithRackLabelSelector:
/**
* @param builder the builder used to populate the node affinity
* @param userAffinity the affinity defined by the user
* @param topologyKey the topology key used to select the nodes
* @return the AffinityBuilder with a node selector on the topology key, which ensures
* the pods are scheduled only on nodes that carry the rack label
*/
public static AffinityBuilder populateAffinityBuilderWithRackLabelSelector(AffinityBuilder builder, Affinity userAffinity, String topologyKey) {
// We need to add node affinity to make sure the pods are scheduled only on nodes with the rack label
NodeSelectorRequirement selector = new NodeSelectorRequirementBuilder().withOperator("Exists").withKey(topologyKey).build();
if (userAffinity != null
        && userAffinity.getNodeAffinity() != null
        && userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution() != null
        && userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms() != null) {
// User has specified some Node Selector Terms => we should enhance them
List<NodeSelectorTerm> oldTerms = userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms();
List<NodeSelectorTerm> enhancedTerms = new ArrayList<>(oldTerms.size());
for (NodeSelectorTerm term : oldTerms) {
NodeSelectorTerm enhancedTerm = new NodeSelectorTermBuilder(term).addToMatchExpressions(selector).build();
enhancedTerms.add(enhancedTerm);
}
builder = builder.editOrNewNodeAffinity()
        .withNewRequiredDuringSchedulingIgnoredDuringExecution()
            .withNodeSelectorTerms(enhancedTerms)
        .endRequiredDuringSchedulingIgnoredDuringExecution()
    .endNodeAffinity();
} else {
// User has not specified any selector terms => we add our own
builder = builder.editOrNewNodeAffinity()
        .editOrNewRequiredDuringSchedulingIgnoredDuringExecution()
            .addNewNodeSelectorTerm()
                .withMatchExpressions(selector)
            .endNodeSelectorTerm()
        .endRequiredDuringSchedulingIgnoredDuringExecution()
    .endNodeAffinity();
}
return builder;
}
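A minimal usage sketch of the method above. The call site, the variable names, and the "topology.kubernetes.io/zone" key are illustrative assumptions, not taken from the Strimzi code base; the fabric8 model classes are assumed to be imported as in the snippets shown here.
// Merge a user-supplied node affinity with the rack topology-key requirement.
Affinity userAffinity = new AffinityBuilder()
        .withNewNodeAffinity()
            .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                .withNodeSelectorTerms(new NodeSelectorTermBuilder()
                        .addNewMatchExpression()
                            .withKey("disktype")
                            .withOperator("In")
                            .withValues("ssd")
                        .endMatchExpression()
                        .build())
            .endRequiredDuringSchedulingIgnoredDuringExecution()
        .endNodeAffinity()
        .build();
Affinity merged = ModelUtils
        .populateAffinityBuilderWithRackLabelSelector(new AffinityBuilder(), userAffinity, "topology.kubernetes.io/zone")
        .build();
// Each of the user's node selector terms now additionally requires the
// "topology.kubernetes.io/zone" label to exist on the node.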
Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi by strimzi.
Class KafkaBridgeClusterTest, method testTemplate:
@ParallelTest
public void testTemplate() {
Map<String, String> depLabels = TestUtils.map("l1", "v1", "l2", "v2", Labels.KUBERNETES_PART_OF_LABEL, "custom-part", Labels.KUBERNETES_MANAGED_BY_LABEL, "custom-managed-by");
Map<String, String> expectedDepLabels = new HashMap<>(depLabels);
expectedDepLabels.remove(Labels.KUBERNETES_MANAGED_BY_LABEL);
Map<String, String> depAnots = TestUtils.map("a1", "v1", "a2", "v2");
Map<String, String> podLabels = TestUtils.map("l3", "v3", "l4", "v4");
Map<String, String> podAnots = TestUtils.map("a3", "v3", "a4", "v4");
Map<String, String> svcLabels = TestUtils.map("l5", "v5", "l6", "v6");
Map<String, String> svcAnots = TestUtils.map("a5", "v5", "a6", "v6");
Map<String, String> pdbLabels = TestUtils.map("l7", "v7", "l8", "v8");
Map<String, String> pdbAnots = TestUtils.map("a7", "v7", "a8", "v8");
Map<String, String> saLabels = TestUtils.map("l9", "v9", "l10", "v10");
Map<String, String> saAnots = TestUtils.map("a9", "v9", "a10", "v10");
Affinity affinity = new AffinityBuilder()
        .withNewNodeAffinity()
            .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                .withNodeSelectorTerms(new NodeSelectorTermBuilder()
                        .addNewMatchExpression()
                            .withKey("key1")
                            .withOperator("In")
                            .withValues("value1", "value2")
                        .endMatchExpression()
                        .build())
            .endRequiredDuringSchedulingIgnoredDuringExecution()
        .endNodeAffinity()
        .build();
List<Toleration> tolerations = singletonList(new TolerationBuilder()
        .withEffect("NoExecute")
        .withKey("key1")
        .withOperator("Equal")
        .withValue("value1")
        .build());
TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder()
        .withTopologyKey("kubernetes.io/zone")
        .withMaxSkew(1)
        .withWhenUnsatisfiable("DoNotSchedule")
        .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build())
        .build();
TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder()
        .withTopologyKey("kubernetes.io/hostname")
        .withMaxSkew(2)
        .withWhenUnsatisfiable("ScheduleAnyway")
        .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build())
        .build();
KafkaBridge resource = new KafkaBridgeBuilder(this.resource)
        .editSpec()
            .withNewTemplate()
                .withNewDeployment()
                    .withNewMetadata()
                        .withLabels(depLabels)
                        .withAnnotations(depAnots)
                    .endMetadata()
                    .withDeploymentStrategy(DeploymentStrategy.RECREATE)
                .endDeployment()
                .withNewPod()
                    .withNewMetadata()
                        .withLabels(podLabels)
                        .withAnnotations(podAnots)
                    .endMetadata()
                    .withPriorityClassName("top-priority")
                    .withSchedulerName("my-scheduler")
                    .withAffinity(affinity)
                    .withTolerations(tolerations)
                    .withTopologySpreadConstraints(tsc1, tsc2)
                    .withEnableServiceLinks(false)
                    .withTmpDirSizeLimit("10Mi")
                .endPod()
                .withNewApiService()
                    .withNewMetadata()
                        .withLabels(svcLabels)
                        .withAnnotations(svcAnots)
                    .endMetadata()
                    .withIpFamilyPolicy(IpFamilyPolicy.PREFER_DUAL_STACK)
                    .withIpFamilies(IpFamily.IPV6, IpFamily.IPV4)
                .endApiService()
                .withNewPodDisruptionBudget()
                    .withNewMetadata()
                        .withLabels(pdbLabels)
                        .withAnnotations(pdbAnots)
                    .endMetadata()
                .endPodDisruptionBudget()
                .withNewServiceAccount()
                    .withNewMetadata()
                        .withLabels(saLabels)
                        .withAnnotations(saAnots)
                    .endMetadata()
                .endServiceAccount()
            .endTemplate()
        .endSpec()
        .build();
KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS);
// Check Deployment
Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null);
assertThat(dep.getMetadata().getLabels().entrySet().containsAll(expectedDepLabels.entrySet()), is(true));
assertThat(dep.getMetadata().getAnnotations().entrySet().containsAll(depAnots.entrySet()), is(true));
assertThat(dep.getSpec().getTemplate().getSpec().getPriorityClassName(), is("top-priority"));
assertThat(dep.getSpec().getStrategy().getType(), is("Recreate"));
assertThat(dep.getSpec().getStrategy().getRollingUpdate(), is(nullValue()));
// Check Pods
assertThat(dep.getSpec().getTemplate().getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true));
assertThat(dep.getSpec().getTemplate().getMetadata().getAnnotations().entrySet().containsAll(podAnots.entrySet()), is(true));
assertThat(dep.getSpec().getTemplate().getSpec().getSchedulerName(), is("my-scheduler"));
assertThat(dep.getSpec().getTemplate().getSpec().getAffinity(), is(affinity));
assertThat(dep.getSpec().getTemplate().getSpec().getTolerations(), is(tolerations));
assertThat(dep.getSpec().getTemplate().getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, tsc2));
assertThat(dep.getSpec().getTemplate().getSpec().getEnableServiceLinks(), is(false));
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(0).getEmptyDir().getSizeLimit(), is(new Quantity("10Mi")));
// Check Service
Service svc = kbc.generateService();
assertThat(svc.getMetadata().getLabels().entrySet().containsAll(svcLabels.entrySet()), is(true));
assertThat(svc.getMetadata().getAnnotations().entrySet().containsAll(svcAnots.entrySet()), is(true));
assertThat(svc.getSpec().getIpFamilyPolicy(), is("PreferDualStack"));
assertThat(svc.getSpec().getIpFamilies(), contains("IPv6", "IPv4"));
// Check PodDisruptionBudget
PodDisruptionBudget pdb = kbc.generatePodDisruptionBudget();
assertThat(pdb.getMetadata().getLabels().entrySet().containsAll(pdbLabels.entrySet()), is(true));
assertThat(pdb.getMetadata().getAnnotations().entrySet().containsAll(pdbAnots.entrySet()), is(true));
// Check PodDisruptionBudget V1Beta1
io.fabric8.kubernetes.api.model.policy.v1beta1.PodDisruptionBudget pdbV1Beta1 = kbc.generatePodDisruptionBudgetV1Beta1();
assertThat(pdbV1Beta1.getMetadata().getLabels().entrySet().containsAll(pdbLabels.entrySet()), is(true));
assertThat(pdbV1Beta1.getMetadata().getAnnotations().entrySet().containsAll(pdbAnots.entrySet()), is(true));
// Check Service Account
ServiceAccount sa = kbc.generateServiceAccount();
assertThat(sa.getMetadata().getLabels().entrySet().containsAll(saLabels.entrySet()), is(true));
assertThat(sa.getMetadata().getAnnotations().entrySet().containsAll(saAnots.entrySet()), is(true));
}
Use of io.fabric8.kubernetes.api.model.AffinityBuilder in project strimzi by strimzi.
Class DrainCleanerIsolatedST, method testDrainCleanerWithComponentsDuringNodeDraining:
@IsolatedTest
@MultiNodeClusterOnly
void testDrainCleanerWithComponentsDuringNodeDraining(ExtensionContext extensionContext) {
TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
String rackKey = "rack-key";
final int replicas = 3;
int size = 5;
List<String> topicNames = IntStream.range(0, size).boxed().map(i -> testStorage.getTopicName() + "-" + i).collect(Collectors.toList());
List<String> producerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getProducerName() + "-" + i).collect(Collectors.toList());
List<String> consumerNames = IntStream.range(0, size).boxed().map(i -> testStorage.getConsumerName() + "-" + i).collect(Collectors.toList());
List<String> continuousConsumerGroups = IntStream.range(0, size).boxed().map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
        .editMetadata()
            .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withNewRack()
                    .withTopologyKey(rackKey)
                .endRack()
                .editOrNewTemplate()
                    .editOrNewPodDisruptionBudget()
                        .withMaxUnavailable(0)
                    .endPodDisruptionBudget()
                    .withNewPod()
                        .withAffinity(new AffinityBuilder()
                                .withNewPodAntiAffinity()
                                    .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                        .editOrNewLabelSelector()
                                            .addNewMatchExpression()
                                                .withKey(rackKey)
                                                .withOperator("In")
                                                .withValues("zone")
                                            .endMatchExpression()
                                        .endLabelSelector()
                                        .withTopologyKey(rackKey)
                                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                                .endPodAntiAffinity()
                                .build())
                    .endPod()
                .endTemplate()
            .endKafka()
            .editZookeeper()
                .editOrNewTemplate()
                    .editOrNewPodDisruptionBudget()
                        .withMaxUnavailable(0)
                    .endPodDisruptionBudget()
                    .withNewPod()
                        .withAffinity(new AffinityBuilder()
                                .withNewPodAntiAffinity()
                                    .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                                        .editOrNewLabelSelector()
                                            .addNewMatchExpression()
                                                .withKey(rackKey)
                                                .withOperator("In")
                                                .withValues("zone")
                                            .endMatchExpression()
                                        .endLabelSelector()
                                        .withTopologyKey(rackKey)
                                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                                .endPodAntiAffinity()
                                .build())
                    .endPod()
                .endTemplate()
            .endZookeeper()
        .endSpec()
        .build());
topicNames.forEach(topic -> resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(testStorage.getClusterName(), topic, 3, 3, 2)
                .editMetadata()
                    .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
                .endMetadata()
                .build()));
drainCleaner.createDrainCleaner(extensionContext);
String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
Map<String, List<String>> nodesWithPods = NodeUtils.getPodsForEachNodeInNamespace(Constants.DRAIN_CLEANER_NAMESPACE);
// Remove from the map all pods that don't contain "kafka" or "zookeeper" in their name
nodesWithPods.forEach((node, podlist) -> podlist.retainAll(podlist.stream()
        .filter(podName -> podName.contains("kafka") || podName.contains("zookeeper"))
        .collect(Collectors.toList())));
String producerAdditionConfiguration = "delivery.timeout.ms=30000\nrequest.timeout.ms=30000";
KafkaClients kafkaBasicExampleClients;
for (int i = 0; i < size; i++) {
kafkaBasicExampleClients = new KafkaClientsBuilder()
        .withProducerName(producerNames.get(i))
        .withConsumerName(consumerNames.get(i))
        .withTopicName(topicNames.get(i))
        .withConsumerGroup(continuousConsumerGroups.get(i))
        .withMessageCount(300)
        .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
        .withDelayMs(1000)
        .withAdditionalConfig(producerAdditionConfiguration)
        .build();
resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
}
LOGGER.info("Starting Node drain");
nodesWithPods.forEach((nodeName, podList) -> {
String zkPodName = podList.stream().filter(podName -> podName.contains("zookeeper")).findFirst().get();
String kafkaPodName = podList.stream().filter(podName -> podName.contains("kafka")).findFirst().get();
Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
        .entrySet().stream()
        .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector())
        .entrySet().stream()
        .filter(snapshot -> snapshot.getKey().equals(zkPodName))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
NodeUtils.drainNode(nodeName);
NodeUtils.cordonNode(nodeName, true);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
});
producerNames.forEach(producer -> ClientUtils.waitTillContinuousClientsFinish(producer, consumerNames.get(producerNames.indexOf(producer)), Constants.DRAIN_CLEANER_NAMESPACE, 300));
producerNames.forEach(producer -> KubeClusterResource.kubeClient().deleteJob(producer));
consumerNames.forEach(consumer -> KubeClusterResource.kubeClient().deleteJob(consumer));
}
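For readability, the pod anti-affinity rule embedded twice in the Kafka resource above (once for Kafka, once for ZooKeeper) can also be expressed as a small stand-alone helper. This is an illustrative sketch, not part of the Strimzi test code; the helper name is an assumption, and only builder calls that appear in the test itself are used.
// Illustrative helper: builds the anti-affinity rule used above. Pods must not be
// co-scheduled, within the rackKey topology domain, with pods whose rackKey label
// has the value "zone".
static Affinity rackAntiAffinity(String rackKey) {
    return new AffinityBuilder()
            .withNewPodAntiAffinity()
                .addNewRequiredDuringSchedulingIgnoredDuringExecution()
                    .editOrNewLabelSelector()
                        .addNewMatchExpression()
                            .withKey(rackKey)
                            .withOperator("In")
                            .withValues("zone")
                        .endMatchExpression()
                    .endLabelSelector()
                    .withTopologyKey(rackKey)
                .endRequiredDuringSchedulingIgnoredDuringExecution()
            .endPodAntiAffinity()
            .build();
}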