Use of io.fabric8.kubernetes.api.model.PodAffinityTerm in project strimzi by strimzi.
The class SpecificIsolatedST, method testRackAware.
@IsolatedTest("UtestRackAwareConnectWrongDeploymentsing more tha one Kafka cluster in one namespace")
@Tag(REGRESSION)
@Tag(INTERNAL_CLIENTS_USED)
void testRackAware(ExtensionContext extensionContext) {
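// Skip when the install uses namespace-scoped RBAC; rack awareness needs cluster-scoped access to read node labels.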
assumeFalse(Environment.isNamespaceRbacScope());
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
String producerName = "hello-world-producer";
String consumerName = "hello-world-consumer";
String rackKey = "rack-key";
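// Deploy a single-node ephemeral Kafka cluster with rack awareness enabled on a custom topology key.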
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
    .editSpec()
        .editKafka()
            .withNewRack().withTopologyKey(rackKey).endRack()
        .endKafka()
    .endSpec()
    .build());
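// The operator should turn the rack configuration into a required node affinity rule on the topology key.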
Affinity kafkaPodSpecAffinity = StUtils.getStatefulSetOrStrimziPodSetAffinity(KafkaResources.kafkaStatefulSetName(clusterName));
NodeSelectorRequirement kafkaPodNodeSelectorRequirement = kafkaPodSpecAffinity.getNodeAffinity()
    .getRequiredDuringSchedulingIgnoredDuringExecution()
    .getNodeSelectorTerms().get(0)
    .getMatchExpressions().get(0);
assertThat(kafkaPodNodeSelectorRequirement.getKey(), is(rackKey));
assertThat(kafkaPodNodeSelectorRequirement.getOperator(), is("Exists"));
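// It should also add preferred pod anti-affinity so brokers of this cluster spread across the topology key.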
PodAffinityTerm kafkaPodAffinityTerm = kafkaPodSpecAffinity.getPodAntiAffinity()
    .getPreferredDuringSchedulingIgnoredDuringExecution().get(0).getPodAffinityTerm();
assertThat(kafkaPodAffinityTerm.getTopologyKey(), is(rackKey));
assertThat(kafkaPodAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/cluster", clusterName));
assertThat(kafkaPodAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/name", KafkaResources.kafkaStatefulSetName(clusterName)));
String rackId = cmdKubeClient().execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cat /opt/kafka/init/rack.id").out();
assertThat(rackId.trim(), is("zone"));
String brokerRack = cmdKubeClient().execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cat /tmp/strimzi.properties | grep broker.rack").out();
assertThat(brokerRack.contains("broker.rack=zone"), is(true));
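// The pod's events should show a normal scheduling and startup sequence.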
String uid = kubeClient().getPodUid(KafkaResources.kafkaPodName(clusterName, 0));
List<Event> events = kubeClient().listEventsByResourceUid(uid);
assertThat(events, hasAllOfReasons(Scheduled, Pulled, Created, Started));
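// Finally, run producer and consumer jobs against the plain listener to confirm the rack-aware cluster handles traffic.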
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
    .withProducerName(producerName)
    .withConsumerName(consumerName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
    .withTopicName(TOPIC_NAME)
    .withMessageCount(MESSAGE_COUNT)
    .withDelayMs(0)
    .build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
}
The same usage appears in project strimzi-kafka-operator by strimzi, in the class SpecificIsolatedST, method testRackAware; the code is identical to the listing above.