Example 1 with Rack

Use of io.strimzi.api.kafka.model.Rack in project strimzi by strimzi.

From the class KafkaConnectClusterTest, method checkRack:

private void checkRack(Deployment deployment, KafkaConnect resource) {
    Rack rack = resource.getSpec().getRack();
    if (rack != null) {
        PodSpec podSpec = deployment.getSpec().getTemplate().getSpec();
        // check that pod spec contains the init Kafka container
        List<Container> initContainers = podSpec.getInitContainers();
        assertThat(initContainers, is(notNullValue()));
        assertThat(initContainers.size() > 0, is(true));
        boolean isInitKafkaConnect = initContainers.stream().anyMatch(container -> container.getName().equals(KafkaConnectCluster.INIT_NAME));
        assertThat(isInitKafkaConnect, is(true));
    }
}
Also used : Rack(io.strimzi.api.kafka.model.Rack) Container(io.fabric8.kubernetes.api.model.Container) PodSpec(io.fabric8.kubernetes.api.model.PodSpec)
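
The rack checked here comes from the KafkaConnect spec. As a minimal sketch (the resource name, namespace, and bootstrap address are illustrative assumptions, not taken from this test), declaring a rack on a KafkaConnect resource is what makes the operator add the init container that checkRack verifies:

KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withName("my-connect")                                   // illustrative name
        .withNamespace("myproject")                               // illustrative namespace
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
        .withBootstrapServers("my-cluster-kafka-bootstrap:9092")  // illustrative address
        .withNewRack()
            .withTopologyKey("topology.kubernetes.io/zone")
        .endRack()
    .endSpec()
    .build();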

Example 2 with Rack

Use of io.strimzi.api.kafka.model.Rack in project strimzi by strimzi.

From the class KafkaBrokerConfigurationBuilderTest, method testRackAndBrokerId:

@ParallelTest
public void testRackAndBrokerId() {
    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
            .withBrokerId()
            .withRackId(new Rack("failure-domain.kubernetes.io/zone"))
            .build();
    assertThat(configuration, isEquivalent(
            "broker.id=${STRIMZI_BROKER_ID}\n" +
            "node.id=${STRIMZI_BROKER_ID}\n" +
            "broker.rack=${STRIMZI_RACK_ID}"));
}
Also used : Rack(io.strimzi.api.kafka.model.Rack) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) ParallelTest(io.strimzi.test.annotations.ParallelTest)
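
The Rack passed to withRackId carries only the topology key, i.e. the node label from which the broker's broker.rack value is resolved at runtime. A small sketch of two equivalent ways to construct it (the topology.kubernetes.io/zone label is an illustrative alternative to the older failure-domain label used in the test):

Rack viaConstructor = new Rack("failure-domain.kubernetes.io/zone");
Rack viaBuilder = new RackBuilder()
    .withTopologyKey("topology.kubernetes.io/zone")   // illustrative, newer label name
    .build();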

Example 3 with Rack

Use of io.strimzi.api.kafka.model.Rack in project strimzi by strimzi.

From the class KafkaClusterTest, method checkStatefulSet:

private void checkStatefulSet(StatefulSet sts, Kafka cm, boolean isOpenShift) {
    assertThat(sts.getMetadata().getName(), is(KafkaCluster.kafkaClusterName(cluster)));
    // ... in the same namespace ...
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    // ... with these labels
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));
    assertThat(sts.getSpec().getTemplate().getSpec().getSchedulerName(), is("default-scheduler"));
    List<Container> containers = sts.getSpec().getTemplate().getSpec().getContainers();
    assertThat(containers.size(), is(1));
    // checks on the main Kafka container
    assertThat(sts.getSpec().getReplicas(), is(Integer.valueOf(replicas)));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));
    assertThat(containers.get(0).getImage(), is(image));
    assertThat(containers.get(0).getLivenessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(containers.get(0).getLivenessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(containers.get(0).getLivenessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(containers.get(0).getLivenessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(containers.get(0).getLivenessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));
    assertThat(containers.get(0).getReadinessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(containers.get(0).getReadinessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(containers.get(0).getReadinessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(containers.get(0).getReadinessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(containers.get(0).getReadinessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));
    assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(KafkaCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));
    assertThat(containers.get(0).getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(containers.get(0).getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(containers.get(0).getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
    assertThat(containers.get(0).getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
    assertThat(containers.get(0).getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
    assertThat(containers.get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
    assertThat(containers.get(0).getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
    assertThat(containers.get(0).getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));
    assertThat(containers.get(0).getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME));
    assertThat(containers.get(0).getPorts().get(0).getContainerPort(), is(KafkaCluster.CONTROLPLANE_PORT));
    assertThat(containers.get(0).getPorts().get(0).getProtocol(), is("TCP"));
    assertThat(containers.get(0).getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME));
    assertThat(containers.get(0).getPorts().get(1).getContainerPort(), is(KafkaCluster.REPLICATION_PORT));
    assertThat(containers.get(0).getPorts().get(1).getProtocol(), is("TCP"));
    assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().stream()
            .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp"))
            .findFirst().get()
            .getEmptyDir().getSizeLimit(),
        is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));
    if (cm.getSpec().getKafka().getRack() != null) {
        Rack rack = cm.getSpec().getKafka().getRack();
        // check that the pod spec contains anti-affinity rules with the right topology key
        PodSpec podSpec = sts.getSpec().getTemplate().getSpec();
        assertThat(podSpec.getAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution(), is(notNullValue()));
        List<WeightedPodAffinityTerm> terms = podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(terms, is(notNullValue()));
        assertThat(terms.size() > 0, is(true));
        boolean isTopologyKey = terms.stream().anyMatch(term -> term.getPodAffinityTerm().getTopologyKey().equals(rack.getTopologyKey()));
        assertThat(isTopologyKey, is(true));
        // check that pod spec contains the init Kafka container
        List<Container> initContainers = podSpec.getInitContainers();
        assertThat(initContainers, is(notNullValue()));
        assertThat(initContainers.size() > 0, is(true));
        boolean isInitKafka = initContainers.stream().anyMatch(container -> container.getName().equals(KafkaCluster.INIT_NAME));
        assertThat(isInitKafka, is(true));
    }
}
Also used : Quantity(io.fabric8.kubernetes.api.model.Quantity) VolumeMount(io.fabric8.kubernetes.api.model.VolumeMount) ExternalTrafficPolicy(io.strimzi.api.kafka.model.template.ExternalTrafficPolicy) PersistentClaimStorageOverrideBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageOverrideBuilder) IntOrString(io.fabric8.kubernetes.api.model.IntOrString) Rack(io.strimzi.api.kafka.model.Rack) GenericKafkaListenerConfigurationBrokerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBrokerBuilder) SecurityContextBuilder(io.fabric8.kubernetes.api.model.SecurityContextBuilder) PodDisruptionBudget(io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget) Collections.singletonList(java.util.Collections.singletonList) ResourceRequirements(io.fabric8.kubernetes.api.model.ResourceRequirements) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) Arrays.asList(java.util.Arrays.asList) Map(java.util.Map) ContainerEnvVar(io.strimzi.api.kafka.model.ContainerEnvVar) KafkaJmxOptionsBuilder(io.strimzi.api.kafka.model.KafkaJmxOptionsBuilder) LabelSelectorBuilder(io.fabric8.kubernetes.api.model.LabelSelectorBuilder) CertSecretSource(io.strimzi.api.kafka.model.CertSecretSource) JbodStorageBuilder(io.strimzi.api.kafka.model.storage.JbodStorageBuilder) Matchers.allOf(org.hamcrest.Matchers.allOf) Set(java.util.Set) GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) GenericSecretSourceBuilder(io.strimzi.api.kafka.model.GenericSecretSourceBuilder) PodSecurityContextBuilder(io.fabric8.kubernetes.api.model.PodSecurityContextBuilder) Matchers.contains(org.hamcrest.Matchers.contains) PasswordGenerator(io.strimzi.operator.common.PasswordGenerator) HostAliasBuilder(io.fabric8.kubernetes.api.model.HostAliasBuilder) KafkaListenerAuthenticationOAuthBuilder(io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationOAuthBuilder) PersistentVolumeClaim(io.fabric8.kubernetes.api.model.PersistentVolumeClaim) Matchers.containsString(org.hamcrest.Matchers.containsString) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) ClusterRoleBinding(io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) ResourceRequirementsBuilder(io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder) IpFamily(io.strimzi.api.kafka.model.template.IpFamily) LocalObjectReference(io.fabric8.kubernetes.api.model.LocalObjectReference) OwnerReference(io.fabric8.kubernetes.api.model.OwnerReference) GenericKafkaListenerConfigurationBootstrapBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBootstrapBuilder) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) ArrayList(java.util.ArrayList) Matchers.hasProperty(org.hamcrest.Matchers.hasProperty) PersistentClaimStorageBuilder(io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder) GenericKafkaListenerConfigurationBroker(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBroker) SecurityContext(io.fabric8.kubernetes.api.model.SecurityContext) KafkaVersionTestUtils(io.strimzi.operator.cluster.KafkaVersionTestUtils) PodSpec(io.fabric8.kubernetes.api.model.PodSpec) 
KafkaListenerAuthenticationCustomBuilder(io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationCustomBuilder) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) KafkaJmxAuthenticationPasswordBuilder(io.strimzi.api.kafka.model.KafkaJmxAuthenticationPasswordBuilder) IOException(java.io.IOException) StatefulSet(io.fabric8.kubernetes.api.model.apps.StatefulSet) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ContainerPort(io.fabric8.kubernetes.api.model.ContainerPort) Reconciliation(io.strimzi.operator.common.Reconciliation) Util(io.strimzi.operator.common.Util) KafkaListenerType(io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType) SystemPropertyBuilder(io.strimzi.api.kafka.model.SystemPropertyBuilder) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) NetworkPolicyPeer(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeer) OpenSslCertManager(io.strimzi.certs.OpenSslCertManager) X509Certificate(java.security.cert.X509Certificate) CoreMatchers.is(org.hamcrest.CoreMatchers.is) CoreMatchers.hasItem(org.hamcrest.CoreMatchers.hasItem) Storage(io.strimzi.api.kafka.model.storage.Storage) ParallelSuite(io.strimzi.test.annotations.ParallelSuite) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) NetworkPolicyIngressRule(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyIngressRule) Route(io.fabric8.openshift.api.model.Route) SystemProperty(io.strimzi.api.kafka.model.SystemProperty) ResourceUtils(io.strimzi.operator.cluster.ResourceUtils) KafkaAuthorizationKeycloakBuilder(io.strimzi.api.kafka.model.KafkaAuthorizationKeycloakBuilder) IpFamilyPolicy(io.strimzi.api.kafka.model.template.IpFamilyPolicy) ParallelTest(io.strimzi.test.annotations.ParallelTest) Collections.emptyList(java.util.Collections.emptyList) Collectors(java.util.stream.Collectors) List(java.util.List) CertSecretSourceBuilder(io.strimzi.api.kafka.model.CertSecretSourceBuilder) Labels(io.strimzi.operator.common.model.Labels) NodeAddressType(io.strimzi.api.kafka.model.listener.NodeAddressType) RackBuilder(io.strimzi.api.kafka.model.RackBuilder) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder) Ingress(io.fabric8.kubernetes.api.model.networking.v1.Ingress) Secret(io.fabric8.kubernetes.api.model.Secret) TopologySpreadConstraintBuilder(io.fabric8.kubernetes.api.model.TopologySpreadConstraintBuilder) NetworkPolicyPeerBuilder(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder) PodManagementPolicy(io.strimzi.api.kafka.model.template.PodManagementPolicy) ContainerTemplate(io.strimzi.api.kafka.model.template.ContainerTemplate) Container(io.fabric8.kubernetes.api.model.Container) WeightedPodAffinityTerm(io.fabric8.kubernetes.api.model.WeightedPodAffinityTerm) EphemeralStorageBuilder(io.strimzi.api.kafka.model.storage.EphemeralStorageBuilder) CertificateParsingException(java.security.cert.CertificateParsingException) HashMap(java.util.HashMap) GenericKafkaListenerConfigurationBootstrap(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBootstrap) MetricsAndLogging(io.strimzi.operator.common.MetricsAndLogging) HashSet(java.util.HashSet) HostAlias(io.fabric8.kubernetes.api.model.HostAlias) JmxPrometheusExporterMetrics(io.strimzi.api.kafka.model.JmxPrometheusExporterMetrics) JmxPrometheusExporterMetricsBuilder(io.strimzi.api.kafka.model.JmxPrometheusExporterMetricsBuilder) 
InlineLogging(io.strimzi.api.kafka.model.InlineLogging) MetricsConfig(io.strimzi.api.kafka.model.MetricsConfig) TestUtils(io.strimzi.test.TestUtils) Collections.singletonMap(java.util.Collections.singletonMap) Service(io.fabric8.kubernetes.api.model.Service) CertificateExpirationPolicy(io.strimzi.api.kafka.model.CertificateExpirationPolicy) Volume(io.fabric8.kubernetes.api.model.Volume) Matchers.hasEntry(org.hamcrest.Matchers.hasEntry) CruiseControlConfigurationParameters(io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters) Collections.emptyMap(java.util.Collections.emptyMap) TopologySpreadConstraint(io.fabric8.kubernetes.api.model.TopologySpreadConstraint) TestUtils.set(io.strimzi.test.TestUtils.set) LabelSelectorRequirementBuilder(io.fabric8.kubernetes.api.model.LabelSelectorRequirementBuilder) NetworkPolicy(io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicy) ServiceAccount(io.fabric8.kubernetes.api.model.ServiceAccount) Kafka(io.strimzi.api.kafka.model.Kafka) Collections(java.util.Collections)
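
The assertions inside the if (rack != null) branch above only fire when the Kafka custom resource declares spec.kafka.rack. A minimal sketch of that part of the resource, using the KafkaBuilder imported above (the name and namespace are illustrative; a complete Kafka resource in this Strimzi version also needs listeners, storage, and a zookeeper section):

Kafka kafka = new KafkaBuilder()
    .withNewMetadata()
        .withName("my-cluster")          // illustrative name
        .withNamespace("myproject")      // illustrative namespace
    .endMetadata()
    .withNewSpec()
        .withNewKafka()
            .withReplicas(3)
            .withNewRack()
                .withTopologyKey("topology.kubernetes.io/zone")
            .endRack()
        .endKafka()
    .endSpec()
    .build();

With a rack set, the generated StatefulSet carries the preferred pod anti-affinity term on the rack's topology key and the init container (KafkaCluster.INIT_NAME) that the branch above asserts on.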

Example 4 with Rack

Use of io.strimzi.api.kafka.model.Rack in project strimzi by strimzi.

From the class KafkaBrokerConfigurationBuilderTest, method testRackId:

@ParallelTest
public void testRackId() {
    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
            .withRackId(new Rack("failure-domain.kubernetes.io/zone"))
            .build();
    assertThat(configuration, isEquivalent("broker.rack=${STRIMZI_RACK_ID}"));
}
Also used : Rack(io.strimzi.api.kafka.model.Rack) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) ParallelTest(io.strimzi.test.annotations.ParallelTest)

Example 5 with Rack

Use of io.strimzi.api.kafka.model.Rack in project strimzi by strimzi.

From the class SpecificIsolatedST, method testRackAwareConnectWrongDeployment:

@IsolatedTest("Modification of shared Cluster Operator configuration")
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
@Tag(REGRESSION)
@Tag(INTERNAL_CLIENTS_USED)
void testRackAwareConnectWrongDeployment(ExtensionContext extensionContext) {
    assumeFalse(Environment.isNamespaceRbacScope());
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    Map<String, String> label = Collections.singletonMap("my-label", "value");
    Map<String, String> anno = Collections.singletonMap("my-annotation", "value");
    // We need to update the CO configuration to set OPERATION_TIMEOUT to a shorter value, because this test expects a timeout
    Map<String, String> coSnapshot = DeploymentUtils.depSnapshot(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName());
    // We have to install the CO in the class stack, otherwise it would be deleted at the end of this test case and all following tests would fail
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
            .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
            .withNamespace(INFRA_NAMESPACE)
            .withOperationTimeout(CO_OPERATION_TIMEOUT_SHORT)
            .withReconciliationInterval(Constants.RECONCILIATION_INTERVAL)
            .createInstallation()
            .runBundleInstallation();
    coSnapshot = DeploymentUtils.waitTillDepHasRolled(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName(), 1, coSnapshot);
    String wrongRackKey = "wrong-key";
    String rackKey = "rack-key";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
            .editMetadata()
                .withNamespace(INFRA_NAMESPACE)
            .endMetadata()
            .editSpec()
                .editKafka()
                    .withNewRack()
                        .withTopologyKey(rackKey)
                    .endRack()
                    .addToConfig("replica.selector.class", "org.apache.kafka.common.replica.RackAwareReplicaSelector")
                .endKafka()
            .endSpec()
            .build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(INFRA_NAMESPACE, false, kafkaClientsName).build());
    String kafkaClientsPodName = kubeClient(INFRA_NAMESPACE).listPodsByPrefixInName(INFRA_NAMESPACE, kafkaClientsName).get(0).getMetadata().getName();
    LOGGER.info("Deploy KafkaConnect with wrong rack-aware topology key: {}", wrongRackKey);
    KafkaConnect kc = KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, clusterName, 1)
            .editMetadata()
                .withNamespace(INFRA_NAMESPACE)
            .endMetadata()
            .editSpec()
                .withNewRack()
                    .withTopologyKey(wrongRackKey)
                .endRack()
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                .editOrNewTemplate()
                    .withNewClusterRoleBinding()
                        .withNewMetadata()
                            .withAnnotations(anno)
                            .withLabels(label)
                        .endMetadata()
                    .endClusterRoleBinding()
                .endTemplate()
            .endSpec()
            .build();
    resourceManager.createResource(extensionContext, false, kc);
    NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, kc, KafkaConnectResources.deploymentName(clusterName));
    PodUtils.waitForPendingPod(INFRA_NAMESPACE, clusterName + "-connect");
    List<String> connectWrongPods = kubeClient(INFRA_NAMESPACE).listPodNames(INFRA_NAMESPACE, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND);
    String connectWrongPodName = connectWrongPods.get(0);
    LOGGER.info("Waiting for ClusterOperator to get timeout operation of incorrectly set up KafkaConnect");
    KafkaConnectUtils.waitForKafkaConnectCondition("TimeoutException", "NotReady", INFRA_NAMESPACE, clusterName);
    PodStatus kcWrongStatus = kubeClient().getPod(INFRA_NAMESPACE, connectWrongPodName).getStatus();
    assertThat("Unschedulable", is(kcWrongStatus.getConditions().get(0).getReason()));
    assertThat("PodScheduled", is(kcWrongStatus.getConditions().get(0).getType()));
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kafkaConnect -> {
        kafkaConnect.getSpec().setRack(new Rack(rackKey));
    }, INFRA_NAMESPACE);
    KafkaConnectUtils.waitForConnectReady(INFRA_NAMESPACE, clusterName);
    LOGGER.info("KafkaConnect is ready with changed rack key: '{}'.", rackKey);
    LOGGER.info("Verify KafkaConnect rack key update");
    kc = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get();
    assertThat(kc.getSpec().getRack().getTopologyKey(), is(rackKey));
    List<String> kcPods = kubeClient(INFRA_NAMESPACE).listPodNames(INFRA_NAMESPACE, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND);
    KafkaConnectUtils.sendReceiveMessagesThroughConnect(kcPods.get(0), TOPIC_NAME, kafkaClientsPodName, INFRA_NAMESPACE, clusterName);
    // check the ClusterRoleBinding annotations and labels in Kafka cluster
    Map<String, String> actualLabel = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getTemplate().getClusterRoleBinding().getMetadata().getLabels();
    Map<String, String> actualAnno = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getTemplate().getClusterRoleBinding().getMetadata().getAnnotations();
    assertThat(actualLabel, is(label));
    assertThat(actualAnno, is(anno));
    // Revert changes for CO deployment
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
            .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
            .withNamespace(INFRA_NAMESPACE)
            .createInstallation()
            .runBundleInstallation();
    DeploymentUtils.waitTillDepHasRolled(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName(), 1, coSnapshot);
}
Also used : PodStatus(io.fabric8.kubernetes.api.model.PodStatus) Rack(io.strimzi.api.kafka.model.Rack) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) IsolatedTest(io.strimzi.systemtest.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Aggregations

Rack (io.strimzi.api.kafka.model.Rack)10 ParallelTest (io.strimzi.test.annotations.ParallelTest)6 Container (io.fabric8.kubernetes.api.model.Container)4 PodSpec (io.fabric8.kubernetes.api.model.PodSpec)4 CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString)4 ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap)2 ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder)2 ContainerPort (io.fabric8.kubernetes.api.model.ContainerPort)2 EnvVar (io.fabric8.kubernetes.api.model.EnvVar)2 HasMetadata (io.fabric8.kubernetes.api.model.HasMetadata)2 HostAlias (io.fabric8.kubernetes.api.model.HostAlias)2 HostAliasBuilder (io.fabric8.kubernetes.api.model.HostAliasBuilder)2 IntOrString (io.fabric8.kubernetes.api.model.IntOrString)2 LabelSelectorBuilder (io.fabric8.kubernetes.api.model.LabelSelectorBuilder)2 LabelSelectorRequirementBuilder (io.fabric8.kubernetes.api.model.LabelSelectorRequirementBuilder)2 LocalObjectReference (io.fabric8.kubernetes.api.model.LocalObjectReference)2 OwnerReference (io.fabric8.kubernetes.api.model.OwnerReference)2 PersistentVolumeClaim (io.fabric8.kubernetes.api.model.PersistentVolumeClaim)2 PodSecurityContextBuilder (io.fabric8.kubernetes.api.model.PodSecurityContextBuilder)2 PodStatus (io.fabric8.kubernetes.api.model.PodStatus)2