
Example 81 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in the project strimzi-kafka-operator by strimzi.

The example comes from the class MirrorMakerIsolatedST, method testScaleMirrorMakerSubresource.

@ParallelNamespaceTest
@Tag(SCALABILITY)
void testScaleMirrorMakerSubresource(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaClusterSourceName = clusterName + "-source";
    String kafkaClusterTargetName = clusterName + "-target";
    LOGGER.info("Creating kafka source cluster {}", kafkaClusterSourceName);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build());
    LOGGER.info("Creating kafka target cluster {}", kafkaClusterTargetName);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
    resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(clusterName, kafkaClusterTargetName, kafkaClusterSourceName, ClientUtils.generateRandomConsumerGroup(), 1, false).build());
    int scaleTo = 4;
    long mmObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getObservedGeneration();
    String mmGenName = kubeClient(namespaceName).listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND).get(0).getMetadata().getGenerateName();
    LOGGER.info("-------> Scaling KafkaMirrorMaker subresource <-------");
    LOGGER.info("Scaling subresource replicas to {}", scaleTo);
    cmdKubeClient().namespace(namespaceName).scaleByName(KafkaMirrorMaker.RESOURCE_KIND, clusterName, scaleTo);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), scaleTo);
    LOGGER.info("Check if replicas is set to {}, naming prefix should be same and observed generation higher", scaleTo);
    List<String> mmPods = kubeClient(namespaceName).listPodNames(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND);
    assertThat(mmPods.size(), is(4));
    assertThat(KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(namespaceName).withName(clusterName).get().getSpec().getReplicas(), is(4));
    assertThat(KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getReplicas(), is(4));
    /*
     * The observed generation should be higher than before scaling: after a change of the spec
     * and a successful reconciliation, the observed generation is increased.
     */
    assertThat(mmObsGen < KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getObservedGeneration(), is(true));
    for (String pod : mmPods) {
        assertThat(pod.contains(mmGenName), is(true));
    }
}
Also used: CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest), Tag (org.junit.jupiter.api.Tag)
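
The boolean check on the observed generation at the end of this test can be expressed more directly with Hamcrest's greaterThan matcher, which also reports both values on failure. A minimal, self-contained sketch with placeholder values (not part of the test above):

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.greaterThan;

long mmObsGenBefore = 1L;  // observed generation captured before scaling (placeholder value)
long mmObsGenAfter = 2L;   // observed generation re-read after scaling (placeholder value)
// Equivalent to assertThat(before < after, is(true)), but with a more descriptive failure message
assertThat(mmObsGenAfter, greaterThan(mmObsGenBefore));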

Example 82 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in the project strimzi-kafka-operator by strimzi.

The example comes from the class ReconciliationST, method testPauseReconciliationInKafkaAndKafkaConnectWithConnector.

@ParallelNamespaceTest
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
void testPauseReconciliationInKafkaAndKafkaConnectWithConnector(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    String kafkaSsName = KafkaResources.kafkaStatefulSetName(clusterName);
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaSsName);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3).build());
    LOGGER.info("Adding pause annotation into Kafka resource and also scaling replicas to 4, new pod should not appear");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        kafka.getMetadata().setAnnotations(PAUSE_ANNO);
        kafka.getSpec().getKafka().setReplicas(SCALE_TO);
    }, namespaceName);
    LOGGER.info("Kafka should contain status with {}", CustomResourceStatus.ReconciliationPaused.toString());
    KafkaUtils.waitForKafkaStatus(namespaceName, clusterName, CustomResourceStatus.ReconciliationPaused);
    PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, kafkaSsName, 3);
    LOGGER.info("Setting annotation to \"false\", Kafka should be scaled to {}", SCALE_TO);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"), namespaceName);
    RollingUpdateUtils.waitForComponentAndPodsReady(namespaceName, kafkaSelector, SCALE_TO);
    LOGGER.info("Deploying KafkaConnect with pause annotation from the start, no pods should appear");
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    resourceManager.createResource(extensionContext, false,
        KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1)
            .editOrNewMetadata()
                .addToAnnotations(PAUSE_ANNO)
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .build());
    String connectDepName = KafkaConnectResources.deploymentName(clusterName);
    KafkaConnectUtils.waitForConnectStatus(namespaceName, clusterName, CustomResourceStatus.ReconciliationPaused);
    PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, connectDepName, 0);
    LOGGER.info("Setting annotation to \"false\" and creating KafkaConnector");
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kc -> kc.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"), namespaceName);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, connectDepName, 1);
    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName).build());
    String connectPodName = kubeClient(namespaceName).listPods(clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName();
    String connectorSpec = KafkaConnectorUtils.getConnectorSpecFromConnectAPI(namespaceName, connectPodName, clusterName);
    LOGGER.info("Adding pause annotation into the KafkaConnector and scaling taskMax to 4");
    KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(clusterName, connector -> {
        connector.getMetadata().setAnnotations(PAUSE_ANNO);
        connector.getSpec().setTasksMax(SCALE_TO);
    }, namespaceName);
    KafkaConnectorUtils.waitForConnectorStatus(namespaceName, clusterName, CustomResourceStatus.ReconciliationPaused);
    KafkaConnectorUtils.waitForConnectorSpecFromConnectAPIStability(namespaceName, connectPodName, clusterName, connectorSpec);
    LOGGER.info("Setting annotation to \"false\", taskMax should be increased to {}", SCALE_TO);
    KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(clusterName, connector -> connector.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"), namespaceName);
    String oldConfig = new JsonObject(connectorSpec).getValue("config").toString();
    JsonObject newConfig = new JsonObject(KafkaConnectorUtils.waitForConnectorConfigUpdate(namespaceName, connectPodName, clusterName, oldConfig, "localhost"));
    assertThat(newConfig.getValue("tasks.max"), is(Integer.toString(SCALE_TO)));
}
Also used: LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector), JsonObject (io.vertx.core.json.JsonObject), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest), Tag (org.junit.jupiter.api.Tag)
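
The final assertion parses the connector spec returned by the Connect REST API with Vert.x's JsonObject. A small standalone sketch of that parsing step, using a made-up connector spec instead of a live API response (the config values below are illustrative only):

import io.vertx.core.json.JsonObject;

// Illustrative payload; in the test the real spec comes from KafkaConnectorUtils.getConnectorSpecFromConnectAPI(...)
String connectorSpec = "{\"name\":\"my-connector\",\"config\":{\"tasks.max\":\"4\",\"topic\":\"my-topic\"}}";

JsonObject config = new JsonObject(connectorSpec).getJsonObject("config");
// The Connect REST API returns config values as strings, hence the string comparison in the test
System.out.println(config.getValue("tasks.max").equals(Integer.toString(4)));  // prints: true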

Example 83 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in the project strimzi-kafka-operator by strimzi.

The example comes from the class ReconciliationST, method testPauseReconciliationInKafkaRebalanceAndTopic.

@ParallelNamespaceTest
@Tag(CRUISE_CONTROL)
void testPauseReconciliationInKafkaRebalanceAndTopic(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(clusterName, 3, 3).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    LOGGER.info("Adding pause annotation into KafkaTopic resource and changing replication factor");
    KafkaTopicResource.replaceTopicResourceInSpecificNamespace(topicName, topic -> {
        topic.getMetadata().setAnnotations(PAUSE_ANNO);
        topic.getSpec().setPartitions(SCALE_TO);
    }, namespaceName);
    KafkaTopicUtils.waitForKafkaTopicStatus(namespaceName, topicName, CustomResourceStatus.ReconciliationPaused);
    KafkaTopicUtils.waitForKafkaTopicSpecStability(topicName, KafkaResources.kafkaPodName(clusterName, 0), KafkaResources.plainBootstrapAddress(clusterName));
    LOGGER.info("Setting annotation to \"false\", partitions should be scaled to {}", SCALE_TO);
    KafkaTopicResource.replaceTopicResource(topicName, topic -> topic.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"));
    KafkaTopicUtils.waitForKafkaTopicPartitionChange(topicName, SCALE_TO);
    resourceManager.createResource(extensionContext, KafkaRebalanceTemplates.kafkaRebalance(clusterName).build());
    LOGGER.info("Waiting for {}, then add pause and rebalance annotation, rebalancing should not be triggered", KafkaRebalanceState.ProposalReady);
    KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.ProposalReady);
    KafkaRebalanceResource.replaceKafkaRebalanceResourceInSpecificNamespace(clusterName, rebalance -> rebalance.getMetadata().setAnnotations(PAUSE_ANNO), namespaceName);
    KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.ReconciliationPaused);
    KafkaRebalanceUtils.annotateKafkaRebalanceResource(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, namespaceName, clusterName), namespaceName, clusterName, KafkaRebalanceAnnotation.approve);
    // unfortunately there is no direct way to check whether anything changed while reconciliation is paused,
    // so we check the stability of the status instead
    KafkaRebalanceUtils.waitForRebalanceStatusStability(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, namespaceName, clusterName), namespaceName, clusterName);
    LOGGER.info("Setting annotation to \"false\" and waiting for KafkaRebalance to be in {} state", KafkaRebalanceState.Ready);
    KafkaRebalanceResource.replaceKafkaRebalanceResourceInSpecificNamespace(clusterName, rebalance -> rebalance.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"), namespaceName);
    KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.ProposalReady);
    // the approve annotation was not reflected while reconciliation was paused, so approve again
    KafkaRebalanceUtils.annotateKafkaRebalanceResource(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, namespaceName, clusterName), namespaceName, clusterName, KafkaRebalanceAnnotation.approve);
    KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.Ready);
}
Also used: Reconciliation (io.strimzi.operator.common.Reconciliation), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest), Tag (org.junit.jupiter.api.Tag)
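
Both this test and the previous one first overwrite the resource's annotations with PAUSE_ANNO and later flip the value in place. A minimal sketch of that pattern on a bare fabric8 ObjectMeta, assuming PAUSE_ANNO maps strimzi.io/pause-reconciliation to "true" (the constant's exact definition is not shown in this listing):

import io.fabric8.kubernetes.api.model.ObjectMeta;
import java.util.HashMap;
import java.util.Map;

// Assumed stand-in for the PAUSE_ANNO constant used in these tests
Map<String, String> pauseAnno = new HashMap<>();
pauseAnno.put("strimzi.io/pause-reconciliation", "true");

ObjectMeta metadata = new ObjectMeta();
// setAnnotations(...) replaces the whole annotation map on the resource
metadata.setAnnotations(pauseAnno);

// Map.replace(key, oldValue, newValue) only swaps the value when the current mapping equals oldValue,
// so toggling to "false" is a no-op if the annotation was never set to "true"
metadata.getAnnotations().replace("strimzi.io/pause-reconciliation", "true", "false");
System.out.println(metadata.getAnnotations());  // {strimzi.io/pause-reconciliation=false}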

Example 84 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in the project strimzi-kafka-operator by strimzi.

The example comes from the class JmxIsolatedST, method testKafkaZookeeperAndKafkaConnectWithJMX.

@ParallelNamespaceTest
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
void testKafkaZookeeperAndKafkaConnectWithJMX(ExtensionContext extensionContext) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String zkSecretName = clusterName + "-zookeeper-jmx";
    final String connectJmxSecretName = clusterName + "-kafka-connect-jmx";
    final String kafkaJmxSecretName = clusterName + "-kafka-jmx";
    Map<String, String> jmxSecretLabels = Collections.singletonMap("my-label", "my-value");
    Map<String, String> jmxSecretAnnotations = Collections.singletonMap("my-annotation", "some-value");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editOrNewSpec()
            .editKafka()
                .withNewJmxOptions()
                    .withAuthentication(new KafkaJmxAuthenticationPassword())
                .endJmxOptions()
            .endKafka()
            .editOrNewZookeeper()
                .withNewJmxOptions()
                    .withAuthentication(new KafkaJmxAuthenticationPassword())
                .endJmxOptions()
                .editOrNewTemplate()
                    .withNewJmxSecret()
                        .withNewMetadata()
                            .withLabels(jmxSecretLabels)
                            .withAnnotations(jmxSecretAnnotations)
                        .endMetadata()
                    .endJmxSecret()
                .endTemplate()
            .endZookeeper()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    String clientsPodName = kubeClient().listPodsByPrefixInName(kafkaClientsName).get(0).getMetadata().getName();
    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1, true).editOrNewSpec().withNewJmxOptions().withAuthentication(new KafkaJmxAuthenticationPassword()).endJmxOptions().endSpec().build());
    Secret jmxZkSecret = kubeClient().getSecret(namespaceName, zkSecretName);
    String kafkaResults = JmxUtils.collectJmxMetricsWithWait(namespaceName, KafkaResources.brokersServiceName(clusterName), kafkaJmxSecretName, clientsPodName, "bean kafka.server:type=app-info\nget -i *");
    String kafkaConnectResults = JmxUtils.collectJmxMetricsWithWait(namespaceName, KafkaConnectResources.serviceName(clusterName), connectJmxSecretName, clientsPodName, "bean kafka.connect:type=app-info\nget -i *");
    String zkBeans = JmxUtils.collectJmxMetricsWithWait(namespaceName, KafkaResources.zookeeperHeadlessServiceName(clusterName), zkSecretName, clientsPodName, "domain org.apache.ZooKeeperService\nbeans");
    String zkBean = Arrays.asList(zkBeans.split("\\n")).stream().filter(bean -> bean.matches("org.apache.ZooKeeperService:name[0-9]+=ReplicatedServer_id[0-9]+")).findFirst().get();
    String zkResults = JmxUtils.collectJmxMetricsWithWait(namespaceName, KafkaResources.zookeeperHeadlessServiceName(clusterName), zkSecretName, clientsPodName, "bean " + zkBean + "\nget -i *");
    assertThat("Result from Kafka JMX doesn't contain right version of Kafka, result: " + kafkaResults, kafkaResults, containsString("version = " + Environment.ST_KAFKA_VERSION));
    assertThat("Result from KafkaConnect JMX doesn't contain right version of Kafka, result: " + kafkaConnectResults, kafkaConnectResults, containsString("version = " + Environment.ST_KAFKA_VERSION));
    assertThat("Result from Zookeeper JMX doesn't contain right quorum size, result: " + zkResults, zkResults, containsString("QuorumSize = 3"));
    LOGGER.info("Checking that Zookeeper JMX secret is created with custom labels and annotations");
    assertTrue(jmxZkSecret.getMetadata().getLabels().entrySet().containsAll(jmxSecretLabels.entrySet()));
    assertTrue(jmxZkSecret.getMetadata().getAnnotations().entrySet().containsAll(jmxSecretAnnotations.entrySet()));
}
Also used: Secret (io.fabric8.kubernetes.api.model.Secret), KafkaJmxAuthenticationPassword (io.strimzi.api.kafka.model.KafkaJmxAuthenticationPassword), Matchers.containsString (org.hamcrest.Matchers.containsString), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest), Tag (org.junit.jupiter.api.Tag)
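
The ZooKeeper bean name is picked out of the JMX listing with a regular expression. A self-contained version of that filtering step, with a hard-coded bean listing standing in for the output of JmxUtils.collectJmxMetricsWithWait:

import java.util.Arrays;

// Hard-coded sample of a ZooKeeper JMX bean listing (illustrative only)
String zkBeans = String.join("\n",
        "org.apache.ZooKeeperService:name0=ReplicatedServer_id1",
        "org.apache.ZooKeeperService:name0=ReplicatedServer_id1,name1=replica.1");

String zkBean = Arrays.stream(zkBeans.split("\\n"))
        .filter(bean -> bean.matches("org.apache.ZooKeeperService:name[0-9]+=ReplicatedServer_id[0-9]+"))
        .findFirst()
        .orElseThrow();

System.out.println(zkBean);  // org.apache.ZooKeeperService:name0=ReplicatedServer_id1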

Example 85 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in the project strimzi-kafka-operator by strimzi.

The example comes from the class LoggingChangeST, method testLoggingHierarchy.

@ParallelNamespaceTest
void testLoggingHierarchy(ExtensionContext extensionContext) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String namespaceName = extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get(Constants.NAMESPACE_KEY).toString();
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build(), KafkaClientsTemplates.kafkaClients(kafkaClientsName).build());
    resourceManager.createResource(extensionContext,
        KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1, true)
            .editMetadata()
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .editOrNewSpec()
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .endSpec()
            .build(),
        KafkaConnectorTemplates.defaultKafkaConnector(clusterName, clusterName, 1).build());
    String connectorClassName = "org.apache.kafka.connect.file.FileStreamSourceConnector";
    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
    LOGGER.info("Changing rootLogger level in KafkaConnector to ERROR with inline logging");
    InlineLogging inlineError = new InlineLogging();
    inlineError.setLoggers(Collections.singletonMap("log4j.logger." + connectorClassName, "ERROR"));
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, connect -> connect.getSpec().setLogging(inlineError), namespaceName);
    LOGGER.info("Waiting for Connect API loggers will contain desired settings");
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/" + connectorClassName).out().contains("ERROR"));
    LOGGER.info("Restarting Kafka connector {} with class name {}", clusterName, connectorClassName);
    cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "-X", "POST", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/connectors/" + clusterName + "/restart");
    KafkaConnectorUtils.waitForConnectorWorkerStatus(namespaceName, kafkaClientsPodName, clusterName, clusterName, "RUNNING");
    LOGGER.info("Checking that logger is same for connector with class name {}", connectorClassName);
    String connectorLogger = cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/" + connectorClassName).out();
    assertTrue(connectorLogger.contains("ERROR"));
    LOGGER.info("Changing KafkaConnect's root logger to WARN, KafkaConnector: {} shouldn't inherit it", clusterName);
    InlineLogging inlineWarn = new InlineLogging();
    // set both the Connect root logger and the connector-class logger in a single call
    inlineWarn.setLoggers(Map.of("connect.root.logger.level", "WARN", "log4j.logger." + connectorClassName, "ERROR"));
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, connect -> connect.getSpec().setLogging(inlineWarn), namespaceName);
    TestUtils.waitFor("Logger change", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient().namespace(namespaceName).execInPod(kafkaClientsPodName, "curl", "http://" + KafkaConnectResources.serviceName(clusterName) + ":8083/admin/loggers/root").out().contains("WARN"));
    LOGGER.info("Checking if KafkaConnector {} doesn't inherit logger from KafkaConnect", connectorClassName);
    KafkaConnectorUtils.loggerStabilityWait(namespaceName, clusterName, kafkaClientsPodName, "ERROR", connectorClassName);
}
Also used: InlineLogging (io.strimzi.api.kafka.model.InlineLogging), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)
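
The loggers endpoint queried through curl in this test is the standard Kafka Connect admin REST API. A rough equivalent using Java's built-in HTTP client, assuming the Connect REST interface has been made reachable on localhost:8083 (for example via kubectl port-forward); this is illustrative and not part of the test code:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectLoggerCheck {
    public static void main(String[] args) throws Exception {
        String connectorClassName = "org.apache.kafka.connect.file.FileStreamSourceConnector";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8083/admin/loggers/" + connectorClassName))
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // The body reports the effective level for the logger, e.g. {"level":"ERROR"}
        System.out.println(response.body());
    }
}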

Aggregations

ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest): 302
Tag (org.junit.jupiter.api.Tag): 166
Matchers.containsString (org.hamcrest.Matchers.containsString): 148
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 114
InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient): 112
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 94
KafkaUser (io.strimzi.api.kafka.model.KafkaUser): 86
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 76
SecretBuilder (io.fabric8.kubernetes.api.model.SecretBuilder): 66
HashMap (java.util.HashMap): 52
ResourceRequirementsBuilder (io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder): 50
TestUtils.fromYamlString (io.strimzi.test.TestUtils.fromYamlString): 48
Matchers.emptyOrNullString (org.hamcrest.Matchers.emptyOrNullString): 48
ExternalKafkaClient (io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient): 42
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap): 40
ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder): 40
Secret (io.fabric8.kubernetes.api.model.Secret): 40
ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder): 38
KafkaListenerAuthenticationTls (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationTls): 36
Pod (io.fabric8.kubernetes.api.model.Pod): 32
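
For context, @ParallelNamespaceTest is a composed JUnit 5 annotation defined in the Strimzi system-test module. Its exact definition is not part of this listing; the following is only a minimal sketch of how such a composed annotation can be declared with standard JUnit 5 building blocks (the real annotation additionally wires up per-test namespace handling through extensions):

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.parallel.Execution;
import org.junit.jupiter.api.parallel.ExecutionMode;

// Hypothetical sketch, not the actual Strimzi definition
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Test
@Execution(ExecutionMode.CONCURRENT)
@interface ParallelNamespaceTestSketch {
}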