
Example 11 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi-kafka-operator by strimzi.

From class FeatureGatesIsolatedST, method testControlPlaneListenerFeatureGate.

/**
 * Control Plane Listener
 * https://github.com/strimzi/proposals/blob/main/025-control-plain-listener.md
 */
@IsolatedTest("Feature Gates test for disabled ControlPlainListener")
@Tag(INTERNAL_CLIENTS_USED)
public void testControlPlaneListenerFeatureGate(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 300;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int kafkaReplicas = 1;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-ControlPlaneListener", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(testEnvVars)
        .createInstallation()
        .runInstallation();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
    LOGGER.info("Check for presence of ContainerPort 9090/tcp (tcp-ctrlplane) in first Kafka pod.");
    final Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), clusterName + "-kafka-").get(0);
    ContainerPort expectedControlPlaneContainerPort = new ContainerPort(9090, null, null, "tcp-ctrlplane", "TCP");
    List<ContainerPort> kafkaPodPorts = kafkaPod.getSpec().getContainers().get(0).getPorts();
    assertTrue(kafkaPodPorts.contains(expectedControlPlaneContainerPort));
    Map<String, String> kafkaPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), kafkaSelector);
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName).editSpec().withReplicas(kafkaReplicas).withPartitions(kafkaReplicas).endSpec().build();
    resourceManager.createResource(extensionContext, kafkaTopic);
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(500)
        .withNamespaceName(clusterOperator.getDeploymentNamespace())
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    JobUtils.waitForJobRunning(consumerName, clusterOperator.getDeploymentNamespace());
    LOGGER.info("Delete first found Kafka broker pod.");
    kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), kafkaPod);
    RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
    LOGGER.info("Force Rolling Update of Kafka via annotation.");
    kafkaPods.keySet().forEach(podName -> {
        kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod)
            .editMetadata()
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")
            .endMetadata()
            .build());
    });
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
    ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) Pod(io.fabric8.kubernetes.api.model.Pod) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) ArrayList(java.util.ArrayList) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) KafkaTopic(io.strimzi.api.kafka.model.KafkaTopic) ContainerPort(io.fabric8.kubernetes.api.model.ContainerPort) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
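
The key assertion in this example is that the first broker container still exposes the 9090 tcp-ctrlplane port even with the ControlPlaneListener feature gate disabled on the operator. Outside the Strimzi test harness a similar check can be written directly against the fabric8 client; the sketch below is only an illustration, assuming fabric8 6.x (KubernetesClientBuilder) and hypothetical namespace and pod names.

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

public class ControlPlanePortCheck {
    public static void main(String[] args) {
        String namespace = "infra-namespace";   // assumption: namespace the cluster was deployed into
        String podName = "my-cluster-kafka-0";  // assumption: first broker pod of a cluster named "my-cluster"
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            Pod kafkaPod = client.pods().inNamespace(namespace).withName(podName).get();
            // same condition the test expresses via ContainerPort equality
            boolean hasControlPlanePort = kafkaPod.getSpec().getContainers().get(0).getPorts().stream()
                    .anyMatch(p -> Integer.valueOf(9090).equals(p.getContainerPort())
                            && "tcp-ctrlplane".equals(p.getName()));
            System.out.println("tcp-ctrlplane 9090 present: " + hasControlPlanePort);
        }
    }
}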

Example 12 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi-kafka-operator by strimzi.

From class FeatureGatesIsolatedST, method testKRaftMode.

/**
 * UseKRaft feature gate
 */
@IsolatedTest("Feature Gates test for enabled UseKRaft gate")
@Tag(INTERNAL_CLIENTS_USED)
public void testKRaftMode(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 180;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int kafkaReplicas = 3;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "+UseStrimziPodSets,+UseKRaft", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(testEnvVars)
        .createInstallation()
        .runInstallation();
    Kafka kafka = KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build();
    // The builder cannot disable the EO. It has to be done this way.
    kafka.getSpec().setEntityOperator(null);
    resourceManager.createResource(extensionContext, kafka);
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(500)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // Check that there is no ZooKeeper
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    assertThat("No ZooKeeper pods should exist", zkPods.size(), is(0));
    // Roll Kafka
    LOGGER.info("Force Rolling Update of Kafka via read-only configuration change.");
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getKafka().getConfig().put("log.retention.hours", 72), INFRA_NAMESPACE);
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientsSuccess(producerName, consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) ArrayList(java.util.ArrayList) Kafka(io.strimzi.api.kafka.model.Kafka) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
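
The STRIMZI_FEATURE_GATES value used above ("+UseStrimziPodSets,+UseKRaft") follows the usual convention of a comma-separated list where a leading + enables a gate and a leading - disables it. The snippet below is only an illustration of that format, not Strimzi's own parser.

import java.util.HashSet;
import java.util.Set;

public class FeatureGatesValue {
    public static void main(String[] args) {
        // same value the test passes through Environment.STRIMZI_FEATURE_GATES_ENV
        String value = "+UseStrimziPodSets,+UseKRaft";
        Set<String> enabled = new HashSet<>();
        Set<String> disabled = new HashSet<>();
        for (String gate : value.split(",")) {
            gate = gate.trim();
            if (gate.startsWith("+")) {
                enabled.add(gate.substring(1));
            } else if (gate.startsWith("-")) {
                disabled.add(gate.substring(1));
            }
        }
        System.out.println("enabled=" + enabled + ", disabled=" + disabled);
    }
}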

Example 13 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi-kafka-operator by strimzi.

From class FeatureGatesIsolatedST, method testStrimziPodSetsFeatureGate.

/**
 * UseStrimziPodSets feature gate
 * https://github.com/strimzi/proposals/blob/main/031-statefulset-removal.md
 */
@IsolatedTest("Feature Gates test for enabled UseStrimziPodSets gate")
@Tag(INTERNAL_CLIENTS_USED)
public void testStrimziPodSetsFeatureGate(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector zooSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 600;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int zooReplicas = 1;
    int kafkaReplicas = 1;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "+UseStrimziPodSets", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(testEnvVars)
        .createInstallation()
        .runInstallation();
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName).editSpec().withReplicas(kafkaReplicas).withPartitions(kafkaReplicas).endSpec().build();
    resourceManager.createResource(extensionContext, kafkaTopic);
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(500)
        .withNamespaceName(clusterOperator.getDeploymentNamespace())
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    JobUtils.waitForJobRunning(consumerName, clusterOperator.getDeploymentNamespace());
    // Delete one Zoo Pod
    Pod zooPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), KafkaResources.zookeeperStatefulSetName(clusterName) + "-").get(0);
    LOGGER.info("Delete first found ZooKeeper pod {}", zooPod.getMetadata().getName());
    kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), zooPod);
    RollingUpdateUtils.waitForComponentAndPodsReady(zooSelector, zooReplicas);
    // Delete one Kafka Pod
    Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), KafkaResources.kafkaStatefulSetName(clusterName) + "-").get(0);
    LOGGER.info("Delete first found Kafka broker pod {}", kafkaPod.getMetadata().getName());
    kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), kafkaPod);
    RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
    // Roll Zoo
    LOGGER.info("Force Rolling Update of ZooKeeper via annotation.");
    Map<String, String> zooPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), zooSelector);
    zooPods.keySet().forEach(podName -> {
        kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod)
            .editMetadata()
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")
            .endMetadata()
            .build());
    });
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), zooSelector, zooReplicas, zooPods);
    // Roll Kafka
    LOGGER.info("Force Rolling Update of Kafka via annotation.");
    Map<String, String> kafkaPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), kafkaSelector);
    kafkaPods.keySet().forEach(podName -> {
        kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod)
            .editMetadata()
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")
            .endMetadata()
            .build());
    });
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
    ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Pod(io.fabric8.kubernetes.api.model.Pod) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) ArrayList(java.util.ArrayList) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) KafkaTopic(io.strimzi.api.kafka.model.KafkaTopic) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)
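
Both forced rolls in this example rely on the strimzi.io/manual-rolling-update annotation, which the Cluster Operator acts on during its next reconciliation; annotating individual pods works here because the UseStrimziPodSets gate is enabled. A minimal standalone sketch of applying the same annotation with a plain fabric8 client follows (namespace and pod name are assumptions).

import io.fabric8.kubernetes.api.model.PodBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

public class ManualRollAnnotation {
    public static void main(String[] args) {
        String namespace = "infra-namespace";    // assumption
        String podName = "my-cluster-kafka-0";   // assumption
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            client.pods().inNamespace(namespace).withName(podName)
                    .edit(pod -> new PodBuilder(pod)
                            .editMetadata()
                                // the literal value behind Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE in the test
                                .addToAnnotations("strimzi.io/manual-rolling-update", "true")
                            .endMetadata()
                            .build());
        }
    }
}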

Example 14 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.

From class TopicScalabilityIsolatedST, method testModifyBigAmountOfTopicPartitions.

@IsolatedTest
void testModifyBigAmountOfTopicPartitions(ExtensionContext extensionContext) {
    final int defaultPartitionCount = 2;
    // Create topics
    KafkaTopicScalabilityUtils.createTopicsViaK8s(extensionContext, clusterOperator.getDeploymentNamespace(), sharedClusterName, topicPrefix, NUMBER_OF_TOPICS, defaultPartitionCount, 1, 1);
    KafkaTopicScalabilityUtils.waitForTopicsReady(clusterOperator.getDeploymentNamespace(), topicPrefix, NUMBER_OF_TOPICS);
    // Decrease partitions and expect not ready status
    KafkaTopicScalabilityUtils.modifyBigAmountOfTopics(clusterOperator.getDeploymentNamespace(), topicPrefix, NUMBER_OF_TOPICS, new KafkaTopicSpecBuilder().withPartitions(defaultPartitionCount - 1).build());
    KafkaTopicScalabilityUtils.waitForTopicsNotReady(clusterOperator.getDeploymentNamespace(), topicPrefix, NUMBER_OF_TOPICS);
    // Set back to default and check if topic becomes ready
    KafkaTopicScalabilityUtils.modifyBigAmountOfTopics(clusterOperator.getDeploymentNamespace(), topicPrefix, NUMBER_OF_TOPICS, new KafkaTopicSpecBuilder().withPartitions(defaultPartitionCount).build());
    KafkaTopicScalabilityUtils.waitForTopicsReady(clusterOperator.getDeploymentNamespace(), topicPrefix, NUMBER_OF_TOPICS);
}
Also used : KafkaTopicSpecBuilder(io.strimzi.api.kafka.model.KafkaTopicSpecBuilder) IsolatedTest(io.strimzi.test.annotations.IsolatedTest)
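
The scalability utilities above patch only the partitions field of each existing KafkaTopic. For context, a complete KafkaTopic resource built with the same fluent API looks roughly like this; the topic name, cluster label, and replica count are assumptions, not values taken from the test.

import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.KafkaTopicBuilder;

public class TopicSpecSketch {
    public static void main(String[] args) {
        KafkaTopic topic = new KafkaTopicBuilder()
            .withNewMetadata()
                .withName("my-topic-prefix-0")                    // assumption: one of the generated topic names
                .addToLabels("strimzi.io/cluster", "my-cluster")  // assumption: the shared cluster name
            .endMetadata()
            .withNewSpec()
                .withPartitions(2)   // matches defaultPartitionCount in the test
                .withReplicas(1)
            .endSpec()
            .build();
        System.out.println(topic.getMetadata().getName() + " partitions=" + topic.getSpec().getPartitions());
    }
}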

Example 15 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.

From class FeatureGatesIsolatedST, method testKRaftMode.

/**
 * UseKRaft feature gate
 */
@IsolatedTest("Feature Gates test for enabled UseKRaft gate")
@Tag(INTERNAL_CLIENTS_USED)
public void testKRaftMode(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 180;
    List<EnvVar> testEnvVars = new ArrayList<>();
    int kafkaReplicas = 3;
    testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "+UseStrimziPodSets,+UseKRaft", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(testEnvVars)
        .createInstallation()
        .runInstallation();
    Kafka kafka = KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build();
    // The builder cannot disable the EO. It has to be done this way.
    kafka.getSpec().setEntityOperator(null);
    resourceManager.createResource(extensionContext, kafka);
    LOGGER.info("Try to send some messages to Kafka over next few minutes.");
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(500)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // Check that there is no ZooKeeper
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    assertThat("No ZooKeeper pods should exist", zkPods.size(), is(0));
    // Roll Kafka
    LOGGER.info("Force Rolling Update of Kafka via read-only configuration change.");
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getKafka().getConfig().put("log.retention.hours", 72), INFRA_NAMESPACE);
    LOGGER.info("Wait for next reconciliation to happen.");
    RollingUpdateUtils.waitTillComponentHasRolled(INFRA_NAMESPACE, kafkaSelector, kafkaReplicas, kafkaPods);
    LOGGER.info("Waiting for clients to finish sending/receiving messages.");
    ClientUtils.waitForClientsSuccess(producerName, consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) ArrayList(java.util.ArrayList) Kafka(io.strimzi.api.kafka.model.Kafka) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) Random(java.util.Random) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Tag(org.junit.jupiter.api.Tag)

Aggregations

IsolatedTest (io.strimzi.test.annotations.IsolatedTest): 22
KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients): 12
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder): 12
EnvVar (io.fabric8.kubernetes.api.model.EnvVar): 8
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 8
SetupClusterOperator (io.strimzi.systemtest.resources.operator.SetupClusterOperator): 8
ArrayList (java.util.ArrayList): 8
Random (java.util.Random): 8
Tag (org.junit.jupiter.api.Tag): 8
Pod (io.fabric8.kubernetes.api.model.Pod): 6
PodBuilder (io.fabric8.kubernetes.api.model.PodBuilder): 6
KafkaTopic (io.strimzi.api.kafka.model.KafkaTopic): 6
OrderedProperties (io.strimzi.operator.common.model.OrderedProperties): 6
File (java.io.File): 6
HashMap (java.util.HashMap): 6
List (java.util.List): 6
Map (java.util.Map): 6
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 6
Matchers.containsString (org.hamcrest.Matchers.containsString): 5
Matchers.emptyString (org.hamcrest.Matchers.emptyString): 5