
Example 21 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From the class ValidationVisitorTest, method testValidationErrorsAreLogged.

@Test
public void testValidationErrorsAreLogged() {
    Kafka k = TestUtils.fromYaml("/example.yaml", Kafka.class, true);
    assertThat(k, is(notNullValue()));
    TestLogger logger = TestLogger.create(ValidationVisitorTest.class);
    HasMetadata resource = new KafkaBuilder().withNewMetadata().withName("testname").withNamespace("testnamespace").endMetadata().withApiVersion("v1beta2").build();
    Set<Condition> warningConditions = new HashSet<>();
    ResourceVisitor.visit(Reconciliation.DUMMY_RECONCILIATION, k, new ValidationVisitor(resource, logger, warningConditions));
    List<String> warningMessages = warningConditions.stream().map(Condition::getMessage).collect(Collectors.toList());
    assertThat(warningMessages, hasItem("Contains object at path spec.kafka with an unknown property: foo"));
    assertThat(warningMessages, hasItem("In API version v1beta2 the enableECDSA property at path spec.kafka.listeners.auth.enableECDSA has been deprecated."));
    assertThat(warningMessages, hasItem("In API version v1beta2 the service property at path spec.kafkaExporter.template.service has been deprecated. " + "The Kafka Exporter service has been removed."));
    logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN
        && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): "
            + "Contains object at path spec.kafka with an unknown property: foo"));
    logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN
        && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): "
            + "In API version v1beta2 the enableECDSA property at path spec.kafka.listeners.auth.enableECDSA has been deprecated."));
    logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN
        && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): "
            + "In API version v1beta2 the service property at path spec.kafkaExporter.template.service has been deprecated. "
            + "The Kafka Exporter service has been removed."));
}
Also used: Condition(io.strimzi.api.kafka.model.status.Condition), HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata), Kafka(io.strimzi.api.kafka.model.Kafka), KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder), TestLogger(io.strimzi.test.logging.TestLogger), HashSet(java.util.HashSet), Test(org.junit.jupiter.api.Test)
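
A minimal sketch of what such a warning Condition looks like, for readers unfamiliar with the class. It is not part of the test above; it assumes the fluent ConditionBuilder that is generated alongside Condition in io.strimzi.api.kafka.model.status, and the reason and message values are purely illustrative.

import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;

public class WarningConditionSketch {

    // Builds a warning-style Condition similar to those collected by ValidationVisitor.
    static Condition warning(String reason, String message) {
        return new ConditionBuilder()
                .withType("Warning")
                .withStatus("True")
                .withReason(reason)
                .withMessage(message)
                .build();
    }

    public static void main(String[] args) {
        Condition c = warning("DeprecatedFields",
                "In API version v1beta2 the enableECDSA property has been deprecated.");
        // Reading the condition back mirrors the Condition::getMessage mapping used in the test.
        System.out.println(c.getType() + ": " + c.getMessage());
    }
}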

Example 22 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From the class LoggingChangeST, method testNotExistingCMSetsDefaultLogging.

@ParallelNamespaceTest
void testNotExistingCMSetsDefaultLogging(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String defaultProps = TestUtils.getFileAsString(TestUtils.USER_PATH + "/../cluster-operator/src/main/resources/kafkaDefaultLoggingProperties");
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    String cmData = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n"
        + "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n"
        + "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n\n"
        + "log4j.rootLogger=INFO, CONSOLE\n"
        + "log4j.logger.org.I0Itec.zkclient.ZkClient=INFO\n"
        + "log4j.logger.org.apache.zookeeper=INFO\n"
        + "log4j.logger.kafka=INFO\n"
        + "log4j.logger.org.apache.kafka=INFO";
    String existingCmName = "external-cm";
    String nonExistingCmName = "non-existing-cm-name";
    ConfigMap configMap = new ConfigMapBuilder().withNewMetadata().withName(existingCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", cmData)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMap);
    LOGGER.info("Deploying Kafka with custom logging");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
        .editOrNewSpec()
            .editKafka()
                .withExternalLogging(new ExternalLoggingBuilder()
                    .withNewValueFrom()
                        .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                            .withKey("log4j.properties")
                            .withName(existingCmName)
                            .withOptional(false)
                            .build())
                    .endValueFrom()
                    .build())
            .endKafka()
        .endSpec()
        .build());
    String kafkaSsName = KafkaResources.kafkaStatefulSetName(clusterName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    String log4jFile = cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.DEBUG, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "cat custom-config/log4j.properties").out();
    assertTrue(log4jFile.contains(cmData));
    LOGGER.info("Changing external logging's CM to not existing one");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().getKafka().setLogging(
        new ExternalLoggingBuilder()
            .withNewValueFrom()
                .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                    .withKey("log4j.properties")
                    .withName(nonExistingCmName)
                    .withOptional(false)
                    .build())
            .endValueFrom()
            .build()), namespaceName);
    RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, kafkaPods);
    LOGGER.info("Checking that log4j.properties in custom-config isn't empty and configuration is default");
    log4jFile = cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.DEBUG, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "cat custom-config/log4j.properties").out();
    assertFalse(log4jFile.isEmpty());
    assertTrue(log4jFile.contains(cmData));
    assertFalse(log4jFile.contains(defaultProps));
    LOGGER.info("Checking if Kafka:{} contains error about non-existing CM", clusterName);
    Condition condition = KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getConditions().get(0);
    assertThat(condition.getType(), is(CustomResourceStatus.NotReady.toString()));
    assertTrue(condition.getMessage().matches("ConfigMap " + nonExistingCmName + " with external logging configuration does not exist .*"));
}
Also used: ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder), Condition(io.strimzi.api.kafka.model.status.Condition), ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap), ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder), ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder), LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector), ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
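
As a hedged aside (the helper below is hypothetical and not part of the Strimzi test code), reading the status through conditions.get(0) as this test does depends on the ordering of the conditions list; looking the condition up by type makes the intent explicit. Only Condition.getType(), which the test already uses, is assumed here.

import io.strimzi.api.kafka.model.status.Condition;

import java.util.List;
import java.util.Optional;

public final class ConditionLookup {

    private ConditionLookup() {
    }

    // Returns the first condition whose type matches, e.g. "NotReady" or "Ready".
    static Optional<Condition> findByType(List<Condition> conditions, String type) {
        return conditions.stream()
                .filter(c -> type.equals(c.getType()))
                .findFirst();
    }
}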

Example 23 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From the class CustomResourceStatusIsolatedST, method testKafkaUserStatusNotReady.

@ParallelTest
void testKafkaUserStatusNotReady(ExtensionContext extensionContext) {
    // Simulate NotReady state with userName longer than 64 characters
    String userName = "sasl-use-rabcdefghijklmnopqrstuvxyzabcdefghijklmnopqrstuvxyzabcdef";
    resourceManager.createResource(extensionContext, false, KafkaUserTemplates.defaultUser(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());
    KafkaUserUtils.waitForKafkaUserNotReady(Constants.INFRA_NAMESPACE, userName);
    LOGGER.info("Checking status of deployed KafkaUser {}", userName);
    Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(Constants.INFRA_NAMESPACE).withName(userName).get().getStatus().getConditions().get(0);
    LOGGER.info("KafkaUser Status: {}", kafkaCondition.getStatus());
    LOGGER.info("KafkaUser Type: {}", kafkaCondition.getType());
    LOGGER.info("KafkaUser Message: {}", kafkaCondition.getMessage());
    LOGGER.info("KafkaUser Reason: {}", kafkaCondition.getReason());
    assertThat("KafkaUser is in wrong state!", kafkaCondition.getType(), is(NotReady.toString()));
    LOGGER.info("KafkaUser {} is in desired state: {}", userName, kafkaCondition.getType());
    KafkaUserResource.kafkaUserClient().inNamespace(Constants.INFRA_NAMESPACE).withName(userName).delete();
    KafkaUserUtils.waitForKafkaUserDeletion(Constants.INFRA_NAMESPACE, userName);
}
Also used: Condition(io.strimzi.api.kafka.model.status.Condition), StringContains.containsString(org.hamcrest.core.StringContains.containsString), ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)

Example 24 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From the class CustomResourceStatusIsolatedST, method testKafkaUserStatus.

@ParallelTest
void testKafkaUserStatus(ExtensionContext extensionContext) {
    String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());
    LOGGER.info("Checking status of deployed KafkaUser");
    Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(Constants.INFRA_NAMESPACE).withName(userName).get().getStatus().getConditions().get(0);
    LOGGER.info("KafkaUser Status: {}", kafkaCondition.getStatus());
    LOGGER.info("KafkaUser Type: {}", kafkaCondition.getType());
    assertThat("KafkaUser is in wrong state!", kafkaCondition.getType(), is(Ready.toString()));
    LOGGER.info("KafkaUser is in desired state: Ready");
}
Also used: Condition(io.strimzi.api.kafka.model.status.Condition), StringContains.containsString(org.hamcrest.core.StringContains.containsString), ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)

Example 25 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From the class UserST, method alterBigAmountOfUsers.

synchronized void alterBigAmountOfUsers(ExtensionContext extensionContext, String userName, String typeOfUser, int numberOfUsers, int producerRate, int consumerRate, int requestsPercentage, double mutationRate) {
    KafkaUserQuotas kuq = new KafkaUserQuotas();
    kuq.setConsumerByteRate(consumerRate);
    kuq.setProducerByteRate(producerRate);
    kuq.setRequestPercentage(requestsPercentage);
    kuq.setControllerMutationRate(mutationRate);
    LOGGER.info("Updating of existing KafkaUsers");
    for (int i = 0; i < numberOfUsers; i++) {
        String userNameWithSuffix = userName + "-" + i;
        if (typeOfUser.equals("TLS")) {
            resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(userClusterName, userNameWithSuffix).editMetadata().withNamespace(namespace).endMetadata().editSpec().withQuotas(kuq).endSpec().build());
        } else {
            resourceManager.createResource(extensionContext, KafkaUserTemplates.scramShaUser(userClusterName, userNameWithSuffix).editMetadata().withNamespace(namespace).endMetadata().editSpec().withQuotas(kuq).endSpec().build());
        }
        LOGGER.info("[After update] Checking status of KafkaUser {}", userNameWithSuffix);
        Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(namespace).withName(userNameWithSuffix).get().getStatus().getConditions().get(0);
        LOGGER.debug("KafkaUser condition status: {}", kafkaCondition.getStatus());
        LOGGER.debug("KafkaUser condition type: {}", kafkaCondition.getType());
        assertThat(kafkaCondition.getType(), is(Ready.toString()));
        LOGGER.debug("KafkaUser {} is in desired state: {}", userNameWithSuffix, kafkaCondition.getType());
        KafkaUserQuotas kuqAfter = KafkaUserResource.kafkaUserClient().inNamespace(namespace).withName(userNameWithSuffix).get().getSpec().getQuotas();
        LOGGER.debug("Check altered KafkaUser {} new quotas.", userNameWithSuffix);
        assertThat(kuqAfter.getRequestPercentage(), is(requestsPercentage));
        assertThat(kuqAfter.getConsumerByteRate(), is(consumerRate));
        assertThat(kuqAfter.getProducerByteRate(), is(producerRate));
        assertThat(kuqAfter.getControllerMutationRate(), is(mutationRate));
    }
}
Also used: Condition(io.strimzi.api.kafka.model.status.Condition), KafkaUserQuotas(io.strimzi.api.kafka.model.KafkaUserQuotas)
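
The quotas in this example are assembled with plain setters on KafkaUserQuotas. The standalone sketch below isolates just that part; it uses only the setters and getters already exercised in alterBigAmountOfUsers, and the rate values are illustrative.

import io.strimzi.api.kafka.model.KafkaUserQuotas;

public class KafkaUserQuotasSketch {

    // Assembles a KafkaUserQuotas object like the one passed to withQuotas(kuq) above.
    static KafkaUserQuotas quotas(int producerRate, int consumerRate, int requestsPercentage, double mutationRate) {
        KafkaUserQuotas kuq = new KafkaUserQuotas();
        kuq.setProducerByteRate(producerRate);
        kuq.setConsumerByteRate(consumerRate);
        kuq.setRequestPercentage(requestsPercentage);
        kuq.setControllerMutationRate(mutationRate);
        return kuq;
    }

    public static void main(String[] args) {
        KafkaUserQuotas kuq = quotas(1000, 2000, 50, 2.0);
        System.out.println("Producer byte rate: " + kuq.getProducerByteRate());
    }
}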

Aggregations

Condition (io.strimzi.api.kafka.model.status.Condition): 132 usages
Test (org.junit.jupiter.api.Test): 58 usages
Kafka (io.strimzi.api.kafka.model.Kafka): 46 usages
HashMap (java.util.HashMap): 41 usages
EphemeralStorage (io.strimzi.api.kafka.model.storage.EphemeralStorage): 36 usages
Collections (java.util.Collections): 32 usages
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 32 usages
Vertx (io.vertx.core.Vertx): 28 usages
Map (java.util.Map): 27 usages
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder): 26 usages
Labels (io.strimzi.operator.common.model.Labels): 26 usages
Future (io.vertx.core.Future): 26 usages
BeforeAll (org.junit.jupiter.api.BeforeAll): 26 usages
CoreMatchers.is (org.hamcrest.CoreMatchers.is): 24 usages
Promise (io.vertx.core.Promise): 20 usages
Checkpoint (io.vertx.junit5.Checkpoint): 20 usages
VertxExtension (io.vertx.junit5.VertxExtension): 20 usages
VertxTestContext (io.vertx.junit5.VertxTestContext): 20 usages
List (java.util.List): 20 usages
AfterAll (org.junit.jupiter.api.AfterAll): 20 usages