
Example 71 with Condition

use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.

the class ResourceManager method logCurrentResourceStatus.

/**
 * Logs the current status of a custom resource together with the conditions of its pods.
 * @param customResource - Kafka, KafkaConnect etc.; any resource that has metadata and a Strimzi status
 */
public static <T extends CustomResource<? extends Spec, ? extends Status>> void logCurrentResourceStatus(T customResource) {
    if (customResource != null) {
        List<String> printWholeCR = Arrays.asList(KafkaConnector.RESOURCE_KIND, KafkaTopic.RESOURCE_KIND, KafkaUser.RESOURCE_KIND);
        String kind = customResource.getKind();
        String name = customResource.getMetadata().getName();
        if (printWholeCR.contains(kind)) {
            LOGGER.info(customResource);
        } else {
            List<String> log = new ArrayList<>(asList(kind, " status:\n", "\nConditions:\n"));
            if (customResource.getStatus() != null) {
                List<Condition> conditions = customResource.getStatus().getConditions();
                if (conditions != null) {
                    for (Condition condition : conditions) {
                        if (condition.getMessage() != null) {
                            log.add("\tType: " + condition.getType() + "\n");
                            log.add("\tMessage: " + condition.getMessage() + "\n");
                        }
                    }
                }
                log.add("\nPods with conditions and messages:\n\n");
                for (Pod pod : kubeClient().namespace(customResource.getMetadata().getNamespace()).listPodsByPrefixInName(name)) {
                    log.add(pod.getMetadata().getName() + ":");
                    for (PodCondition podCondition : pod.getStatus().getConditions()) {
                        if (podCondition.getMessage() != null) {
                            log.add("\n\tType: " + podCondition.getType() + "\n");
                            log.add("\tMessage: " + podCondition.getMessage() + "\n");
                        } else {
                            log.add("\n\tType: <EMPTY>\n");
                            log.add("\tMessage: <EMPTY>\n");
                        }
                    }
                    log.add("\n\n");
                }
                LOGGER.info("{}", String.join("", log).strip());
            }
        }
    }
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition) PodCondition(io.fabric8.kubernetes.api.model.PodCondition) Pod(io.fabric8.kubernetes.api.model.Pod) ArrayList(java.util.ArrayList) PodCondition(io.fabric8.kubernetes.api.model.PodCondition)
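
For context, here is a minimal sketch of how the same Condition filtering could be pulled into a reusable helper. It is not part of the Strimzi codebase; the class and method names are made up for illustration, and it only relies on the Condition getters already used above.

import io.strimzi.api.kafka.model.status.Condition;

import java.util.List;
import java.util.stream.Collectors;

public class ConditionFormatter {

    /**
     * Renders the conditions of a custom resource status the same way
     * logCurrentResourceStatus does: only conditions that carry a message
     * are included, each as a Type/Message pair.
     */
    public static String formatConditions(List<Condition> conditions) {
        if (conditions == null) {
            return "";
        }
        return conditions.stream()
            .filter(condition -> condition.getMessage() != null)
            .map(condition -> "\tType: " + condition.getType() + "\n"
                + "\tMessage: " + condition.getMessage() + "\n")
            .collect(Collectors.joining());
    }
}

A helper like this keeps the log-formatting concern out of the loop over pods, so the method above would only decide what to log, not how to render it.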

Example 72 with Condition

use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.

the class ConnectBuilderIsolatedST method testBuildFailsWithWrongChecksumOfArtifact.

@ParallelTest
void testBuildFailsWithWrongChecksumOfArtifact(ExtensionContext extensionContext) {
    TestStorage storage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
    final String imageName = getImageNameForTestCase();
    Plugin pluginWithWrongChecksum = new PluginBuilder()
        .withName("connector-with-wrong-checksum")
        .withArtifacts(new JarArtifactBuilder()
            .withUrl(ECHO_SINK_JAR_URL)
            .withSha512sum(ECHO_SINK_JAR_WRONG_CHECKSUM)
            .build())
        .build();
    resourceManager.createResource(extensionContext, ScraperTemplates.scraperPod(storage.getNamespaceName(), storage.getScraperName()).build());
    resourceManager.createResource(extensionContext, false,
        KafkaConnectTemplates.kafkaConnect(storage.getClusterName(), storage.getNamespaceName(), storage.getNamespaceName(), 1)
            .editMetadata()
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .editOrNewSpec()
                .withNewBuild()
                    .withPlugins(pluginWithWrongChecksum)
                    .withNewDockerOutput()
                        .withImage(imageName)
                    .endDockerOutput()
                .endBuild()
            .endSpec()
            .build());
    KafkaConnectUtils.waitForConnectNotReady(storage.getClusterName());
    KafkaConnectUtils.waitUntilKafkaConnectStatusConditionContainsMessage(storage.getClusterName(), storage.getNamespaceName(), "The Kafka Connect build failed(.*)?");
    LOGGER.info("Checking if KafkaConnect status condition contains message about build failure");
    KafkaConnect kafkaConnect = KafkaConnectResource.kafkaConnectClient().inNamespace(storage.getNamespaceName()).withName(storage.getClusterName()).get();
    LOGGER.info("Deploying network policies for KafkaConnect");
    NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, kafkaConnect, KafkaConnectResources.deploymentName(storage.getClusterName()));
    Condition connectCondition = kafkaConnect.getStatus().getConditions().stream().findFirst().orElseThrow();
    assertTrue(connectCondition.getMessage().matches("The Kafka Connect build failed(.*)?"));
    assertThat(connectCondition.getType(), is(NotReady.toString()));
    LOGGER.info("Replacing plugin's checksum with right one");
    KafkaConnectResource.replaceKafkaConnectResource(storage.getClusterName(), kC -> {
        Plugin pluginWithRightChecksum = new PluginBuilder()
            .withName("connector-with-right-checksum")
            .withArtifacts(new JarArtifactBuilder()
                .withUrl(ECHO_SINK_JAR_URL)
                .withSha512sum(ECHO_SINK_JAR_CHECKSUM)
                .build())
            .build();
        kC.getSpec().getBuild().getPlugins().remove(0);
        kC.getSpec().getBuild().getPlugins().add(pluginWithRightChecksum);
    });
    KafkaConnectUtils.waitForConnectReady(storage.getClusterName());
    String scraperPodName = kubeClient(storage.getNamespaceName()).listPodsByPrefixInName(storage.getScraperName()).get(0).getMetadata().getName();
    LOGGER.info("Checking if KafkaConnect API contains EchoSink connector");
    String plugins = cmdKubeClient().execInPod(scraperPodName, "curl", "-X", "GET", "http://" + KafkaConnectResources.serviceName(storage.getClusterName()) + ":8083/connector-plugins").out();
    assertTrue(plugins.contains(ECHO_SINK_CLASS_NAME));
    LOGGER.info("Checking if KafkaConnect resource contains EchoSink connector in status");
    kafkaConnect = KafkaConnectResource.kafkaConnectClient().inNamespace(storage.getNamespaceName()).withName(storage.getClusterName()).get();
    assertTrue(kafkaConnect.getStatus().getConnectorPlugins().stream().anyMatch(connectorPlugin -> connectorPlugin.getConnectorClass().contains(ECHO_SINK_CLASS_NAME)));
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest) SANITY(io.strimzi.systemtest.Constants.SANITY) KubeClusterResource.cmdKubeClient(io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) Annotations(io.strimzi.operator.common.Annotations) Random(java.util.Random) KafkaTopicUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils) KafkaConnectResource(io.strimzi.systemtest.resources.crd.KafkaConnectResource) KafkaConnector(io.strimzi.api.kafka.model.KafkaConnector) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) OtherArtifactBuilder(io.strimzi.api.kafka.model.connect.build.OtherArtifactBuilder) KafkaConnectUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils) BeforeAll(org.junit.jupiter.api.BeforeAll) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) ScraperTemplates(io.strimzi.systemtest.templates.specific.ScraperTemplates) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) ACCEPTANCE(io.strimzi.systemtest.Constants.ACCEPTANCE) IsolatedSuite(io.strimzi.systemtest.annotations.IsolatedSuite) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Collectors(java.util.stream.Collectors) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) Labels(io.strimzi.operator.common.model.Labels) Logger(org.apache.logging.log4j.Logger) Plugin(io.strimzi.api.kafka.model.connect.build.Plugin) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) KafkaConnectorTemplates(io.strimzi.systemtest.templates.crd.KafkaConnectorTemplates) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Matchers.is(org.hamcrest.Matchers.is) Condition(io.strimzi.api.kafka.model.status.Condition) AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) KafkaConnectTemplates(io.strimzi.systemtest.templates.crd.KafkaConnectTemplates) NetworkPolicyResource(io.strimzi.systemtest.resources.kubernetes.NetworkPolicyResource) CONNECT(io.strimzi.systemtest.Constants.CONNECT) OpenShiftOnly(io.strimzi.systemtest.annotations.OpenShiftOnly) HashMap(java.util.HashMap) PluginBuilder(io.strimzi.api.kafka.model.connect.build.PluginBuilder) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) ZipArtifactBuilder(io.strimzi.api.kafka.model.connect.build.ZipArtifactBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) KafkaConnectorResource(io.strimzi.systemtest.resources.crd.KafkaConnectorResource) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) NotReady(io.strimzi.systemtest.enums.CustomResourceStatus.NotReady) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TgzArtifactBuilder(io.strimzi.api.kafka.model.connect.build.TgzArtifactBuilder) ImageStream(io.fabric8.openshift.api.model.ImageStream) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) Ready(io.strimzi.systemtest.enums.CustomResourceStatus.Ready) Constants(io.strimzi.systemtest.Constants) KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) OpenShiftClient(io.fabric8.openshift.client.OpenShiftClient) JarArtifactBuilder(io.strimzi.api.kafka.model.connect.build.JarArtifactBuilder) KubeClusterResource.kubeClient(io.strimzi.test.k8s.KubeClusterResource.kubeClient) 
Util(io.strimzi.operator.common.Util) DeploymentUtils(io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils) CONNECT_COMPONENTS(io.strimzi.systemtest.Constants.CONNECT_COMPONENTS) ImageStreamBuilder(io.fabric8.openshift.api.model.ImageStreamBuilder) MavenArtifactBuilder(io.strimzi.api.kafka.model.connect.build.MavenArtifactBuilder) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION) KafkaConnectResources(io.strimzi.api.kafka.model.KafkaConnectResources) TestStorage(io.strimzi.systemtest.storage.TestStorage) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) PluginBuilder(io.strimzi.api.kafka.model.connect.build.PluginBuilder) Plugin(io.strimzi.api.kafka.model.connect.build.Plugin) JarArtifactBuilder(io.strimzi.api.kafka.model.connect.build.JarArtifactBuilder) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)
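
As a follow-up, the build-failure check from this test could be expressed as a standalone predicate. The sketch below is not part of the test suite; hasBuildFailureCondition is a hypothetical helper name, and the "NotReady" type string and the message pattern are taken from the assertions above.

import io.strimzi.api.kafka.model.KafkaConnect;
import io.strimzi.api.kafka.model.status.Condition;

public class KafkaConnectStatusChecks {

    /**
     * Returns true when any status condition of the given KafkaConnect
     * reports a failed build, i.e. its type is "NotReady" and its message
     * matches the build-failure pattern asserted in the test above.
     */
    public static boolean hasBuildFailureCondition(KafkaConnect kafkaConnect) {
        if (kafkaConnect.getStatus() == null || kafkaConnect.getStatus().getConditions() == null) {
            return false;
        }
        return kafkaConnect.getStatus().getConditions().stream()
            .anyMatch((Condition condition) -> "NotReady".equals(condition.getType())
                && condition.getMessage() != null
                && condition.getMessage().matches("The Kafka Connect build failed(.*)?"));
    }
}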

Example 73 with Condition

use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.

the class UserST method alterBigAmountOfUsers.

synchronized void alterBigAmountOfUsers(ExtensionContext extensionContext, String userName, String typeOfUser, int numberOfUsers, int producerRate, int consumerRate, int requestsPercentage, double mutationRate) {
    KafkaUserQuotas kuq = new KafkaUserQuotas();
    kuq.setConsumerByteRate(consumerRate);
    kuq.setProducerByteRate(producerRate);
    kuq.setRequestPercentage(requestsPercentage);
    kuq.setControllerMutationRate(mutationRate);
    LOGGER.info("Updating of existing KafkaUsers");
    for (int i = 0; i < numberOfUsers; i++) {
        String userNameWithSuffix = userName + "-" + i;
        if (typeOfUser.equals("TLS")) {
            resourceManager.createResource(extensionContext,
                KafkaUserTemplates.tlsUser(userClusterName, userNameWithSuffix)
                    .editMetadata()
                        .withNamespace(namespace)
                    .endMetadata()
                    .editSpec()
                        .withQuotas(kuq)
                    .endSpec()
                    .build());
        } else {
            resourceManager.createResource(extensionContext,
                KafkaUserTemplates.scramShaUser(userClusterName, userNameWithSuffix)
                    .editMetadata()
                        .withNamespace(namespace)
                    .endMetadata()
                    .editSpec()
                        .withQuotas(kuq)
                    .endSpec()
                    .build());
        }
        LOGGER.info("[After update] Checking status of KafkaUser {}", userNameWithSuffix);
        Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(namespace).withName(userNameWithSuffix).get().getStatus().getConditions().get(0);
        LOGGER.debug("KafkaUser condition status: {}", kafkaCondition.getStatus());
        LOGGER.debug("KafkaUser condition type: {}", kafkaCondition.getType());
        assertThat(kafkaCondition.getType(), is(Ready.toString()));
        LOGGER.debug("KafkaUser {} is in desired state: {}", userNameWithSuffix, kafkaCondition.getType());
        KafkaUserQuotas kuqAfter = KafkaUserResource.kafkaUserClient().inNamespace(namespace).withName(userNameWithSuffix).get().getSpec().getQuotas();
        LOGGER.debug("Check altered KafkaUser {} new quotas.", userNameWithSuffix);
        assertThat(kuqAfter.getRequestPercentage(), is(requestsPercentage));
        assertThat(kuqAfter.getConsumerByteRate(), is(consumerRate));
        assertThat(kuqAfter.getProducerByteRate(), is(producerRate));
        assertThat(kuqAfter.getControllerMutationRate(), is(mutationRate));
    }
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition) KafkaUserQuotas(io.strimzi.api.kafka.model.KafkaUserQuotas)
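
For readability, the quota setup at the top of this method could also be factored out. The sketch below only uses the KafkaUserQuotas setters already shown above; the QuotaFactory class and buildQuotas method names are hypothetical.

import io.strimzi.api.kafka.model.KafkaUserQuotas;

public class QuotaFactory {

    /**
     * Builds a KafkaUserQuotas object from the four quota values used in
     * alterBigAmountOfUsers, so the loop body only deals with user creation.
     */
    public static KafkaUserQuotas buildQuotas(int producerRate, int consumerRate,
                                              int requestsPercentage, double mutationRate) {
        KafkaUserQuotas quotas = new KafkaUserQuotas();
        quotas.setProducerByteRate(producerRate);
        quotas.setConsumerByteRate(consumerRate);
        quotas.setRequestPercentage(requestsPercentage);
        quotas.setControllerMutationRate(mutationRate);
        return quotas;
    }
}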

Example 74 with Condition

use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.

the class UserST method createBigAmountOfUsers.

synchronized void createBigAmountOfUsers(ExtensionContext extensionContext, String userName, String typeOfUser, int numberOfUsers) {
    LOGGER.info("Creating {} KafkaUsers", numberOfUsers);
    for (int i = 0; i < numberOfUsers; i++) {
        String userNameWithSuffix = userName + "-" + i;
        if (typeOfUser.equals("TLS")) {
            resourceManager.createResource(extensionContext,
                KafkaUserTemplates.tlsUser(userClusterName, userNameWithSuffix)
                    .editMetadata()
                        .withNamespace(namespace)
                    .endMetadata()
                    .build());
        } else {
            resourceManager.createResource(extensionContext,
                KafkaUserTemplates.scramShaUser(userClusterName, userNameWithSuffix)
                    .editMetadata()
                        .withNamespace(namespace)
                    .endMetadata()
                    .build());
        }
        LOGGER.debug("Checking status of KafkaUser {}", userNameWithSuffix);
        Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(namespace).withName(userNameWithSuffix).get().getStatus().getConditions().get(0);
        LOGGER.debug("KafkaUser condition status: {}", kafkaCondition.getStatus());
        LOGGER.debug("KafkaUser condition type: {}", kafkaCondition.getType());
        assertThat(kafkaCondition.getType(), is(Ready.toString()));
        LOGGER.debug("KafkaUser {} is in desired state: {}", userNameWithSuffix, kafkaCondition.getType());
    }
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition)
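
The readiness check repeated in the two helpers above can be condensed into one predicate. This is a minimal sketch under the assumption that a ready resource reports a condition of type "Ready" with status "True", which is how these tests read the first condition; the ReadinessCheck class is not part of the test suite.

import io.strimzi.api.kafka.model.status.Condition;

import java.util.List;

public class ReadinessCheck {

    /**
     * Returns true when the condition list contains a "Ready" condition
     * whose status is "True"; getType() and getStatus() are the same
     * getters used by createBigAmountOfUsers and alterBigAmountOfUsers.
     */
    public static boolean isReady(List<Condition> conditions) {
        return conditions != null && conditions.stream()
            .anyMatch(condition -> "Ready".equals(condition.getType())
                && "True".equals(condition.getStatus()));
    }
}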

Example 75 with Condition

use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.

the class CustomResourceStatusIsolatedST method testKafkaUserStatusNotReady.

@ParallelTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testKafkaUserStatusNotReady(ExtensionContext extensionContext) {
    // Simulate NotReady state with userName longer than 64 characters
    String userName = "sasl-use-rabcdefghijklmnopqrstuvxyzabcdefghijklmnopqrstuvxyzabcdef";
    resourceManager.createResource(extensionContext, false, KafkaUserTemplates.defaultUser(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());
    KafkaUserUtils.waitForKafkaUserNotReady(clusterOperator.getDeploymentNamespace(), userName);
    LOGGER.info("Checking status of deployed KafkaUser {}", userName);
    Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(clusterOperator.getDeploymentNamespace()).withName(userName).get().getStatus().getConditions().get(0);
    LOGGER.info("KafkaUser Status: {}", kafkaCondition.getStatus());
    LOGGER.info("KafkaUser Type: {}", kafkaCondition.getType());
    LOGGER.info("KafkaUser Message: {}", kafkaCondition.getMessage());
    LOGGER.info("KafkaUser Reason: {}", kafkaCondition.getReason());
    assertThat("KafkaUser is in wrong state!", kafkaCondition.getType(), is(NotReady.toString()));
    LOGGER.info("KafkaUser {} is in desired state: {}", userName, kafkaCondition.getType());
    KafkaUserResource.kafkaUserClient().inNamespace(clusterOperator.getDeploymentNamespace()).withName(userName).delete();
    KafkaUserUtils.waitForKafkaUserDeletion(clusterOperator.getDeploymentNamespace(), userName);
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition) KRaftNotSupported(io.strimzi.systemtest.annotations.KRaftNotSupported) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)
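
Strimzi's status model classes are generated with fluent builders, so unit tests often stub a condition directly instead of reading one from the cluster. The sketch below assumes a ConditionBuilder in io.strimzi.api.kafka.model.status that follows this pattern; the reason and message texts are illustrative and not copied from the operator.

import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;

public class ConditionStubs {

    /**
     * Builds a NotReady condition resembling the one a KafkaUser status
     * might carry when the user name exceeds the 64-character limit;
     * reason and message are placeholders for illustration only.
     */
    public static Condition notReadyUserNameTooLong() {
        return new ConditionBuilder()
            .withType("NotReady")
            .withStatus("True")
            .withReason("InvalidResource")
            .withMessage("User name longer than 64 characters is not supported")
            .build();
    }
}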

Aggregations

Condition (io.strimzi.api.kafka.model.status.Condition) 150
Test (org.junit.jupiter.api.Test) 70
Kafka (io.strimzi.api.kafka.model.Kafka) 61
HashMap (java.util.HashMap) 49
EphemeralStorage (io.strimzi.api.kafka.model.storage.EphemeralStorage) 42
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat) 38
Collections (java.util.Collections) 37
Vertx (io.vertx.core.Vertx) 36
Map (java.util.Map) 36
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder) 34
Labels (io.strimzi.operator.common.model.Labels) 34
Future (io.vertx.core.Future) 34
BeforeAll (org.junit.jupiter.api.BeforeAll) 32
Reconciliation (io.strimzi.operator.common.Reconciliation) 26
Checkpoint (io.vertx.junit5.Checkpoint) 26
VertxExtension (io.vertx.junit5.VertxExtension) 26
VertxTestContext (io.vertx.junit5.VertxTestContext) 26
AfterAll (org.junit.jupiter.api.AfterAll) 26
ExtendWith (org.junit.jupiter.api.extension.ExtendWith) 26
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient) 25