Search in sources :

Example 1 with TestUtils

use of io.strimzi.test.TestUtils in project strimzi by strimzi.

In the class TracingST, the method deployJaegerInstance:

/**
 * Installs a Jaeger instance into the given namespace: applies the instance
 * install YAML (with the all-in-one image pinned to the tested Jaeger
 * version), registers a cleanup action that deletes the applied content when
 * the test finishes, and waits for the Jaeger deployment and its pods to
 * become ready.
 *
 * @param extensionContext JUnit extension context; its display name keys the stored cleanup resources
 * @param namespaceName    namespace to deploy the Jaeger instance into
 */
void deployJaegerInstance(ExtensionContext extensionContext, String namespaceName) {
    LOGGER.info("=== Applying jaeger instance install file ===");
    String instanceYamlContent = TestUtils.getContent(new File(jaegerInstancePath), TestUtils::toYamlString);
    // Pin the all-in-one image tag to the tested Jaeger version (first 4 chars, e.g. "1.22" —
    // assumes JAEGER_VERSION is of the form "X.YY..."; TODO confirm for two-digit majors).
    // The pattern must match any existing tag, so use "[^']*": the previous ":*" quantified
    // the colon itself and never matched a concrete tag such as ":1.29".
    cmdKubeClient(namespaceName).applyContent(instanceYamlContent.replaceAll("image: 'jaegertracing/all-in-one[^']*'", "image: 'jaegertracing/all-in-one:" + JAEGER_VERSION.substring(0, 4) + "'"));
    // Register deletion of the applied YAML so the instance is cleaned up after the test.
    ResourceManager.STORED_RESOURCES.computeIfAbsent(extensionContext.getDisplayName(), k -> new Stack<>());
    ResourceManager.STORED_RESOURCES.get(extensionContext.getDisplayName()).push(new ResourceItem(() -> cmdKubeClient(namespaceName).deleteContent(instanceYamlContent)));
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, JAEGER_INSTANCE_NAME, 1);
}
Also used : TestUtils(io.strimzi.test.TestUtils) ResourceItem(io.strimzi.systemtest.resources.ResourceItem) File(java.io.File)

Example 2 with TestUtils

use of io.strimzi.test.TestUtils in project strimzi-kafka-operator by strimzi.

In the class TracingST, the method deployJaegerInstance:

/**
 * Installs a Jaeger instance into the given namespace: applies the instance
 * install YAML (with the all-in-one image pinned to the tested Jaeger
 * version), registers a cleanup action that deletes the applied content when
 * the test finishes, and waits for the Jaeger deployment and its pods to
 * become ready.
 *
 * @param extensionContext JUnit extension context; its display name keys the stored cleanup resources
 * @param namespaceName    namespace to deploy the Jaeger instance into
 */
void deployJaegerInstance(ExtensionContext extensionContext, String namespaceName) {
    LOGGER.info("=== Applying jaeger instance install file ===");
    String instanceYamlContent = TestUtils.getContent(new File(jaegerInstancePath), TestUtils::toYamlString);
    // Pin the all-in-one image tag to the tested Jaeger version (first 4 chars, e.g. "1.22" —
    // assumes JAEGER_VERSION is of the form "X.YY..."; TODO confirm for two-digit majors).
    // The pattern must match any existing tag, so use "[^']*": the previous ":*" quantified
    // the colon itself and never matched a concrete tag such as ":1.29".
    cmdKubeClient(namespaceName).applyContent(instanceYamlContent.replaceAll("image: 'jaegertracing/all-in-one[^']*'", "image: 'jaegertracing/all-in-one:" + JAEGER_VERSION.substring(0, 4) + "'"));
    // Register deletion of the applied YAML so the instance is cleaned up after the test.
    ResourceManager.STORED_RESOURCES.computeIfAbsent(extensionContext.getDisplayName(), k -> new Stack<>());
    ResourceManager.STORED_RESOURCES.get(extensionContext.getDisplayName()).push(new ResourceItem(() -> cmdKubeClient(namespaceName).deleteContent(instanceYamlContent)));
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, JAEGER_INSTANCE_NAME, 1);
}
Also used : TestUtils(io.strimzi.test.TestUtils) ResourceItem(io.strimzi.systemtest.resources.ResourceItem) File(java.io.File)

Example 3 with TestUtils

use of io.strimzi.test.TestUtils in project strimzi by strimzi.

In the class AbstractUpgradeST, the method setupEnvAndUpgradeClusterOperator:

/**
 * Prepares the environment for a Cluster Operator upgrade test: installs the
 * operator at the "fromVersion" (from the local HEAD packaging files, or from
 * a downloaded release archive), deploys a Kafka cluster, a KafkaUser and a
 * KafkaTopic if they do not already exist, optionally generates a batch of
 * topics for the upgrade, and starts continuous producer/consumer jobs that
 * keep running across the upgrade.
 *
 * @param extensionContext        JUnit extension context, passed to the resource manager for bookkeeping
 * @param testParameters          JSON upgrade config ("fromVersion", "toVersion", "urlFrom", "fromExamples", "client", ...)
 * @param producerName            name of the continuous producer job
 * @param consumerName            name of the continuous consumer job
 * @param continuousTopicName     topic used by the continuous clients
 * @param continuousConsumerGroup consumer group of the continuous consumer
 * @param kafkaVersion            Kafka version to deploy with
 * @param namespace               namespace the upgrade test runs in
 * @throws IOException when downloading or unzipping the release archive fails
 */
protected void setupEnvAndUpgradeClusterOperator(ExtensionContext extensionContext, JsonObject testParameters, String producerName, String consumerName, String continuousTopicName, String continuousConsumerGroup, String kafkaVersion, String namespace) throws IOException {
    // 0 disables the continuous clients section at the bottom of this method
    int continuousClientsMessageCount = testParameters.getJsonObject("client").getInteger("continuousClientsMessages");
    LOGGER.info("Test upgrade of ClusterOperator from version {} to version {}", testParameters.getString("fromVersion"), testParameters.getString("toVersion"));
    cluster.setNamespace(namespace);
    String operatorVersion = testParameters.getString("fromVersion");
    String url = null;
    // Release archive directory; stays null when upgrading from HEAD (local files are used instead)
    File dir = null;
    if ("HEAD".equals(testParameters.getString("fromVersion"))) {
        // Upgrade from the current checkout: use the local packaging install files
        coDir = new File(TestUtils.USER_PATH + "/../packaging/install/cluster-operator");
    } else {
        // Upgrade from a released version: download its archive and use the bundled install files
        url = testParameters.getString("urlFrom");
        dir = FileUtils.downloadAndUnzip(url);
        coDir = new File(dir, testParameters.getString("fromExamples") + "/install/cluster-operator/");
    }
    // Modify + apply installation files
    copyModifyApply(coDir, namespace, extensionContext, testParameters.getString("strimziFeatureGatesFlagsBefore"));
    LOGGER.info("Waiting for {} deployment", ResourceManager.getCoDeploymentName());
    DeploymentUtils.waitForDeploymentAndPodsReady(ResourceManager.getCoDeploymentName(), 1);
    LOGGER.info("{} is ready", ResourceManager.getCoDeploymentName());
    if (!cmdKubeClient().getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL, operatorVersion)).contains(clusterName)) {
        // Deploy a Kafka cluster
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            // HEAD: build the Kafka CR from templates, pinning message-format and protocol versions
            resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editSpec().editKafka().withVersion(kafkaVersion).addToConfig("log.message.format.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).messageVersion()).addToConfig("inter.broker.protocol.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).protocolVersion()).endKafka().endSpec().build());
        } else {
            // Released version: apply the example Kafka CR shipped with the release
            kafkaYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/kafka/kafka-persistent.yaml");
            LOGGER.info("Deploy Kafka from: {}", kafkaYaml.getPath());
            // Change the Kafka version in the example YAML; a null version removes the version field
            cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaVersion(kafkaYaml, kafkaVersion));
            // Wait for readiness
            waitForReadinessOfKafkaCluster();
        }
    }
    if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion)).contains(userName)) {
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName).build());
        } else {
            kafkaUserYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/user/kafka-user.yaml");
            LOGGER.info("Deploy KafkaUser from: {}", kafkaUserYaml.getPath());
            // Strip the "authorization" section — presumably unsupported by the older operator; TODO confirm
            cmdKubeClient().applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization"));
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion), userName);
        }
    }
    if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(topicName)) {
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
        } else {
            kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
            LOGGER.info("Deploy KafkaTopic from: {}", kafkaTopicYaml.getPath());
            cmdKubeClient().create(kafkaTopicYaml);
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), topicName);
        }
    }
    // Create bunch of topics for upgrade if it's specified in configuration
    if (testParameters.getBoolean("generateTopics")) {
        for (int x = 0; x < upgradeTopicCount; x++) {
            if ("HEAD".equals(testParameters.getString("fromVersion"))) {
                resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, topicName + "-" + x, 1, 1, 1).editSpec().withTopicName(topicName + "-" + x).endSpec().build());
            } else {
                // Reuse the release's example topic YAML, renaming it per iteration
                kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
                cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace("name: \"my-topic\"", "name: \"" + topicName + "-" + x + "\""));
            }
        }
    }
    if (continuousClientsMessageCount != 0) {
        // Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update
        if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(continuousTopicName)) {
            String pathToTopicExamples = testParameters.getString("fromExamples").equals("HEAD") ? PATH_TO_KAFKA_TOPIC_CONFIG : testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml";
            // NOTE(review): when upgrading from HEAD, 'dir' is still null here, so new File(null, child)
            // resolves the path with no parent directory — confirm this is intended for the HEAD case.
            kafkaTopicYaml = new File(dir, pathToTopicExamples);
            // Bump partitions/replicas to 3 and append min.insync.replicas: 2 to the topic config
            cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace("name: \"my-topic\"", "name: \"" + continuousTopicName + "\"").replace("partitions: 1", "partitions: 3").replace("replicas: 1", "replicas: 3") + "    min.insync.replicas: 2");
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), continuousTopicName);
        }
        // Short timeouts so the producer fails fast if brokers become unavailable mid-upgrade
        String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
        KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder().withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(continuousTopicName).withMessageCount(continuousClientsMessageCount).withAdditionalConfig(producerAdditionConfiguration).withConsumerGroup(continuousConsumerGroup).withDelayMs(1000).build();
        resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
        resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################
    }
    makeSnapshots();
    logPodImages(clusterName);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) TestUtils(io.strimzi.test.TestUtils) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) File(java.io.File)

Example 4 with TestUtils

use of io.strimzi.test.TestUtils in project strimzi-kafka-operator by strimzi.

In the class AbstractUpgradeST, the method setupEnvAndUpgradeClusterOperator:

/**
 * Prepares the environment for a Cluster Operator upgrade test: installs the
 * operator at the "fromVersion" (from the local HEAD packaging files, or from
 * a downloaded release archive), deploys a Kafka cluster, a KafkaUser and a
 * KafkaTopic if they do not already exist, optionally generates a batch of
 * topics for the upgrade, and starts continuous producer/consumer jobs that
 * keep running across the upgrade.
 *
 * @param extensionContext        JUnit extension context, passed to the resource manager for bookkeeping
 * @param testParameters          JSON upgrade config ("fromVersion", "toVersion", "urlFrom", "fromExamples", "client", ...)
 * @param producerName            name of the continuous producer job
 * @param consumerName            name of the continuous consumer job
 * @param continuousTopicName     topic used by the continuous clients
 * @param continuousConsumerGroup consumer group of the continuous consumer
 * @param kafkaVersion            Kafka version to deploy with
 * @param namespace               namespace the upgrade test runs in
 * @throws IOException when downloading or unzipping the release archive fails
 */
protected void setupEnvAndUpgradeClusterOperator(ExtensionContext extensionContext, JsonObject testParameters, String producerName, String consumerName, String continuousTopicName, String continuousConsumerGroup, String kafkaVersion, String namespace) throws IOException {
    // 0 disables the continuous clients section at the bottom of this method
    int continuousClientsMessageCount = testParameters.getJsonObject("client").getInteger("continuousClientsMessages");
    LOGGER.info("Test upgrade of ClusterOperator from version {} to version {}", testParameters.getString("fromVersion"), testParameters.getString("toVersion"));
    cluster.setNamespace(namespace);
    String operatorVersion = testParameters.getString("fromVersion");
    String url = null;
    // Release archive directory; stays null when upgrading from HEAD (local files are used instead)
    File dir = null;
    if ("HEAD".equals(testParameters.getString("fromVersion"))) {
        // Upgrade from the current checkout: use the local packaging install files
        coDir = new File(TestUtils.USER_PATH + "/../packaging/install/cluster-operator");
    } else {
        // Upgrade from a released version: download its archive and use the bundled install files
        url = testParameters.getString("urlFrom");
        dir = FileUtils.downloadAndUnzip(url);
        coDir = new File(dir, testParameters.getString("fromExamples") + "/install/cluster-operator/");
    }
    // Modify + apply installation files
    copyModifyApply(coDir, namespace, extensionContext, testParameters.getString("strimziFeatureGatesFlagsBefore"));
    LOGGER.info("Waiting for {} deployment", ResourceManager.getCoDeploymentName());
    DeploymentUtils.waitForDeploymentAndPodsReady(ResourceManager.getCoDeploymentName(), 1);
    LOGGER.info("{} is ready", ResourceManager.getCoDeploymentName());
    if (!cmdKubeClient().getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL, operatorVersion)).contains(clusterName)) {
        // Deploy a Kafka cluster
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            // HEAD: build the Kafka CR from templates, pinning message-format and protocol versions
            resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editSpec().editKafka().withVersion(kafkaVersion).addToConfig("log.message.format.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).messageVersion()).addToConfig("inter.broker.protocol.version", TestKafkaVersion.getSpecificVersion(kafkaVersion).protocolVersion()).endKafka().endSpec().build());
        } else {
            // Released version: apply the example Kafka CR shipped with the release
            kafkaYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/kafka/kafka-persistent.yaml");
            LOGGER.info("Deploy Kafka from: {}", kafkaYaml.getPath());
            // Change the Kafka version in the example YAML; a null version removes the version field
            cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaVersion(kafkaYaml, kafkaVersion));
            // Wait for readiness
            waitForReadinessOfKafkaCluster();
        }
    }
    if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion)).contains(userName)) {
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, userName).build());
        } else {
            kafkaUserYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/user/kafka-user.yaml");
            LOGGER.info("Deploy KafkaUser from: {}", kafkaUserYaml.getPath());
            // Strip the "authorization" section — presumably unsupported by the older operator; TODO confirm
            cmdKubeClient().applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization"));
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL, operatorVersion), userName);
        }
    }
    if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(topicName)) {
        if ("HEAD".equals(testParameters.getString("fromVersion"))) {
            resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
        } else {
            kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
            LOGGER.info("Deploy KafkaTopic from: {}", kafkaTopicYaml.getPath());
            cmdKubeClient().create(kafkaTopicYaml);
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), topicName);
        }
    }
    // Create bunch of topics for upgrade if it's specified in configuration
    if (testParameters.getBoolean("generateTopics")) {
        for (int x = 0; x < upgradeTopicCount; x++) {
            if ("HEAD".equals(testParameters.getString("fromVersion"))) {
                resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, topicName + "-" + x, 1, 1, 1).editSpec().withTopicName(topicName + "-" + x).endSpec().build());
            } else {
                // Reuse the release's example topic YAML, renaming it per iteration
                kafkaTopicYaml = new File(dir, testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml");
                cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace("name: \"my-topic\"", "name: \"" + topicName + "-" + x + "\""));
            }
        }
    }
    if (continuousClientsMessageCount != 0) {
        // Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update
        if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion)).contains(continuousTopicName)) {
            String pathToTopicExamples = testParameters.getString("fromExamples").equals("HEAD") ? PATH_TO_KAFKA_TOPIC_CONFIG : testParameters.getString("fromExamples") + "/examples/topic/kafka-topic.yaml";
            // NOTE(review): when upgrading from HEAD, 'dir' is still null here, so new File(null, child)
            // resolves the path with no parent directory — confirm this is intended for the HEAD case.
            kafkaTopicYaml = new File(dir, pathToTopicExamples);
            // Bump partitions/replicas to 3 and append min.insync.replicas: 2 to the topic config
            cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace("name: \"my-topic\"", "name: \"" + continuousTopicName + "\"").replace("partitions: 1", "partitions: 3").replace("replicas: 1", "replicas: 3") + "    min.insync.replicas: 2");
            ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL, operatorVersion), continuousTopicName);
        }
        // Short timeouts so the producer fails fast if brokers become unavailable mid-upgrade
        String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
        KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder().withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(continuousTopicName).withMessageCount(continuousClientsMessageCount).withAdditionalConfig(producerAdditionConfiguration).withConsumerGroup(continuousConsumerGroup).withDelayMs(1000).build();
        resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
        resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
    // ##############################
    }
    makeSnapshots();
    logPodImages(clusterName);
}
Also used : KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) TestUtils(io.strimzi.test.TestUtils) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) File(java.io.File)

Aggregations

TestUtils (io.strimzi.test.TestUtils)4 File (java.io.File)4 KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients)2 KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder)2 ResourceItem (io.strimzi.systemtest.resources.ResourceItem)2 CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString)2