
Example 76 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

The class MirrorMaker2IsolatedST, method testMirrorMaker2.

@SuppressWarnings({ "checkstyle:MethodLength" })
@ParallelNamespaceTest
void testMirrorMaker2(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaClusterSourceName = clusterName + "-source";
    String kafkaClusterTargetName = clusterName + "-target";
    String sourceTopicName = "availability-topic-source-" + mapWithTestTopics.get(extensionContext.getDisplayName());
    String targetTopicName = "availability-topic-target-" + mapWithTestTopics.get(extensionContext.getDisplayName());
    Map<String, Object> expectedConfig = StUtils.loadProperties(
        "group.id=mirrormaker2-cluster\n" +
        "key.converter=org.apache.kafka.connect.converters.ByteArrayConverter\n" +
        "value.converter=org.apache.kafka.connect.converters.ByteArrayConverter\n" +
        "header.converter=org.apache.kafka.connect.converters.ByteArrayConverter\n" +
        "config.storage.topic=mirrormaker2-cluster-configs\n" +
        "status.storage.topic=mirrormaker2-cluster-status\n" +
        "offset.storage.topic=mirrormaker2-cluster-offsets\n" +
        "config.storage.replication.factor=-1\n" +
        "status.storage.replication.factor=-1\n" +
        "offset.storage.replication.factor=-1\n" +
        "config.providers=file\n" +
        "config.providers.file.class=org.apache.kafka.common.config.provider.FileConfigProvider\n");
    String topicSourceName = MIRRORMAKER2_TOPIC_NAME + "-" + rng.nextInt(Integer.MAX_VALUE);
    String topicTargetName = kafkaClusterSourceName + "." + topicSourceName;
    String topicSourceNameMirrored = kafkaClusterSourceName + "." + sourceTopicName;
    // Deploy source kafka
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build());
    // Deploy target kafka
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
    // Deploy Topic
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, topicSourceName, 3).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(namespaceName, false, kafkaClientsName).build());
    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(sourceTopicName)
        .withNamespaceName(namespaceName)
        .withClusterName(kafkaClusterSourceName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();
    // Check brokers availability
    LOGGER.info("Sending messages to - topic {}, cluster {} and message count of {}", sourceTopicName, kafkaClusterSourceName, MESSAGE_COUNT);
    internalKafkaClient.produceAndConsumesPlainMessagesUntilBothOperationsAreSuccessful();
    LOGGER.info("Setting topic to {}, cluster to {} and changing consumer group", targetTopicName, kafkaClusterTargetName);
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(targetTopicName).withClusterName(kafkaClusterTargetName).build();
    LOGGER.info("Sending messages to - topic {}, cluster {} and message count of {}", targetTopicName, kafkaClusterTargetName, MESSAGE_COUNT);
    internalKafkaClient.produceAndConsumesPlainMessagesUntilBothOperationsAreSuccessful();
    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(clusterName, kafkaClusterTargetName, kafkaClusterSourceName, 1, false)
        .editSpec()
            .editFirstMirror()
                .editSourceConnector()
                    .addToConfig("refresh.topics.interval.seconds", "60")
                .endSourceConnector()
            .endMirror()
        .endSpec()
        .build());
    LOGGER.info("Looks like the mirrormaker2 cluster my-cluster deployed OK");
    String podName = PodUtils.getPodNameByPrefix(namespaceName, KafkaMirrorMaker2Resources.deploymentName(clusterName));
    String kafkaPodJson = TestUtils.toJsonString(kubeClient(namespaceName).getPod(namespaceName, podName));
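    // MM2 runs on top of Kafka Connect, so the pod is expected to bootstrap against the target
    // cluster and to carry the Connect configuration asserted below.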
    assertThat(kafkaPodJson, hasJsonPath(StUtils.globalVariableJsonPathBuilder(0, "KAFKA_CONNECT_BOOTSTRAP_SERVERS"), hasItem(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName))));
    assertThat(StUtils.getPropertiesFromJson(0, kafkaPodJson, "KAFKA_CONNECT_CONFIGURATION"), is(expectedConfig));
    testDockerImagesForKafkaMirrorMaker2(clusterName, INFRA_NAMESPACE, namespaceName);
    verifyLabelsOnPods(namespaceName, clusterName, "mirrormaker2", "KafkaMirrorMaker2");
    verifyLabelsForService(namespaceName, clusterName, "mirrormaker2-api", "KafkaMirrorMaker2");
    verifyLabelsForConfigMaps(namespaceName, kafkaClusterSourceName, null, kafkaClusterTargetName);
    verifyLabelsForServiceAccounts(namespaceName, kafkaClusterSourceName, null);
    LOGGER.info("Setting topic to {}, cluster to {} and changing consumer group", topicSourceName, kafkaClusterSourceName);
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicSourceName).withClusterName(kafkaClusterSourceName).build();
    LOGGER.info("Sending messages to - topic {}, cluster {} and message count of {}", topicSourceName, kafkaClusterSourceName, MESSAGE_COUNT);
    int sent = internalKafkaClient.sendMessagesPlain();
    LOGGER.info("Consumer in source cluster and topic should receive {} messages", MESSAGE_COUNT);
    internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
    LOGGER.info("Now setting topic to {} and cluster to {} - the messages should be mirrored", topicTargetName, kafkaClusterTargetName);
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicTargetName).withClusterName(kafkaClusterTargetName).build();
    LOGGER.info("Consumer in target cluster and topic should receive {} messages", MESSAGE_COUNT);
    internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
    LOGGER.info("Changing topic to {}", topicSourceNameMirrored);
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicSourceNameMirrored).build();
    LOGGER.info("Check if mm2 mirror automatically created topic");
    internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
    LOGGER.info("Mirrored successful");
    KafkaTopic mirroredTopic = KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(topicTargetName).get();
    assertThat(mirroredTopic.getSpec().getPartitions(), is(3));
    assertThat(mirroredTopic.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(kafkaClusterTargetName));
    // Replace the source topic resource with new data and check that MM2 updates the target topic
    KafkaTopicResource.replaceTopicResourceInSpecificNamespace(topicSourceName, kt -> kt.getSpec().setPartitions(8), namespaceName);
    KafkaTopicUtils.waitForKafkaTopicPartitionChange(namespaceName, topicTargetName, 8);
}
Also used: GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder), JobBuilder (io.fabric8.kubernetes.api.model.batch.v1.JobBuilder), KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder), KafkaMirrorMaker2ClusterSpecBuilder (io.strimzi.api.kafka.model.KafkaMirrorMaker2ClusterSpecBuilder), SecretBuilder (io.fabric8.kubernetes.api.model.SecretBuilder), KafkaTopic (io.strimzi.api.kafka.model.KafkaTopic), InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient), Matchers.containsString (org.hamcrest.Matchers.containsString), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)
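
The topicTargetName built above as kafkaClusterSourceName + "." + topicSourceName follows MirrorMaker 2's default remote-topic naming, which prefixes a mirrored topic with the source cluster alias. A minimal standalone sketch of that naming using Kafka's DefaultReplicationPolicy (not part of the test above; the class and topic names here are illustrative):

import org.apache.kafka.connect.mirror.DefaultReplicationPolicy;

public class RemoteTopicNamingSketch {
    public static void main(String[] args) {
        // With the default policy and separator, a mirrored topic is "<sourceClusterAlias>.<topic>".
        DefaultReplicationPolicy policy = new DefaultReplicationPolicy();
        String remoteTopic = policy.formatRemoteTopic("my-cluster-source", "mirrormaker2-topic-example");
        System.out.println(remoteTopic); // prints: my-cluster-source.mirrormaker2-topic-example
    }
}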

Example 77 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

The class MirrorMaker2IsolatedST, method testConfigureDeploymentStrategy.

@ParallelNamespaceTest
void testConfigureDeploymentStrategy(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaClusterSourceName = clusterName + "-source";
    String kafkaClusterTargetName = clusterName + "-target";
    // Deploy source kafka
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build());
    // Deploy target kafka
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(clusterName, kafkaClusterTargetName, kafkaClusterSourceName, 1, false)
        .editSpec()
            .editOrNewTemplate()
                .editOrNewDeployment()
                    .withDeploymentStrategy(DeploymentStrategy.RECREATE)
                .endDeployment()
            .endTemplate()
        .endSpec()
        .build());
    String mm2DepName = KafkaMirrorMaker2Resources.deploymentName(clusterName);
    LOGGER.info("Adding label to MirrorMaker2 resource, the CR should be recreated");
    KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2ResourceInSpecificNamespace(clusterName, mm2 -> mm2.getMetadata().setLabels(Collections.singletonMap("some", "label")), namespaceName);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, mm2DepName, 1);
    KafkaMirrorMaker2 kmm2 = KafkaMirrorMaker2Resource.kafkaMirrorMaker2Client().inNamespace(namespaceName).withName(clusterName).get();
    LOGGER.info("Checking that observed gen. is still on 1 (recreation) and new label is present");
    assertThat(kmm2.getStatus().getObservedGeneration(), is(1L));
    assertThat(kmm2.getMetadata().getLabels().toString(), containsString("some=label"));
    assertThat(kmm2.getSpec().getTemplate().getDeployment().getDeploymentStrategy(), is(DeploymentStrategy.RECREATE));
    LOGGER.info("Changing deployment strategy to {}", DeploymentStrategy.ROLLING_UPDATE);
    KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2ResourceInSpecificNamespace(clusterName, mm2 -> mm2.getSpec().getTemplate().getDeployment().setDeploymentStrategy(DeploymentStrategy.ROLLING_UPDATE), namespaceName);
    KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2Ready(namespaceName, clusterName);
    LOGGER.info("Adding another label to MirrorMaker2 resource, pods should be rolled");
    KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2ResourceInSpecificNamespace(clusterName, mm2 -> mm2.getMetadata().getLabels().put("another", "label"), namespaceName);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, mm2DepName, 1);
    LOGGER.info("Checking that observed gen. higher (rolling update) and label is changed");
    kmm2 = KafkaMirrorMaker2Resource.kafkaMirrorMaker2Client().inNamespace(namespaceName).withName(clusterName).get();
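    // observedGeneration should now be 2: switching the deployment strategy changed the CR spec
    // (which bumps metadata.generation), while the label edits alone did not.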
    assertThat(kmm2.getStatus().getObservedGeneration(), is(2L));
    assertThat(kmm2.getMetadata().getLabels().toString(), containsString("another=label"));
    assertThat(kmm2.getSpec().getTemplate().getDeployment().getDeploymentStrategy(), is(DeploymentStrategy.ROLLING_UPDATE));
}
Also used: Matchers.containsString (org.hamcrest.Matchers.containsString), KafkaMirrorMaker2 (io.strimzi.api.kafka.model.KafkaMirrorMaker2), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)

Example 78 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

The class MirrorMakerIsolatedST, method testMirrorMakerTlsScramSha.

/**
 * Test mirroring messages by Mirror Maker over tls transport using scram-sha auth
 */
@ParallelNamespaceTest
@SuppressWarnings("checkstyle:methodlength")
void testMirrorMakerTlsScramSha(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    String kafkaClusterSourceName = clusterName + "-source";
    String kafkaClusterTargetName = clusterName + "-target";
    String kafkaUserSource = clusterName + "-my-user-source";
    String kafkaUserTarget = clusterName + "-my-user-target";
    // Deploy source kafka with tls listener and SCRAM-SHA authentication
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1)
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9093)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withAuth(new KafkaListenerAuthenticationScramSha512())
                    .build())
            .endKafka()
        .endSpec()
        .build());
    // Deploy target kafka with tls listener and SCRAM-SHA authentication
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1)
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                    .withPort(9093)
                    .withType(KafkaListenerType.INTERNAL)
                    .withTls(true)
                    .withAuth(new KafkaListenerAuthenticationScramSha512())
                    .build())
            .endKafka()
        .endSpec()
        .build());
    // Deploy topic
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, topicName).build());
    // Create Kafka user for the source cluster
    KafkaUser userSource = KafkaUserTemplates.scramShaUser(kafkaClusterSourceName, kafkaUserSource).build();
    // Create Kafka user for the target cluster
    KafkaUser userTarget = KafkaUserTemplates.scramShaUser(kafkaClusterTargetName, kafkaUserTarget).build();
    resourceManager.createResource(extensionContext, userSource);
    resourceManager.createResource(extensionContext, userTarget);
    // Initialize PasswordSecretSource to set this as PasswordSecret in Mirror Maker spec
    PasswordSecretSource passwordSecretSource = new PasswordSecretSource();
    passwordSecretSource.setSecretName(kafkaUserSource);
    passwordSecretSource.setPassword("password");
    // Initialize PasswordSecretSource to set this as PasswordSecret in Mirror Maker spec
    PasswordSecretSource passwordSecretTarget = new PasswordSecretSource();
    passwordSecretTarget.setSecretName(kafkaUserTarget);
    passwordSecretTarget.setPassword("password");
    // Initialize CertSecretSource with certificate and secret names for consumer
    CertSecretSource certSecretSource = new CertSecretSource();
    certSecretSource.setCertificate("ca.crt");
    certSecretSource.setSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterSourceName));
    // Initialize CertSecretSource with certificate and secret names for producer
    CertSecretSource certSecretTarget = new CertSecretSource();
    certSecretTarget.setCertificate("ca.crt");
    certSecretTarget.setSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterTargetName));
    // Deploy client
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, true, clusterName + "-" + Constants.KAFKA_CLIENTS, userSource, userTarget).build());
    final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
    String baseTopic = mapWithTestTopics.get(extensionContext.getDisplayName());
    String topicTestName1 = baseTopic + "-test-1";
    String topicTestName2 = baseTopic + "-test-2";
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, topicTestName1).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, topicTestName2).build());
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicTestName1)
        .withNamespaceName(namespaceName)
        .withClusterName(kafkaClusterSourceName)
        .withKafkaUsername(userSource.getMetadata().getName())
        .withMessageCount(messagesCount)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();
    // Check brokers availability
    internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicTestName2).withClusterName(kafkaClusterTargetName).withKafkaUsername(userTarget.getMetadata().getName()).build();
    internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();
    // Deploy Mirror Maker with TLS and ScramSha512
    resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(clusterName, kafkaClusterSourceName, kafkaClusterTargetName, ClientUtils.generateRandomConsumerGroup(), 1, true)
        .editSpec()
            .editConsumer()
                .withNewKafkaClientAuthenticationScramSha512()
                    .withUsername(kafkaUserSource)
                    .withPasswordSecret(passwordSecretSource)
                .endKafkaClientAuthenticationScramSha512()
                .withNewTls()
                    .withTrustedCertificates(certSecretSource)
                .endTls()
            .endConsumer()
            .editProducer()
                .withNewKafkaClientAuthenticationScramSha512()
                    .withUsername(kafkaUserTarget)
                    .withPasswordSecret(passwordSecretTarget)
                .endKafkaClientAuthenticationScramSha512()
                .withNewTls()
                    .withTrustedCertificates(certSecretTarget)
                .endTls()
            .endProducer()
        .endSpec()
        .build());
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicName).withClusterName(kafkaClusterSourceName).withKafkaUsername(userSource.getMetadata().getName()).build();
    internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();
    InternalKafkaClient newInternalKafkaClient = internalKafkaClient.toBuilder().withClusterName(kafkaClusterTargetName).withKafkaUsername(userTarget.getMetadata().getName()).build();
    newInternalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(internalKafkaClient.getMessageCount());
}
Also used: KafkaListenerAuthenticationScramSha512 (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationScramSha512), GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder), ResourceRequirementsBuilder (io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder), PasswordSecretSource (io.strimzi.api.kafka.model.PasswordSecretSource), InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), CertSecretSource (io.strimzi.api.kafka.model.CertSecretSource), KafkaUser (io.strimzi.api.kafka.model.KafkaUser), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)
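
The secret sources above are built with plain setters; a short, equivalent sketch using the fluent builders that Strimzi generates for its io.strimzi.api.kafka.model classes. This assumes CertSecretSourceBuilder and PasswordSecretSourceBuilder are available for these model classes and reuses the variables from the test:

    CertSecretSource sourceCert = new CertSecretSourceBuilder()
        .withCertificate("ca.crt")
        .withSecretName(KafkaResources.clusterCaCertificateSecretName(kafkaClusterSourceName))
        .build();
    PasswordSecretSource sourcePassword = new PasswordSecretSourceBuilder()
        .withSecretName(kafkaUserSource)
        .withPassword("password")
        .build();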

Example 79 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

The class MirrorMakerIsolatedST, method testIncludeList.

@ParallelNamespaceTest
void testIncludeList(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaClusterSourceName = clusterName + "-source";
    String kafkaClusterTargetName = clusterName + "-target";
    String topicName = "included-topic";
    String topicNotIncluded = "not-included-topic";
    LOGGER.info("Creating kafka source cluster {}", kafkaClusterSourceName);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build());
    LOGGER.info("Creating kafka target cluster {}", kafkaClusterTargetName);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, topicNotIncluded).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
    String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName("topic-example-10")
        .withNamespaceName(namespaceName)
        .withClusterName(kafkaClusterSourceName)
        .withMessageCount(messagesCount)
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();
    // Check brokers availability
    internalKafkaClient.produceAndConsumesPlainMessagesUntilBothOperationsAreSuccessful();
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName("topic-example-11").withClusterName(kafkaClusterTargetName).build();
    internalKafkaClient.produceAndConsumesPlainMessagesUntilBothOperationsAreSuccessful();
    resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(clusterName, kafkaClusterSourceName, kafkaClusterTargetName, ClientUtils.generateRandomConsumerGroup(), 1, false)
        .editMetadata()
            .withNamespace(namespaceName)
        .endMetadata()
        .editSpec()
            .withInclude(topicName)
        .endSpec()
        .build());
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicName).withClusterName(kafkaClusterSourceName).build();
    int sent = internalKafkaClient.sendMessagesPlain();
    internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
    internalKafkaClient = internalKafkaClient.toBuilder().withClusterName(kafkaClusterTargetName).build();
    internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
    internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicNotIncluded).withClusterName(kafkaClusterSourceName).build();
    sent = internalKafkaClient.sendMessagesPlain();
    internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
    internalKafkaClient = internalKafkaClient.toBuilder().withClusterName(kafkaClusterTargetName).build();
    assertThat("Received 0 messages in target kafka because topic " + topicNotIncluded + " is not included", internalKafkaClient.receiveMessagesPlain(), is(0));
}
Also used: GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder), ResourceRequirementsBuilder (io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder), InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)
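
The spec.include field on KafkaMirrorMaker accepts a Java-style regular expression, so several topics can be mirrored with one pattern rather than a single literal name. A hypothetical variation of the deployment above; the pattern itself is illustrative, not part of the test:

    resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(clusterName, kafkaClusterSourceName, kafkaClusterTargetName, ClientUtils.generateRandomConsumerGroup(), 1, false)
        .editMetadata()
            .withNamespace(namespaceName)
        .endMetadata()
        .editSpec()
            // mirror the literal "included-topic" plus anything matching "orders-.*"
            .withInclude("included-topic|orders-.*")
        .endSpec()
        .build());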

Example 80 with ParallelNamespaceTest

Use of io.strimzi.systemtest.annotations.ParallelNamespaceTest in project strimzi-kafka-operator by strimzi.

The class MirrorMakerIsolatedST, method testCustomAndUpdatedValues.

@ParallelNamespaceTest
void testCustomAndUpdatedValues(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).editSpec().withNewEntityOperator().endEntityOperator().endSpec().build());
    String usedVariable = "KAFKA_MIRRORMAKER_CONFIGURATION_PRODUCER";
    LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
    envVarGeneral.put("TEST_ENV_1", "test.env.one");
    envVarGeneral.put("TEST_ENV_2", "test.env.two");
    envVarGeneral.put(usedVariable, "test.value");
    LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
    envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
    envVarUpdated.put("TEST_ENV_3", "test.env.three");
    Map<String, Object> producerConfig = new HashMap<>();
    producerConfig.put("acks", "all");
    Map<String, Object> updatedProducerConfig = new HashMap<>();
    updatedProducerConfig.put("acks", "0");
    Map<String, Object> consumerConfig = new HashMap<>();
    consumerConfig.put("auto.offset.reset", "latest");
    Map<String, Object> updatedConsumerConfig = new HashMap<>();
    updatedConsumerConfig.put("auto.offset.reset", "earliest");
    int initialDelaySeconds = 30;
    int timeoutSeconds = 10;
    int updatedInitialDelaySeconds = 31;
    int updatedTimeoutSeconds = 11;
    int periodSeconds = 10;
    int successThreshold = 1;
    int failureThreshold = 3;
    int updatedPeriodSeconds = 5;
    int updatedFailureThreshold = 1;
    resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(clusterName, clusterName, clusterName, ClientUtils.generateRandomConsumerGroup(), 1, false)
        .editSpec()
            .editProducer()
                .withConfig(producerConfig)
            .endProducer()
            .editConsumer()
                .withConfig(consumerConfig)
            .endConsumer()
            .withNewTemplate()
                .withNewMirrorMakerContainer()
                    .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
                .endMirrorMakerContainer()
            .endTemplate()
            .withNewReadinessProbe()
                .withInitialDelaySeconds(initialDelaySeconds)
                .withTimeoutSeconds(timeoutSeconds)
                .withPeriodSeconds(periodSeconds)
                .withSuccessThreshold(successThreshold)
                .withFailureThreshold(failureThreshold)
            .endReadinessProbe()
            .withNewLivenessProbe()
                .withInitialDelaySeconds(initialDelaySeconds)
                .withTimeoutSeconds(timeoutSeconds)
                .withPeriodSeconds(periodSeconds)
                .withSuccessThreshold(successThreshold)
                .withFailureThreshold(failureThreshold)
            .endLivenessProbe()
        .endSpec()
        .build());
    Map<String, String> mirrorMakerSnapshot = DeploymentUtils.depSnapshot(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName));
    // Remove variable which is already in use
    envVarGeneral.remove(usedVariable);
    LOGGER.info("Verify values before update");
    checkReadinessLivenessProbe(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), envVarGeneral);
    checkComponentConfiguration(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), "KAFKA_MIRRORMAKER_CONFIGURATION_PRODUCER", producerConfig);
    checkComponentConfiguration(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), "KAFKA_MIRRORMAKER_CONFIGURATION_CONSUMER", consumerConfig);
    LOGGER.info("Check if actual env variable {} has different value than {}", usedVariable, "test.value");
    assertThat(StUtils.checkEnvVarInPod(namespaceName, kubeClient().listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND).get(0).getMetadata().getName(), usedVariable), CoreMatchers.is(not("test.value")));
    LOGGER.info("Updating values in MirrorMaker container");
    KafkaMirrorMakerResource.replaceMirrorMakerResourceInSpecificNamespace(clusterName, kmm -> {
        kmm.getSpec().getTemplate().getMirrorMakerContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
        kmm.getSpec().getProducer().setConfig(updatedProducerConfig);
        kmm.getSpec().getConsumer().setConfig(updatedConsumerConfig);
        kmm.getSpec().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        kmm.getSpec().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
        kmm.getSpec().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        kmm.getSpec().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
        kmm.getSpec().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
        kmm.getSpec().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
        kmm.getSpec().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
        kmm.getSpec().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
    }, namespaceName);
    DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), 1, mirrorMakerSnapshot);
    LOGGER.info("Verify values after update");
    checkReadinessLivenessProbe(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
    checkSpecificVariablesInContainer(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), envVarUpdated);
    checkComponentConfiguration(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), "KAFKA_MIRRORMAKER_CONFIGURATION_PRODUCER", updatedProducerConfig);
    checkComponentConfiguration(namespaceName, KafkaMirrorMakerResources.deploymentName(clusterName), KafkaMirrorMakerResources.deploymentName(clusterName), "KAFKA_MIRRORMAKER_CONFIGURATION_CONSUMER", updatedConsumerConfig);
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest)
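
StUtils.createContainerEnvVarsFromMap is used above to turn the LinkedHashMap entries into container template env vars. A minimal sketch of what such a conversion can look like, assuming the entries map onto Strimzi's ContainerEnvVar model class and its generated builder; this is an illustration, not the helper's actual implementation:

import io.strimzi.api.kafka.model.ContainerEnvVar;
import io.strimzi.api.kafka.model.ContainerEnvVarBuilder;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ContainerEnvVarSketch {
    // Convert a Map<String, String> into env var entries for a container template.
    static List<ContainerEnvVar> toContainerEnvVars(Map<String, String> env) {
        return env.entrySet().stream()
            .map(e -> new ContainerEnvVarBuilder().withName(e.getKey()).withValue(e.getValue()).build())
            .collect(Collectors.toList());
    }
}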

Aggregations

ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest): 302
Tag (org.junit.jupiter.api.Tag): 166
Matchers.containsString (org.hamcrest.Matchers.containsString): 148
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder): 114
InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient): 112
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 94
KafkaUser (io.strimzi.api.kafka.model.KafkaUser): 86
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 76
SecretBuilder (io.fabric8.kubernetes.api.model.SecretBuilder): 66
HashMap (java.util.HashMap): 52
ResourceRequirementsBuilder (io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder): 50
TestUtils.fromYamlString (io.strimzi.test.TestUtils.fromYamlString): 48
Matchers.emptyOrNullString (org.hamcrest.Matchers.emptyOrNullString): 48
ExternalKafkaClient (io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient): 42
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap): 40
ConfigMapBuilder (io.fabric8.kubernetes.api.model.ConfigMapBuilder): 40
Secret (io.fabric8.kubernetes.api.model.Secret): 40
ConfigMapKeySelectorBuilder (io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder): 38
KafkaListenerAuthenticationTls (io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationTls): 36
Pod (io.fabric8.kubernetes.api.model.Pod): 32