
Example 16 with KafkaClients

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.

The class ConnectBuilderIsolatedST, method testBuildPluginUsingMavenCoordinatesArtifacts.

@ParallelTest
void testBuildPluginUsingMavenCoordinatesArtifacts(ExtensionContext extensionContext) {
    final String connectClusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String imageName = getImageNameForTestCase();
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String connectorName = connectClusterName + "-camel-connector";
    final String consumerName = mapWithKafkaClientNames.get(extensionContext.getDisplayName()) + "-consumer";
    resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(INFRA_NAMESPACE, topicName).build(),
        KafkaConnectTemplates.kafkaConnect(extensionContext, connectClusterName, INFRA_NAMESPACE, INFRA_NAMESPACE, 1, false)
            .editMetadata()
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .editOrNewSpec()
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                .withNewBuild()
                    .withPlugins(PLUGIN_WITH_MAVEN_TYPE)
                    .withNewDockerOutput()
                        .withImage(imageName)
                    .endDockerOutput()
                .endBuild()
            .endSpec()
            .build());
    Map<String, Object> connectorConfig = new HashMap<>();
    connectorConfig.put("topics", topicName);
    connectorConfig.put("camel.source.path.timerName", "timer");
    resourceManager.createResource(extensionContext,
        KafkaConnectorTemplates.kafkaConnector(connectorName, connectClusterName)
            .editOrNewSpec()
                .withClassName(CAMEL_CONNECTOR_TIMER_CLASS_NAME)
                .withConfig(connectorConfig)
            .endSpec()
            .build());
    KafkaClients kafkaClient = new KafkaClientsBuilder()
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(INFRA_NAMESPACE))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withDelayMs(0)
        .build();
    resourceManager.createResource(extensionContext, kafkaClient.consumerStrimzi());
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
Also used: KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) HashMap(java.util.HashMap) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)
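The PLUGIN_WITH_MAVEN_TYPE constant used above is declared elsewhere in ConnectBuilderIsolatedST. As a minimal sketch of how such a maven-type build plugin can be composed with the Strimzi API (the Camel connector coordinates and version here are illustrative assumptions, not the test's actual values):

import io.strimzi.api.kafka.model.connect.build.MavenArtifactBuilder;
import io.strimzi.api.kafka.model.connect.build.Plugin;
import io.strimzi.api.kafka.model.connect.build.PluginBuilder;

// Hypothetical equivalent of PLUGIN_WITH_MAVEN_TYPE; coordinates are assumed.
static final Plugin PLUGIN_WITH_MAVEN_TYPE = new PluginBuilder()
    .withName("camel-timer-connector")
    .withArtifacts(new MavenArtifactBuilder()
        // groupId/artifactId/version are resolved from Maven Central during the Connect image build
        .withGroup("org.apache.camel.kafkaconnector")
        .withArtifact("camel-timer-kafka-connector")
        .withVersion("0.11.0")
        .build())
    .build();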

Example 17 with KafkaClients

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.

The class ConfigProviderST, method testConnectWithConnectorUsingConfigAndEnvProvider.

@ParallelNamespaceTest
void testConnectWithConnectorUsingConfigAndEnvProvider(ExtensionContext extensionContext) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String producerName = "producer-" + ClientUtils.generateRandomConsumerGroup();
    final String customFileSinkPath = "/tmp/my-own-path.txt";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
    Map<String, String> configData = new HashMap<>();
    configData.put("topics", topicName);
    configData.put("file", customFileSinkPath);
    configData.put("key", "org.apache.kafka.connect.storage.StringConverter");
    configData.put("value", "org.apache.kafka.connect.storage.StringConverter");
    String cmName = "connector-config";
    String configRoleName = "connector-config-role";
    ConfigMap connectorConfig = new ConfigMapBuilder()
        .editOrNewMetadata()
            .withName(cmName)
        .endMetadata()
        .withData(configData)
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).create(connectorConfig);
    resourceManager.createResource(extensionContext,
        KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1, false)
            .editOrNewMetadata()
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .editOrNewSpec()
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("config.providers", "configmaps,env")
                .addToConfig("config.providers.configmaps.class", "io.strimzi.kafka.KubernetesConfigMapConfigProvider")
                .addToConfig("config.providers.env.class", "io.strimzi.kafka.EnvVarConfigProvider")
                .editOrNewExternalConfiguration()
                    .addNewEnv()
                        .withName("FILE_SINK_FILE")
                        .withNewValueFrom()
                            .withNewConfigMapKeyRef("file", cmName, false)
                        .endValueFrom()
                    .endEnv()
                .endExternalConfiguration()
            .endSpec()
            .build());
    LOGGER.info("Creating needed RoleBinding and Role for Kubernetes Config Provider");
    ResourceManager.getInstance().createResource(extensionContext, new RoleBindingBuilder()
        .editOrNewMetadata()
            .withName("connector-config-rb")
            .withNamespace(namespaceName)
        .endMetadata()
        .withSubjects(new SubjectBuilder()
            .withKind("ServiceAccount")
            .withName(clusterName + "-connect")
            .withNamespace(namespaceName)
            .build())
        .withRoleRef(new RoleRefBuilder()
            .withKind("Role")
            .withName(configRoleName)
            .withApiGroup("rbac.authorization.k8s.io")
            .build())
        .build());
    // create a role
    Role configRole = new RoleBuilder()
        .editOrNewMetadata()
            .withName(configRoleName)
            .withNamespace(namespaceName)
        .endMetadata()
        .addNewRule()
            .withApiGroups("")
            .withResources("configmaps")
            .withResourceNames(cmName)
            .withVerbs("get")
        .endRule()
        .build();
    kubeClient().getClient().resource(configRole).createOrReplace();
    String configPrefix = "configmaps:" + namespaceName + "/connector-config:";
    resourceManager.createResource(extensionContext,
        KafkaConnectorTemplates.kafkaConnector(clusterName)
            .editSpec()
                .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
                .addToConfig("file", "${env:FILE_SINK_FILE}")
                .addToConfig("key.converter", "${" + configPrefix + "key}")
                .addToConfig("value.converter", "${" + configPrefix + "value}")
                .addToConfig("topics", "${" + configPrefix + "topics}")
            .endSpec()
            .build());
    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withDelayMs(0)
        .withNamespaceName(namespaceName)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
    String kafkaConnectPodName = kubeClient().listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName();
    KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(namespaceName, kafkaConnectPodName, customFileSinkPath, "Hello-world - 99");
}
Also used: Role(io.fabric8.kubernetes.api.model.rbac.Role) KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) RoleBindingBuilder(io.fabric8.kubernetes.api.model.rbac.RoleBindingBuilder) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) HashMap(java.util.HashMap) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) SubjectBuilder(io.fabric8.kubernetes.api.model.rbac.SubjectBuilder) RoleBuilder(io.fabric8.kubernetes.api.model.rbac.RoleBuilder) RoleRefBuilder(io.fabric8.kubernetes.api.model.rbac.RoleRefBuilder) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
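The ${env:FILE_SINK_FILE} and ${configmaps:<namespace>/<cm>:<key>} placeholders above are resolved by Kafka's ConfigProvider SPI before the connector starts. As a rough sketch of that mechanism (this is not the actual source of io.strimzi.kafka.EnvVarConfigProvider, which additionally supports allow-listing), an environment-variable provider can look like this:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.ConfigProvider;

// Toy provider resolving "${env:VAR}" placeholders for illustration only.
public class ToyEnvVarConfigProvider implements ConfigProvider {
    @Override
    public ConfigData get(String path) {
        return get(path, System.getenv().keySet());
    }

    @Override
    public ConfigData get(String path, Set<String> keys) {
        // For "${env:FILE_SINK_FILE}" the runtime calls get("", Set.of("FILE_SINK_FILE"))
        Map<String, String> data = new HashMap<>();
        for (String key : keys) {
            String value = System.getenv(key);
            if (value != null) {
                data.put(key, value);
            }
        }
        return new ConfigData(data);
    }

    @Override
    public void configure(Map<String, ?> configs) { }

    @Override
    public void close() { }
}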

Example 18 with KafkaClients

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.

The class QuotasST, method testKafkaQuotasPluginIntegration.

/**
 * Test to check Kafka Quotas Plugin for disk space
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testKafkaQuotasPluginIntegration(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String producerName = "quotas-producer";
    final String consumerName = "quotas-consumer";
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 1)
        .editSpec()
            .editKafka()
                .addToConfig("client.quota.callback.class", "io.strimzi.kafka.quotas.StaticQuotaCallback")
                .addToConfig("client.quota.callback.static.storage.hard", "55000000")
                .addToConfig("client.quota.callback.static.storage.soft", "50000000")
                .addToConfig("client.quota.callback.static.storage.check-interval", "5")
                .withNewPersistentClaimStorage()
                    .withSize("1Gi")
                .endPersistentClaimStorage()
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    // Send more messages than disk can store to see if the integration works
    KafkaClients basicClients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(100000000)
        .withDelayMs(0)
        .withMessage(String.join("", Collections.nCopies(1000, "#")))
        .build();
    resourceManager.createResource(extensionContext, basicClients.producerStrimzi());
    // Kafka Quotas Plugin should stop producer in around 10-20 seconds with configured throughput
    assertThrows(WaitException.class, () -> JobUtils.waitForJobFailure(producerName, INFRA_NAMESPACE, 120_000));
    String kafkaLog = kubeClient(namespaceName).logs(KafkaResources.kafkaPodName(clusterName, 0));
    String softLimitLog = "disk is beyond soft limit";
    String hardLimitLog = "disk is full";
    assertThat("Kafka log doesn't contain '" + softLimitLog + "' log", kafkaLog, CoreMatchers.containsString(softLimitLog));
    assertThat("Kafka log doesn't contain '" + hardLimitLog + "' log", kafkaLog, CoreMatchers.containsString(hardLimitLog));
}
Also used: KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest) Tag(org.junit.jupiter.api.Tag)
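Why the limits trip within seconds: each record payload is 1000 bytes (1000 '#' characters), so ignoring per-record log overhead the soft limit of 50,000,000 bytes corresponds to roughly 50,000 records and the hard limit to roughly 55,000, far below the 100,000,000 records the producer is asked to send. A back-of-the-envelope sketch:

// Rough capacity math for the quota limits configured above; real on-disk usage
// is somewhat higher per record due to record batch and log segment overhead.
long payloadBytes = 1_000;                         // String.join("", Collections.nCopies(1000, "#")).length()
long softLimitBytes = 50_000_000;                  // client.quota.callback.static.storage.soft
long hardLimitBytes = 55_000_000;                  // client.quota.callback.static.storage.hard
System.out.println(softLimitBytes / payloadBytes); // ~50,000 records until throttling starts
System.out.println(hardLimitBytes / payloadBytes); // ~55,000 records until producers are blocked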

Example 19 with KafkaClients

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.

The class FeatureGatesIsolatedST, method testSwitchingStrimziPodSetFeatureGateOnAndOff.

@IsolatedTest
void testSwitchingStrimziPodSetFeatureGateOnAndOff(ExtensionContext extensionContext) {
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    int zkReplicas = 3;
    int kafkaReplicas = 3;
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    int messageCount = 500;
    List<EnvVar> coEnvVars = new ArrayList<>();
    coEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-UseStrimziPodSets", null));
    LOGGER.info("Deploying CO with STS - SPS is disabled");
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(coEnvVars)
        .createInstallation()
        .runInstallation();
    LOGGER.info("Deploying Kafka");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, kafkaReplicas, zkReplicas).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    Map<String, String> coPod = DeploymentUtils.depSnapshot(ResourceManager.getCoDeploymentName());
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(1000)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    LOGGER.info("Changing FG env variable to enable SPS");
    coEnvVars = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME).getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    coEnvVars.stream().filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV)).findFirst().get().setValue("+UseStrimziPodSets");
    Deployment coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);
    coPod = DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    zkPods = RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);
    KafkaUtils.waitForKafkaReady(clusterName);
    LOGGER.info("Changing FG env variable to disable again SPS");
    coEnvVars.stream().filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV)).findFirst().get().setValue("");
    coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);
    DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, INFRA_NAMESPACE, messageCount);
}
Also used: KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) AbstractST(io.strimzi.systemtest.AbstractST) Environment(io.strimzi.systemtest.Environment) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) KafkaResource(io.strimzi.systemtest.resources.crd.KafkaResource) Annotations(io.strimzi.operator.common.Annotations) Random(java.util.Random) KafkaTopicUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) INFRA_NAMESPACE(io.strimzi.systemtest.Constants.INFRA_NAMESPACE) ArrayList(java.util.ArrayList) PodUtils(io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils) PodBuilder(io.fabric8.kubernetes.api.model.PodBuilder) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) IsolatedTest(io.strimzi.test.annotations.IsolatedTest) Assumptions.assumeFalse(org.junit.jupiter.api.Assumptions.assumeFalse) Map(java.util.Map) Tag(org.junit.jupiter.api.Tag) KafkaTemplates(io.strimzi.systemtest.templates.crd.KafkaTemplates) BeforeAllOnce(io.strimzi.systemtest.BeforeAllOnce) RollingUpdateUtils(io.strimzi.systemtest.utils.RollingUpdateUtils) KafkaUtils(io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils) IsolatedSuite(io.strimzi.systemtest.annotations.IsolatedSuite) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) JobUtils(io.strimzi.systemtest.utils.kubeUtils.controllers.JobUtils) Constants(io.strimzi.systemtest.Constants) Pod(io.fabric8.kubernetes.api.model.Pod) INTERNAL_CLIENTS_USED(io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED) SetupClusterOperator(io.strimzi.systemtest.resources.operator.SetupClusterOperator) ClientUtils(io.strimzi.systemtest.utils.ClientUtils) KubeClusterResource.kubeClient(io.strimzi.test.k8s.KubeClusterResource.kubeClient) ContainerPort(io.fabric8.kubernetes.api.model.ContainerPort) List(java.util.List) KafkaTopic(io.strimzi.api.kafka.model.KafkaTopic) DeploymentUtils(io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils) Logger(org.apache.logging.log4j.Logger) ResourceManager(io.strimzi.systemtest.resources.ResourceManager) KafkaTopicTemplates(io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Deployment(io.fabric8.kubernetes.api.model.apps.Deployment) LogManager(org.apache.logging.log4j.LogManager) REGRESSION(io.strimzi.systemtest.Constants.REGRESSION)
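The feature-gate toggling above fetches the Deployment, mutates it, and calls replace(). A more compact alternative is fabric8's functional edit(); the following is a sketch, assuming the STRIMZI_FEATURE_GATES env var is already present on the first container and that io.fabric8.kubernetes.api.model.apps.DeploymentBuilder is imported:

kubeClient().getClient().apps().deployments()
    .inNamespace(INFRA_NAMESPACE)
    .withName(Constants.STRIMZI_DEPLOYMENT_NAME)
    .edit(d -> new DeploymentBuilder(d)
        .editSpec().editTemplate().editSpec()
            .editFirstContainer()
                // select the feature-gates env var and flip its value in place
                .editMatchingEnv(e -> Environment.STRIMZI_FEATURE_GATES_ENV.equals(e.getName()))
                    .withValue("+UseStrimziPodSets") // "" or "-UseStrimziPodSets" to disable
                .endEnv()
            .endContainer()
        .endSpec().endTemplate().endSpec()
        .build());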

Example 20 with KafkaClients

Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in project strimzi-kafka-operator by strimzi.

The class MirrorMaker2IsolatedST, method testRestoreOffsetsInConsumerGroup.

@ParallelNamespaceTest
@SuppressWarnings({ "checkstyle:MethodLength" })
void testRestoreOffsetsInConsumerGroup(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String kafkaClusterSourceName = clusterName + "-source";
    final String kafkaClusterTargetName = clusterName + "-target";
    final String syncGroupOffsetsIntervalSeconds = "1";
    final String topicSourceNameMirrored = "test-sync-offset-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicTargetNameMirrored = kafkaClusterSourceName + "." + topicSourceNameMirrored;
    final String consumerGroup = "mm2-test-consumer-group";
    final String sourceProducerName = "mm2-producer-source-" + ClientUtils.generateRandomConsumerGroup();
    final String sourceConsumerName = "mm2-consumer-source-" + ClientUtils.generateRandomConsumerGroup();
    final String targetProducerName = "mm2-producer-target-" + ClientUtils.generateRandomConsumerGroup();
    final String targetConsumerName = "mm2-consumer-target-" + ClientUtils.generateRandomConsumerGroup();
    final String mm2SrcTrgName = clusterName + "-src-trg";
    final String mm2TrgSrcName = clusterName + "-trg-src";
    resourceManager.createResource(extensionContext, false,
        // Deploy source kafka
        KafkaTemplates.kafkaPersistent(kafkaClusterSourceName, 1, 1).build(),
        // Deploy target kafka
        KafkaTemplates.kafkaPersistent(kafkaClusterTargetName, 1, 1).build());
    // Wait for Kafka clusters readiness
    KafkaUtils.waitForKafkaReady(namespaceName, kafkaClusterSourceName);
    KafkaUtils.waitForKafkaReady(namespaceName, kafkaClusterTargetName);
    resourceManager.createResource(extensionContext,
        // *.replication.factor(s) are set to 1 just to speed up the test by using only 1 ZK and 1 Kafka
        KafkaMirrorMaker2Templates.kafkaMirrorMaker2(mm2TrgSrcName, kafkaClusterTargetName, kafkaClusterSourceName, 1, false)
            .editSpec()
                .editFirstMirror()
                    .editSourceConnector()
                        .addToConfig("refresh.topics.interval.seconds", "1")
                        .addToConfig("replication.factor", "1")
                        .addToConfig("offset-syncs.topic.replication.factor", "1")
                    .endSourceConnector()
                    .editCheckpointConnector()
                        .addToConfig("refresh.groups.interval.seconds", "1")
                        .addToConfig("sync.group.offsets.enabled", "true")
                        .addToConfig("sync.group.offsets.interval.seconds", syncGroupOffsetsIntervalSeconds)
                        .addToConfig("emit.checkpoints.enabled", "true")
                        .addToConfig("emit.checkpoints.interval.seconds", "1")
                        .addToConfig("checkpoints.topic.replication.factor", "1")
                    .endCheckpointConnector()
                    .editHeartbeatConnector()
                        .addToConfig("heartbeats.topic.replication.factor", "1")
                    .endHeartbeatConnector()
                    .withTopicsPattern(".*")
                    .withGroupsPattern(".*")
                .endMirror()
            .endSpec()
            .build(),
        // MM2 Active (S) <-> Active (T), direction S <- T mirroring
        KafkaMirrorMaker2Templates.kafkaMirrorMaker2(mm2SrcTrgName, kafkaClusterSourceName, kafkaClusterTargetName, 1, false)
            .editSpec()
                .editFirstMirror()
                    .editSourceConnector()
                        .addToConfig("refresh.topics.interval.seconds", "1")
                        .addToConfig("replication.factor", "1")
                        .addToConfig("offset-syncs.topic.replication.factor", "1")
                    .endSourceConnector()
                    .editCheckpointConnector()
                        .addToConfig("refresh.groups.interval.seconds", "1")
                        .addToConfig("sync.group.offsets.enabled", "true")
                        .addToConfig("sync.group.offsets.interval.seconds", syncGroupOffsetsIntervalSeconds)
                        .addToConfig("emit.checkpoints.enabled", "true")
                        .addToConfig("emit.checkpoints.interval.seconds", "1")
                        .addToConfig("checkpoints.topic.replication.factor", "1")
                    .endCheckpointConnector()
                    .editHeartbeatConnector()
                        .addToConfig("heartbeats.topic.replication.factor", "1")
                    .endHeartbeatConnector()
                    .withTopicsPattern(".*")
                    .withGroupsPattern(".*")
                .endMirror()
            .endSpec()
            .build(),
        // Deploy topic
        KafkaTopicTemplates.topic(kafkaClusterSourceName, topicSourceNameMirrored, 3).build());
    KafkaClients initialInternalClientSourceJob = new KafkaClientsBuilder()
        .withProducerName(sourceProducerName)
        .withConsumerName(sourceConsumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterSourceName))
        .withTopicName(topicSourceNameMirrored)
        .withMessageCount(MESSAGE_COUNT)
        .withMessage("Producer A")
        .withConsumerGroup(consumerGroup)
        .withNamespaceName(namespaceName)
        .build();
    KafkaClients initialInternalClientTargetJob = new KafkaClientsBuilder()
        .withProducerName(targetProducerName)
        .withConsumerName(targetConsumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName))
        .withTopicName(topicTargetNameMirrored)
        .withMessageCount(MESSAGE_COUNT)
        .withConsumerGroup(consumerGroup)
        .withNamespaceName(namespaceName)
        .build();
    LOGGER.info("Send & receive {} messages to/from Source cluster.", MESSAGE_COUNT);
    resourceManager.createResource(extensionContext, initialInternalClientSourceJob.producerStrimzi(), initialInternalClientSourceJob.consumerStrimzi());
    ClientUtils.waitForClientSuccess(sourceProducerName, namespaceName, MESSAGE_COUNT);
    ClientUtils.waitForClientSuccess(sourceConsumerName, namespaceName, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(namespaceName, sourceProducerName);
    JobUtils.deleteJobWithWait(namespaceName, sourceConsumerName);
    LOGGER.info("Send {} messages to Source cluster.", MESSAGE_COUNT);
    KafkaClients internalClientSourceJob = new KafkaClientsBuilder(initialInternalClientSourceJob).withMessage("Producer B").build();
    resourceManager.createResource(extensionContext, internalClientSourceJob.producerStrimzi());
    ClientUtils.waitForClientSuccess(sourceProducerName, namespaceName, MESSAGE_COUNT);
    LOGGER.info("Wait 1 second as 'sync.group.offsets.interval.seconds=1'. As this is insignificant wait, we're skipping it");
    LOGGER.info("Receive {} messages from mirrored topic on Target cluster.", MESSAGE_COUNT);
    resourceManager.createResource(extensionContext, initialInternalClientTargetJob.consumerStrimzi());
    ClientUtils.waitForClientSuccess(targetConsumerName, namespaceName, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(namespaceName, sourceProducerName);
    JobUtils.deleteJobWithWait(namespaceName, targetConsumerName);
    LOGGER.info("Send 50 messages to Source cluster");
    internalClientSourceJob = new KafkaClientsBuilder(internalClientSourceJob).withMessageCount(50).withMessage("Producer C").build();
    resourceManager.createResource(extensionContext, internalClientSourceJob.producerStrimzi());
    ClientUtils.waitForClientSuccess(sourceProducerName, namespaceName, 50);
    JobUtils.deleteJobWithWait(namespaceName, sourceProducerName);
    LOGGER.info("Wait 1 second as 'sync.group.offsets.interval.seconds=1'. As this is insignificant wait, we're skipping it");
    LOGGER.info("Receive 10 msgs from source cluster");
    internalClientSourceJob = new KafkaClientsBuilder(internalClientSourceJob).withMessageCount(10).withAdditionalConfig("max.poll.records=10").build();
    resourceManager.createResource(extensionContext, internalClientSourceJob.consumerStrimzi());
    ClientUtils.waitForClientSuccess(sourceConsumerName, namespaceName, 10);
    JobUtils.deleteJobWithWait(namespaceName, sourceConsumerName);
    LOGGER.info("Wait 1 second as 'sync.group.offsets.interval.seconds=1'. As this is insignificant wait, we're skipping it");
    LOGGER.info("Receive 40 msgs from mirrored topic on Target cluster");
    KafkaClients internalClientTargetJob = new KafkaClientsBuilder(initialInternalClientTargetJob).withMessageCount(40).build();
    resourceManager.createResource(extensionContext, internalClientTargetJob.consumerStrimzi());
    ClientUtils.waitForClientSuccess(targetConsumerName, namespaceName, 40);
    JobUtils.deleteJobWithWait(namespaceName, targetConsumerName);
    LOGGER.info("There should be no more messages to read. Try to consume at least 1 message. " + "This client job should fail on timeout.");
    resourceManager.createResource(extensionContext, initialInternalClientTargetJob.consumerStrimzi());
    assertDoesNotThrow(() -> ClientUtils.waitForClientTimeout(targetConsumerName, namespaceName, 1));
    LOGGER.info("As it's Active-Active MM2 mode, there should be no more messages to read from Source cluster" + " topic. This client job should fail on timeout.");
    resourceManager.createResource(extensionContext, initialInternalClientSourceJob.consumerStrimzi());
    assertDoesNotThrow(() -> ClientUtils.waitForClientTimeout(sourceConsumerName, namespaceName, 1));
}
Also used: KafkaClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) Random(java.util.Random) KafkaClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) Matchers.containsString(org.hamcrest.Matchers.containsString) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
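The 40-message remainder follows from the checkpointing: the group consumed 10 of the 50 Producer C records on the source, the checkpoint connector synced that committed offset to the target within 'sync.group.offsets.interval.seconds=1', so only 40 records remained unread on the mirrored topic. Applications that cannot rely on automatic group offset sync can translate offsets themselves from the MM2 checkpoint topic; the following is a sketch using Kafka's connect-mirror-client module, with the cluster alias and bootstrap address as assumptions:

import java.time.Duration;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.mirror.RemoteClusterUtils;

static Map<TopicPartition, OffsetAndMetadata> translatedOffsets() throws Exception {
    // Client properties pointing at the target cluster, where the checkpoints live.
    Map<String, Object> targetProps = Map.of("bootstrap.servers", "my-cluster-target-kafka-bootstrap:9092"); // assumed address
    // Reads MM2 checkpoints and returns, per partition, the target-cluster offset
    // equivalent to the group's last committed source offset; the result can be
    // fed to Consumer#seek or AdminClient#alterConsumerGroupOffsets.
    return RemoteClusterUtils.translateOffsets(
        targetProps,
        "my-cluster-source",        // alias of the remote (source) cluster, assumed
        "mm2-test-consumer-group",  // consumer group used in the test
        Duration.ofSeconds(30));
}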

Aggregations

KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients) 40
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder) 40
Tag (org.junit.jupiter.api.Tag) 22
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector) 12
KafkaResources (io.strimzi.api.kafka.model.KafkaResources) 12
IsolatedTest (io.strimzi.systemtest.annotations.IsolatedTest) 12
ParallelNamespaceTest (io.strimzi.systemtest.annotations.ParallelNamespaceTest) 12
KafkaTemplates (io.strimzi.systemtest.templates.crd.KafkaTemplates) 12
KafkaTopicTemplates (io.strimzi.systemtest.templates.crd.KafkaTopicTemplates) 12
ClientUtils (io.strimzi.systemtest.utils.ClientUtils) 12
List (java.util.List) 12
ExtensionContext (org.junit.jupiter.api.extension.ExtensionContext) 12
PodBuilder (io.fabric8.kubernetes.api.model.PodBuilder) 10
AbstractST (io.strimzi.systemtest.AbstractST) 10
REGRESSION (io.strimzi.systemtest.Constants.REGRESSION) 10
SetupClusterOperator (io.strimzi.systemtest.resources.operator.SetupClusterOperator) 10
Random (java.util.Random) 10
LogManager (org.apache.logging.log4j.LogManager) 10
Logger (org.apache.logging.log4j.Logger) 10
GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) 8