Usage example of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in the strimzi-kafka-operator project by strimzi.
Taken from class ConnectBuilderIsolatedST, method testBuildPluginUsingMavenCoordinatesArtifacts.
@ParallelTest
void testBuildPluginUsingMavenCoordinatesArtifacts(ExtensionContext extensionContext) {
    // Per-test resource names are derived from the JUnit display name so parallel tests do not collide.
    final String testName = extensionContext.getDisplayName();
    final String connectClusterName = mapWithClusterNames.get(testName);
    final String imageName = getImageNameForTestCase();
    final String topicName = mapWithTestTopics.get(testName);
    final String connectorName = connectClusterName + "-camel-connector";
    final String consumerName = mapWithKafkaClientNames.get(testName) + "-consumer";

    // Deploy the target topic plus a single-node Connect cluster whose image is built
    // from a plugin referenced by Maven coordinates (PLUGIN_WITH_MAVEN_TYPE) and pushed
    // to the configured Docker output image.
    resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(INFRA_NAMESPACE, topicName).build(),
        KafkaConnectTemplates.kafkaConnect(extensionContext, connectClusterName, INFRA_NAMESPACE, INFRA_NAMESPACE, 1, false)
            .editMetadata()
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .editOrNewSpec()
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                .withNewBuild()
                    .withPlugins(PLUGIN_WITH_MAVEN_TYPE)
                    .withNewDockerOutput()
                        .withImage(imageName)
                    .endDockerOutput()
                .endBuild()
            .endSpec()
            .build());

    // Connector configuration: write into the test topic, driven by the camel timer named "timer".
    final Map<String, Object> connectorConfig = new HashMap<>();
    connectorConfig.put("topics", topicName);
    connectorConfig.put("camel.source.path.timerName", "timer");

    resourceManager.createResource(extensionContext,
        KafkaConnectorTemplates.kafkaConnector(connectorName, connectClusterName)
            .editOrNewSpec()
                .withClassName(CAMEL_CONNECTOR_TIMER_CLASS_NAME)
                .withConfig(connectorConfig)
            .endSpec()
            .build());

    // Consume the records the timer connector produced and wait for the client Job to succeed.
    final KafkaClients consumerClient = new KafkaClientsBuilder()
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(INFRA_NAMESPACE))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withDelayMs(0)
        .build();
    resourceManager.createResource(extensionContext, consumerClient.consumerStrimzi());
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
Usage example of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in the strimzi-kafka-operator project by strimzi.
Taken from class ConfigProviderST, method testConnectWithConnectorUsingConfigAndEnvProvider.
@ParallelNamespaceTest
void testConnectWithConnectorUsingConfigAndEnvProvider(ExtensionContext extensionContext) {
    // Verifies that a KafkaConnector can resolve its configuration through both the
    // Kubernetes ConfigMap config provider ("configmaps") and the env-var provider ("env").
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String producerName = "producer-" + ClientUtils.generateRandomConsumerGroup();
    final String customFileSinkPath = "/tmp/my-own-path.txt";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    // Values served to the connector by the config providers.
    Map<String, String> configData = new HashMap<>();
    configData.put("topics", topicName);
    configData.put("file", customFileSinkPath);
    configData.put("key", "org.apache.kafka.connect.storage.StringConverter");
    configData.put("value", "org.apache.kafka.connect.storage.StringConverter");

    String cmName = "connector-config";
    String configRoleName = "connector-config-role";

    ConfigMap connectorConfig = new ConfigMapBuilder()
        .editOrNewMetadata()
            .withName(cmName)
        .endMetadata()
        .withData(configData)
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).create(connectorConfig);

    // Connect cluster with both config providers enabled; FILE_SINK_FILE is injected from the
    // ConfigMap's "file" key through the external configuration, for the "env" provider.
    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1, false)
        .editOrNewMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .editOrNewSpec()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("config.providers", "configmaps,env")
            .addToConfig("config.providers.configmaps.class", "io.strimzi.kafka.KubernetesConfigMapConfigProvider")
            .addToConfig("config.providers.env.class", "io.strimzi.kafka.EnvVarConfigProvider")
            .editOrNewExternalConfiguration()
                .addNewEnv()
                    .withName("FILE_SINK_FILE")
                    .withNewValueFrom()
                        .withNewConfigMapKeyRef("file", cmName, false)
                    .endValueFrom()
                .endEnv()
            .endExternalConfiguration()
        .endSpec()
        .build());

    LOGGER.info("Creating needed RoleBinding and Role for Kubernetes Config Provider");

    // Create the Role before the RoleBinding that references it, so the binding never points
    // at a not-yet-existing Role. The Role grants the Connect ServiceAccount read access to
    // the single ConfigMap used by the "configmaps" provider.
    Role configRole = new RoleBuilder()
        .editOrNewMetadata()
            .withName(configRoleName)
            .withNamespace(namespaceName)
        .endMetadata()
        .addNewRule()
            .withApiGroups("")
            .withResources("configmaps")
            .withResourceNames(cmName)
            .withVerbs("get")
        .endRule()
        .build();
    kubeClient().getClient().resource(configRole).createOrReplace();

    ResourceManager.getInstance().createResource(extensionContext, new RoleBindingBuilder()
        .editOrNewMetadata()
            .withName("connector-config-rb")
            .withNamespace(namespaceName)
        .endMetadata()
        .withSubjects(new SubjectBuilder()
            .withKind("ServiceAccount")
            .withName(clusterName + "-connect")
            .withNamespace(namespaceName)
            .build())
        .withRoleRef(new RoleRefBuilder()
            .withKind("Role")
            .withName(configRoleName)
            .withApiGroup("rbac.authorization.k8s.io")
            .build())
        .build());

    // Reuse cmName here instead of repeating the "connector-config" literal (was duplicated).
    String configPrefix = "configmaps:" + namespaceName + "/" + cmName + ":";

    // Connector resolves "file" via the env provider and the converters/topics via the ConfigMap provider.
    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
        .editSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
            .addToConfig("file", "${env:FILE_SINK_FILE}")
            .addToConfig("key.converter", "${" + configPrefix + "key}")
            .addToConfig("value.converter", "${" + configPrefix + "value}")
            .addToConfig("topics", "${" + configPrefix + "topics}")
        .endSpec()
        .build());

    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withDelayMs(0)
        .withNamespaceName(namespaceName)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());

    String kafkaConnectPodName = kubeClient().listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName();
    // The sink connector writes consumed records into the file; wait for the final message to appear.
    KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(namespaceName, kafkaConnectPodName, customFileSinkPath, "Hello-world - 99");
}
Usage example of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in the strimzi-kafka-operator project by strimzi.
Taken from class QuotasST, method testKafkaQuotasPluginIntegration.
/**
 * Test to check Kafka Quotas Plugin for disk space.
 *
 * Deploys a single-node Kafka with the StaticQuotaCallback plugin configured with
 * soft/hard storage limits, floods the broker with more data than the configured limits
 * allow, and expects the producer Job to be blocked by the plugin (never succeeding)
 * while the broker log reports both the soft and the hard limit being hit.
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testKafkaQuotasPluginIntegration(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String producerName = "quotas-producer";
    final String consumerName = "quotas-consumer";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 1)
        .editSpec()
            .editKafka()
                .addToConfig("client.quota.callback.class", "io.strimzi.kafka.quotas.StaticQuotaCallback")
                // Hard limit (55 MB) is above the soft limit (50 MB) so both log lines appear.
                .addToConfig("client.quota.callback.static.storage.hard", "55000000")
                .addToConfig("client.quota.callback.static.storage.soft", "50000000")
                .addToConfig("client.quota.callback.static.storage.check-interval", "5")
                .withNewPersistentClaimStorage()
                    .withSize("1Gi")
                .endPersistentClaimStorage()
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    // Send more messages than disk can store to see if the integration works:
    // 100M messages of 1000 '#' characters each vastly exceeds the storage limits.
    KafkaClients basicClients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(100000000)
        .withDelayMs(0)
        .withMessage(String.join("", Collections.nCopies(1000, "#")))
        .build();
    resourceManager.createResource(extensionContext, basicClients.producerStrimzi());

    // Kafka Quotas Plugin should stop producer in around 10-20 seconds with configured throughput.
    // Fix: wait for the producer Job in namespaceName (the test's actual namespace), not
    // INFRA_NAMESPACE — everything else in this method consistently uses namespaceName.
    assertThrows(WaitException.class, () -> JobUtils.waitForJobFailure(producerName, namespaceName, 120_000));

    // Both quota thresholds must have been logged by the broker.
    String kafkaLog = kubeClient(namespaceName).logs(KafkaResources.kafkaPodName(clusterName, 0));
    String softLimitLog = "disk is beyond soft limit";
    String hardLimitLog = "disk is full";
    assertThat("Kafka log doesn't contain '" + softLimitLog + "' log", kafkaLog, CoreMatchers.containsString(softLimitLog));
    assertThat("Kafka log doesn't contain '" + hardLimitLog + "' log", kafkaLog, CoreMatchers.containsString(hardLimitLog));
}
Usage example of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in the strimzi-kafka-operator project by strimzi.
Taken from class FeatureGatesIsolatedST, method testSwitchingStrimziPodSetFeatureGateOnAndOff.
@IsolatedTest
void testSwitchingStrimziPodSetFeatureGateOnAndOff(ExtensionContext extensionContext) {
    // The Cluster Operator Deployment is edited directly below, which is not possible
    // for OLM/Helm managed installations.
    assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());

    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();

    final int zkReplicas = 3;
    final int kafkaReplicas = 3;
    final int messageCount = 500;

    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));

    LOGGER.info("Deploying CO with STS - SPS is disabled");
    final List<EnvVar> installEnvVars = new ArrayList<>();
    installEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-UseStrimziPodSets", null));
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
        .withExtraEnvVars(installEnvVars)
        .createInstallation()
        .runInstallation();

    LOGGER.info("Deploying Kafka");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, kafkaReplicas, zkReplicas).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());

    // Snapshots taken before the feature-gate flip so the rolling updates can be detected.
    Map<String, String> kafkaPods = PodUtils.podSnapshot(INFRA_NAMESPACE, kafkaSelector);
    Map<String, String> zkPods = PodUtils.podSnapshot(INFRA_NAMESPACE, zkSelector);
    Map<String, String> coPod = DeploymentUtils.depSnapshot(ResourceManager.getCoDeploymentName());

    // Continuous client Jobs keep messages flowing while the components roll.
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(messageCount)
        .withDelayMs(1000)
        .withNamespaceName(INFRA_NAMESPACE)
        .build();
    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());

    LOGGER.info("Changing FG env variable to enable SPS");
    final List<EnvVar> coEnvVars = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME)
        .getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    final EnvVar featureGatesEnvVar = coEnvVars.stream()
        .filter(env -> env.getName().equals(Environment.STRIMZI_FEATURE_GATES_ENV))
        .findFirst()
        .get();
    featureGatesEnvVar.setValue("+UseStrimziPodSets");

    Deployment coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);

    // The CO rolls first; then ZooKeeper and Kafka roll under the new feature gate.
    coPod = DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    zkPods = RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);
    KafkaUtils.waitForKafkaReady(clusterName);

    LOGGER.info("Changing FG env variable to disable again SPS");
    // Same EnvVar instance looked up above; value is cleared ("") rather than set to
    // "-UseStrimziPodSets" — NOTE(review): presumably the default disables SPS; confirm.
    featureGatesEnvVar.setValue("");
    coDep = kubeClient().getDeployment(Constants.STRIMZI_DEPLOYMENT_NAME);
    coDep.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(coEnvVars);
    kubeClient().getClient().apps().deployments().inNamespace(INFRA_NAMESPACE).withName(Constants.STRIMZI_DEPLOYMENT_NAME).replace(coDep);

    DeploymentUtils.waitTillDepHasRolled(Constants.STRIMZI_DEPLOYMENT_NAME, 1, coPod);
    RollingUpdateUtils.waitTillComponentHasRolled(zkSelector, zkReplicas, zkPods);
    RollingUpdateUtils.waitTillComponentHasRolled(kafkaSelector, kafkaReplicas, kafkaPods);

    // All messages must make it through despite the two rounds of rolling updates.
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, INFRA_NAMESPACE, messageCount);
}
Usage example of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients in the strimzi-kafka-operator project by strimzi.
Taken from class MirrorMaker2IsolatedST, method testRestoreOffsetsInConsumerGroup.
// Checks that MirrorMaker 2 in Active-Active mode (sync.group.offsets.enabled=true on both
// checkpoint connectors) synchronizes consumer-group offsets between the two clusters, so
// the shared consumer group can continue on the other cluster without re-reading messages.
@ParallelNamespaceTest
@SuppressWarnings({ "checkstyle:MethodLength" })
void testRestoreOffsetsInConsumerGroup(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String kafkaClusterSourceName = clusterName + "-source";
final String kafkaClusterTargetName = clusterName + "-target";
final String syncGroupOffsetsIntervalSeconds = "1";
final String topicSourceNameMirrored = "test-sync-offset-" + new Random().nextInt(Integer.MAX_VALUE);
// On the target cluster the mirrored topic is prefixed with the source cluster name.
final String topicTargetNameMirrored = kafkaClusterSourceName + "." + topicSourceNameMirrored;
// Single consumer group shared by source and target consumers — its offsets are what MM2 syncs.
final String consumerGroup = "mm2-test-consumer-group";
final String sourceProducerName = "mm2-producer-source-" + ClientUtils.generateRandomConsumerGroup();
final String sourceConsumerName = "mm2-consumer-source-" + ClientUtils.generateRandomConsumerGroup();
final String targetProducerName = "mm2-producer-target-" + ClientUtils.generateRandomConsumerGroup();
final String targetConsumerName = "mm2-consumer-target-" + ClientUtils.generateRandomConsumerGroup();
final String mm2SrcTrgName = clusterName + "-src-trg";
final String mm2TrgSrcName = clusterName + "-trg-src";
// Both clusters are created without waiting (waitReady = false); readiness is awaited below.
resourceManager.createResource(extensionContext, false, // Deploy source kafka
KafkaTemplates.kafkaPersistent(kafkaClusterSourceName, 1, 1).build(), // Deploy target kafka
KafkaTemplates.kafkaPersistent(kafkaClusterTargetName, 1, 1).build());
// Wait for Kafka clusters readiness
KafkaUtils.waitForKafkaReady(namespaceName, kafkaClusterSourceName);
KafkaUtils.waitForKafkaReady(namespaceName, kafkaClusterTargetName);
// Two MM2 instances, one per mirroring direction; both mirror all topics/groups (".*") with
// 1-second refresh/checkpoint intervals and group-offset syncing enabled.
resourceManager.createResource(extensionContext, // *.replication.factor(s) to 1 are added just to speed up test by using only 1 ZK and 1 Kafka
KafkaMirrorMaker2Templates.kafkaMirrorMaker2(mm2TrgSrcName, kafkaClusterTargetName, kafkaClusterSourceName, 1, false).editSpec().editFirstMirror().editSourceConnector().addToConfig("refresh.topics.interval.seconds", "1").addToConfig("replication.factor", "1").addToConfig("offset-syncs.topic.replication.factor", "1").endSourceConnector().editCheckpointConnector().addToConfig("refresh.groups.interval.seconds", "1").addToConfig("sync.group.offsets.enabled", "true").addToConfig("sync.group.offsets.interval.seconds", syncGroupOffsetsIntervalSeconds).addToConfig("emit.checkpoints.enabled", "true").addToConfig("emit.checkpoints.interval.seconds", "1").addToConfig("checkpoints.topic.replication.factor", "1").endCheckpointConnector().editHeartbeatConnector().addToConfig("heartbeats.topic.replication.factor", "1").endHeartbeatConnector().withTopicsPattern(".*").withGroupsPattern(".*").endMirror().endSpec().build(), // MM2 Active (S) <-> Active (T) // direction S <- T mirroring
KafkaMirrorMaker2Templates.kafkaMirrorMaker2(mm2SrcTrgName, kafkaClusterSourceName, kafkaClusterTargetName, 1, false).editSpec().editFirstMirror().editSourceConnector().addToConfig("refresh.topics.interval.seconds", "1").addToConfig("replication.factor", "1").addToConfig("offset-syncs.topic.replication.factor", "1").endSourceConnector().editCheckpointConnector().addToConfig("refresh.groups.interval.seconds", "1").addToConfig("sync.group.offsets.enabled", "true").addToConfig("sync.group.offsets.interval.seconds", syncGroupOffsetsIntervalSeconds).addToConfig("emit.checkpoints.enabled", "true").addToConfig("emit.checkpoints.interval.seconds", "1").addToConfig("checkpoints.topic.replication.factor", "1").endCheckpointConnector().editHeartbeatConnector().addToConfig("heartbeats.topic.replication.factor", "1").endHeartbeatConnector().withTopicsPattern(".*").withGroupsPattern(".*").endMirror().endSpec().build(), // deploy topic
KafkaTopicTemplates.topic(kafkaClusterSourceName, topicSourceNameMirrored, 3).build());
// Both client definitions use the same consumerGroup; the target client reads the mirrored topic.
KafkaClients initialInternalClientSourceJob = new KafkaClientsBuilder().withProducerName(sourceProducerName).withConsumerName(sourceConsumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterSourceName)).withTopicName(topicSourceNameMirrored).withMessageCount(MESSAGE_COUNT).withMessage("Producer A").withConsumerGroup(consumerGroup).withNamespaceName(namespaceName).build();
KafkaClients initialInternalClientTargetJob = new KafkaClientsBuilder().withProducerName(targetProducerName).withConsumerName(targetConsumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName)).withTopicName(topicTargetNameMirrored).withMessageCount(MESSAGE_COUNT).withConsumerGroup(consumerGroup).withNamespaceName(namespaceName).build();
// Phase 1: produce and fully consume MESSAGE_COUNT ("Producer A") messages on the source cluster.
LOGGER.info("Send & receive {} messages to/from Source cluster.", MESSAGE_COUNT);
resourceManager.createResource(extensionContext, initialInternalClientSourceJob.producerStrimzi(), initialInternalClientSourceJob.consumerStrimzi());
ClientUtils.waitForClientSuccess(sourceProducerName, namespaceName, MESSAGE_COUNT);
ClientUtils.waitForClientSuccess(sourceConsumerName, namespaceName, MESSAGE_COUNT);
JobUtils.deleteJobWithWait(namespaceName, sourceProducerName);
JobUtils.deleteJobWithWait(namespaceName, sourceConsumerName);
// Phase 2: produce another MESSAGE_COUNT ("Producer B") messages to the source, unconsumed there.
LOGGER.info("Send {} messages to Source cluster.", MESSAGE_COUNT);
KafkaClients internalClientSourceJob = new KafkaClientsBuilder(initialInternalClientSourceJob).withMessage("Producer B").build();
resourceManager.createResource(extensionContext, internalClientSourceJob.producerStrimzi());
ClientUtils.waitForClientSuccess(sourceProducerName, namespaceName, MESSAGE_COUNT);
LOGGER.info("Wait 1 second as 'sync.group.offsets.interval.seconds=1'. As this is insignificant wait, we're skipping it");
// The target consumer (same group, synced offsets) should read exactly the unconsumed batch.
LOGGER.info("Receive {} messages from mirrored topic on Target cluster.", MESSAGE_COUNT);
resourceManager.createResource(extensionContext, initialInternalClientTargetJob.consumerStrimzi());
ClientUtils.waitForClientSuccess(targetConsumerName, namespaceName, MESSAGE_COUNT);
JobUtils.deleteJobWithWait(namespaceName, sourceProducerName);
JobUtils.deleteJobWithWait(namespaceName, targetConsumerName);
// Phase 3: produce 50 more ("Producer C"), consume 10 on the source, then the remaining 40
// must be readable from the target via the synced group offset.
LOGGER.info("Send 50 messages to Source cluster");
internalClientSourceJob = new KafkaClientsBuilder(internalClientSourceJob).withMessageCount(50).withMessage("Producer C").build();
resourceManager.createResource(extensionContext, internalClientSourceJob.producerStrimzi());
ClientUtils.waitForClientSuccess(sourceProducerName, namespaceName, 50);
JobUtils.deleteJobWithWait(namespaceName, sourceProducerName);
LOGGER.info("Wait 1 second as 'sync.group.offsets.interval.seconds=1'. As this is insignificant wait, we're skipping it");
LOGGER.info("Receive 10 msgs from source cluster");
// max.poll.records=10 keeps the source consumer from fetching past the 10 messages it commits.
internalClientSourceJob = new KafkaClientsBuilder(internalClientSourceJob).withMessageCount(10).withAdditionalConfig("max.poll.records=10").build();
resourceManager.createResource(extensionContext, internalClientSourceJob.consumerStrimzi());
ClientUtils.waitForClientSuccess(sourceConsumerName, namespaceName, 10);
JobUtils.deleteJobWithWait(namespaceName, sourceConsumerName);
LOGGER.info("Wait 1 second as 'sync.group.offsets.interval.seconds=1'. As this is insignificant wait, we're skipping it");
LOGGER.info("Receive 40 msgs from mirrored topic on Target cluster");
KafkaClients internalClientTargetJob = new KafkaClientsBuilder(initialInternalClientTargetJob).withMessageCount(40).build();
resourceManager.createResource(extensionContext, internalClientTargetJob.consumerStrimzi());
ClientUtils.waitForClientSuccess(targetConsumerName, namespaceName, 40);
JobUtils.deleteJobWithWait(namespaceName, targetConsumerName);
// Phase 4: both clusters' group offsets are at the end — further consume attempts must time out.
LOGGER.info("There should be no more messages to read. Try to consume at least 1 message. " + "This client job should fail on timeout.");
resourceManager.createResource(extensionContext, initialInternalClientTargetJob.consumerStrimzi());
assertDoesNotThrow(() -> ClientUtils.waitForClientTimeout(targetConsumerName, namespaceName, 1));
LOGGER.info("As it's Active-Active MM2 mode, there should be no more messages to read from Source cluster" + " topic. This client job should fail on timeout.");
resourceManager.createResource(extensionContext, initialInternalClientSourceJob.consumerStrimzi());
assertDoesNotThrow(() -> ClientUtils.waitForClientTimeout(sourceConsumerName, namespaceName, 1));
}
Aggregations