Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi by strimzi.
The class ClusterOperationIsolatedST, method testAvailabilityDuringNodeDrain.
@IsolatedTest
@MultiNodeClusterOnly
@RequiredMinKubeApiVersion(version = 1.15)
void testAvailabilityDuringNodeDrain(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    int size = 5;
    List<String> topicNames = IntStream.range(0, size).boxed().map(i -> "test-topic-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed().map(i -> "hello-world-producer-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed().map(i -> "hello-world-consumer-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed().map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
    int continuousClientsMessageCount = 300;

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
        .editOrNewSpec()
            .editEntityOperator()
                .editUserOperator()
                    .withReconciliationIntervalSeconds(30)
                .endUserOperator()
            .endEntityOperator()
        .endSpec()
        .build());

    topicNames.forEach(topicName -> resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(clusterName, topicName, 3, 3, 2).build()));

    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    KafkaClients kafkaBasicClientResource;

    for (int i = 0; i < size; i++) {
        kafkaBasicClientResource = new KafkaClientsBuilder()
            .withProducerName(producerNames.get(i))
            .withConsumerName(consumerNames.get(i))
            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
            .withTopicName(topicNames.get(i))
            .withMessageCount(continuousClientsMessageCount)
            .withAdditionalConfig(producerAdditionConfiguration)
            .withConsumerGroup(continuousConsumerGroups.get(i))
            .withDelayMs(1000)
            .build();
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.producerStrimzi());
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.consumerStrimzi());
    }
    // ##############################
    // Nodes draining
    // ##############################
    kubeClient().getClusterWorkers().forEach(node -> {
        NodeUtils.drainNode(node.getMetadata().getName());
        NodeUtils.cordonNode(node.getMetadata().getName(), true);
    });
    producerNames.forEach(producerName -> ClientUtils.waitTillContinuousClientsFinish(producerName,
        consumerNames.get(producerNames.indexOf(producerName)), NAMESPACE, continuousClientsMessageCount));
    producerNames.forEach(producerName -> kubeClient().deleteJob(producerName));
    consumerNames.forEach(consumerName -> kubeClient().deleteJob(consumerName));
}
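The NodeUtils helpers encapsulate the drain and cordon mechanics, while the continuous client jobs keep producing and consuming throughout, which is what verifies availability. For orientation, here is a minimal sketch of the cordon step using the fabric8 KubernetesClient (assuming a fabric8 version with the UnaryOperator-based edit API); the class and method names are hypothetical and this is not the Strimzi NodeUtils implementation:

import io.fabric8.kubernetes.client.KubernetesClient;

public class NodeCordonSketch {
    // Setting spec.unschedulable is what `kubectl cordon` does; a full drain
    // additionally evicts the pods still running on the node.
    public static void setUnschedulable(KubernetesClient client, String nodeName, boolean unschedulable) {
        client.nodes().withName(nodeName).edit(node -> {
            node.getSpec().setUnschedulable(unschedulable);
            return node;
        });
    }
}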
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi-kafka-operator by strimzi.
The class HttpBridgeKafkaExternalListenersST, method testWeirdUsername.
@SuppressWarnings({ "checkstyle:MethodLength" })
private void testWeirdUsername(ExtensionContext extensionContext, String weirdUserName,
        KafkaListenerAuthentication auth, KafkaBridgeSpec spec, SecurityProtocol securityProtocol) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
        .editMetadata().withNamespace(namespace).endMetadata()
        .editSpec().editKafka()
            .withListeners(
                new GenericKafkaListenerBuilder()
                    .withName(Constants.TLS_LISTENER_DEFAULT_NAME).withPort(9093)
                    .withType(KafkaListenerType.INTERNAL).withTls(true).withAuth(auth)
                    .build(),
                new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME).withPort(9094)
                    .withType(KafkaListenerType.NODEPORT).withTls(true).withAuth(auth)
                    .build())
        .endKafka().endSpec()
        .build());

    BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder()
        .withProducerName(clusterName + "-" + producerName)
        .withConsumerName(clusterName + "-" + consumerName)
        .withBootstrapAddress(KafkaBridgeResources.serviceName(clusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withPort(Constants.HTTP_BRIDGE_DEFAULT_PORT)
        .withNamespaceName(namespace)
        .build();

    // Create topic
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName)
        .editMetadata().withNamespace(namespace).endMetadata()
        .build());

    // Create user
    if (auth.getType().equals(Constants.TLS_LISTENER_DEFAULT_NAME)) {
        resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterName, weirdUserName)
            .editMetadata().withNamespace(namespace).endMetadata()
            .build());
    } else {
        resourceManager.createResource(extensionContext, KafkaUserTemplates.scramShaUser(clusterName, weirdUserName)
            .editMetadata().withNamespace(namespace).endMetadata()
            .build());
    }
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespace, true, kafkaClientsName).build());

    // Deploy http bridge
    resourceManager.createResource(extensionContext,
        KafkaBridgeTemplates.kafkaBridge(clusterName, KafkaResources.tlsBootstrapAddress(clusterName), 1)
            .editMetadata().withNamespace(namespace).endMetadata()
            .withNewSpecLike(spec)
                .withBootstrapServers(KafkaResources.tlsBootstrapAddress(clusterName))
                .withNewHttp(Constants.HTTP_BRIDGE_DEFAULT_PORT)
                .withNewConsumer()
                    .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
                .endConsumer()
            .endSpec()
            .build());

    final Service service = KafkaBridgeUtils.createBridgeNodePortService(clusterName, namespace, BRIDGE_EXTERNAL_SERVICE);
    ServiceResource.createServiceResource(extensionContext, service, namespace);
    resourceManager.createResource(extensionContext, kafkaBridgeClientJob.consumerStrimziBridge());

    final String kafkaProducerExternalName = "kafka-producer-external" + new Random().nextInt(Integer.MAX_VALUE);

    final List<ListenerStatus> listenerStatusList = KafkaResource.kafkaClient().inNamespace(namespace)
        .withName(clusterName).get().getStatus().getListeners();
    final String externalBootstrapServers = listenerStatusList.stream()
        .filter(listener -> listener.getType().equals(Constants.EXTERNAL_LISTENER_DEFAULT_NAME))
        .findFirst()
        .orElseThrow(RuntimeException::new)
        .getBootstrapServers();

    final KafkaClients externalKafkaProducer = new KafkaClientsBuilder()
        .withProducerName(kafkaProducerExternalName)
        .withBootstrapAddress(externalBootstrapServers)
        .withNamespaceName(namespace)
        .withTopicName(topicName)
        .withMessageCount(100)
        .build();
    if (auth.getType().equals(Constants.TLS_LISTENER_DEFAULT_NAME)) {
        // tls producer
        resourceManager.createResource(extensionContext, externalKafkaProducer.producerTlsStrimzi(clusterName, weirdUserName));
    } else {
        // scram-sha producer
        resourceManager.createResource(extensionContext, externalKafkaProducer.producerScramShaStrimzi(clusterName, weirdUserName));
    }

    ClientUtils.waitForClientSuccess(kafkaProducerExternalName, namespace, MESSAGE_COUNT);

    // delete kafka producer job
    JobUtils.deleteJobWithWait(namespace, kafkaProducerExternalName);

    ClientUtils.waitForClientSuccess(clusterName + "-" + consumerName, namespace, MESSAGE_COUNT);
}
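The consumerStrimziBridge() job exchanges messages with the bridge over its HTTP API rather than the Kafka protocol. As a rough sketch of that interaction, the following standalone client creates a consumer, subscribes it, and polls records, following the published Strimzi bridge endpoints; the service address, group, topic, and consumer names are placeholder assumptions:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class BridgeHttpConsumerSketch {
    public static void main(String[] args) throws Exception {
        HttpClient http = HttpClient.newHttpClient();
        String bridge = "http://my-bridge-bridge-service:8080"; // placeholder service address

        // 1. Create a named consumer inside a consumer group
        HttpRequest create = HttpRequest.newBuilder()
            .uri(URI.create(bridge + "/consumers/my-group"))
            .header("Content-Type", "application/vnd.kafka.v2+json")
            .POST(HttpRequest.BodyPublishers.ofString(
                "{\"name\":\"my-consumer\",\"format\":\"json\",\"auto.offset.reset\":\"earliest\"}"))
            .build();
        http.send(create, HttpResponse.BodyHandlers.ofString());

        // 2. Subscribe the consumer to the topic
        HttpRequest subscribe = HttpRequest.newBuilder()
            .uri(URI.create(bridge + "/consumers/my-group/instances/my-consumer/subscription"))
            .header("Content-Type", "application/vnd.kafka.v2+json")
            .POST(HttpRequest.BodyPublishers.ofString("{\"topics\":[\"my-topic\"]}"))
            .build();
        http.send(subscribe, HttpResponse.BodyHandlers.ofString());

        // 3. Poll records in the JSON embedded format
        HttpRequest poll = HttpRequest.newBuilder()
            .uri(URI.create(bridge + "/consumers/my-group/instances/my-consumer/records"))
            .header("Accept", "application/vnd.kafka.json.v2+json")
            .GET()
            .build();
        System.out.println(http.send(poll, HttpResponse.BodyHandlers.ofString()).body());
    }
}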
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi-kafka-operator by strimzi.
The class ConnectBuilderIsolatedST, method testBuildPluginUsingMavenCoordinatesArtifacts.
@ParallelTest
void testBuildPluginUsingMavenCoordinatesArtifacts(ExtensionContext extensionContext) {
    final String connectClusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String imageName = getImageNameForTestCase();
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String connectorName = connectClusterName + "-camel-connector";
    final String consumerName = mapWithKafkaClientNames.get(extensionContext.getDisplayName()) + "-consumer";

    resourceManager.createResource(extensionContext,
        KafkaTopicTemplates.topic(INFRA_NAMESPACE, topicName).build(),
        KafkaConnectTemplates.kafkaConnect(extensionContext, connectClusterName, INFRA_NAMESPACE, INFRA_NAMESPACE, 1, false)
            .editMetadata()
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .editOrNewSpec()
                .addToConfig("key.converter.schemas.enable", false)
                .addToConfig("value.converter.schemas.enable", false)
                .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                .withNewBuild()
                    .withPlugins(PLUGIN_WITH_MAVEN_TYPE)
                    .withNewDockerOutput().withImage(imageName).endDockerOutput()
                .endBuild()
            .endSpec()
            .build());

    Map<String, Object> connectorConfig = new HashMap<>();
    connectorConfig.put("topics", topicName);
    connectorConfig.put("camel.source.path.timerName", "timer");

    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(connectorName, connectClusterName)
        .editOrNewSpec()
            .withClassName(CAMEL_CONNECTOR_TIMER_CLASS_NAME)
            .withConfig(connectorConfig)
        .endSpec()
        .build());

    KafkaClients kafkaClient = new KafkaClientsBuilder()
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(INFRA_NAMESPACE))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withDelayMs(0)
        .build();

    resourceManager.createResource(extensionContext, kafkaClient.consumerStrimzi());
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
}
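PLUGIN_WITH_MAVEN_TYPE is a constant defined elsewhere in the class. A plausible shape, assuming Strimzi's connect-build API; the plugin name and the Camel connector coordinates below are illustrative, not the test's actual values:

import io.strimzi.api.kafka.model.connect.build.MavenArtifactBuilder;
import io.strimzi.api.kafka.model.connect.build.Plugin;
import io.strimzi.api.kafka.model.connect.build.PluginBuilder;

// Illustrative Maven-coordinate plugin definition; group/artifact/version are assumptions
Plugin pluginWithMavenType = new PluginBuilder()
    .withName("camel-timer-connector")
    .withArtifacts(new MavenArtifactBuilder()
        .withGroup("org.apache.camel.kafkaconnector")
        .withArtifact("camel-timer-kafka-connector")
        .withVersion("0.11.0")
        .build())
    .build();

With a maven-type artifact, the Connect build downloads the coordinates from Maven Central and bakes the plugin into the image pushed to the configured DockerOutput.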
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi-kafka-operator by strimzi.
The class ConfigProviderST, method testConnectWithConnectorUsingConfigAndEnvProvider.
@ParallelNamespaceTest
void testConnectWithConnectorUsingConfigAndEnvProvider(ExtensionContext extensionContext) {
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String producerName = "producer-" + ClientUtils.generateRandomConsumerGroup();
    final String customFileSinkPath = "/tmp/my-own-path.txt";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    Map<String, String> configData = new HashMap<>();
    configData.put("topics", topicName);
    configData.put("file", customFileSinkPath);
    configData.put("key", "org.apache.kafka.connect.storage.StringConverter");
    configData.put("value", "org.apache.kafka.connect.storage.StringConverter");

    String cmName = "connector-config";
    String configRoleName = "connector-config-role";

    ConfigMap connectorConfig = new ConfigMapBuilder()
        .editOrNewMetadata().withName(cmName).endMetadata()
        .withData(configData)
        .build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).create(connectorConfig);

    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1, false)
        .editOrNewMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .editOrNewSpec()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("config.providers", "configmaps,env")
            .addToConfig("config.providers.configmaps.class", "io.strimzi.kafka.KubernetesConfigMapConfigProvider")
            .addToConfig("config.providers.env.class", "io.strimzi.kafka.EnvVarConfigProvider")
            .editOrNewExternalConfiguration()
                .addNewEnv()
                    .withName("FILE_SINK_FILE")
                    .withNewValueFrom()
                        .withNewConfigMapKeyRef("file", cmName, false)
                    .endValueFrom()
                .endEnv()
            .endExternalConfiguration()
        .endSpec()
        .build());
    LOGGER.info("Creating needed RoleBinding and Role for Kubernetes Config Provider");
    ResourceManager.getInstance().createResource(extensionContext, new RoleBindingBuilder()
        .editOrNewMetadata()
            .withName("connector-config-rb")
            .withNamespace(namespaceName)
        .endMetadata()
        .withSubjects(new SubjectBuilder()
            .withKind("ServiceAccount")
            .withName(clusterName + "-connect")
            .withNamespace(namespaceName)
            .build())
        .withRoleRef(new RoleRefBuilder()
            .withKind("Role")
            .withName(configRoleName)
            .withApiGroup("rbac.authorization.k8s.io")
            .build())
        .build());

    // create a role
    Role configRole = new RoleBuilder()
        .editOrNewMetadata().withName(configRoleName).withNamespace(namespaceName).endMetadata()
        .addNewRule()
            .withApiGroups("")
            .withResources("configmaps")
            .withResourceNames(cmName)
            .withVerbs("get")
        .endRule()
        .build();
    kubeClient().getClient().resource(configRole).createOrReplace();

    String configPrefix = "configmaps:" + namespaceName + "/connector-config:";

    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
        .editSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
            .addToConfig("file", "${env:FILE_SINK_FILE}")
            .addToConfig("key.converter", "${" + configPrefix + "key}")
            .addToConfig("value.converter", "${" + configPrefix + "value}")
            .addToConfig("topics", "${" + configPrefix + "topics}")
        .endSpec()
        .build());

    KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withDelayMs(0)
        .withNamespaceName(namespaceName)
        .build();
    resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());

    String kafkaConnectPodName = kubeClient().listPods(namespaceName, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName();
    KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(namespaceName, kafkaConnectPodName, customFileSinkPath, "Hello-world - 99");
}
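For context, the ${env:...} and ${configmaps:...} placeholders are resolved through Kafka's ConfigProvider SPI: Connect parses each placeholder into a provider alias, a path, and a key, then asks the provider registered under that alias for the value. A minimal sketch of that lookup, using the real SPI types but a hypothetical class and method name:

import java.util.Set;
import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.ConfigProvider;

public class PlaceholderResolutionSketch {
    // For "${configmaps:my-namespace/connector-config:topics}", Connect calls the
    // provider registered under the alias "configmaps" with the path
    // "my-namespace/connector-config" and the key set {"topics"}, then splices the
    // returned value into the connector configuration.
    static String resolve(ConfigProvider provider, String path, String key) {
        ConfigData data = provider.get(path, Set.of(key));
        return data.data().get(key);
    }
}

This is why the Role above only needs "get" on the one ConfigMap: the KubernetesConfigMapConfigProvider reads it at connector start-up using the Connect service account.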
Use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder in project strimzi-kafka-operator by strimzi.
The class QuotasST, method testKafkaQuotasPluginIntegration.
/**
* Test to check Kafka Quotas Plugin for disk space
*/
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testKafkaQuotasPluginIntegration(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String producerName = "quotas-producer";
    final String consumerName = "quotas-consumer";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 1)
        .editSpec()
            .editKafka()
                .addToConfig("client.quota.callback.class", "io.strimzi.kafka.quotas.StaticQuotaCallback")
                .addToConfig("client.quota.callback.static.storage.hard", "55000000")
                .addToConfig("client.quota.callback.static.storage.soft", "50000000")
                .addToConfig("client.quota.callback.static.storage.check-interval", "5")
                .withNewPersistentClaimStorage().withSize("1Gi").endPersistentClaimStorage()
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    // Send far more data than the disk can store (100M messages of ~1 KB each) to see if the integration works
    KafkaClients basicClients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withTopicName(topicName)
        .withMessageCount(100000000)
        .withDelayMs(0)
        .withMessage(String.join("", Collections.nCopies(1000, "#")))
        .build();
    resourceManager.createResource(extensionContext, basicClients.producerStrimzi());

    // With the configured throughput, the Kafka Quotas Plugin should throttle the producer to a halt within
    // around 10-20 seconds; the blocked job neither succeeds nor fails, so waiting for a job failure times out
    assertThrows(WaitException.class, () -> JobUtils.waitForJobFailure(producerName, INFRA_NAMESPACE, 120_000));
    String kafkaLog = kubeClient(namespaceName).logs(KafkaResources.kafkaPodName(clusterName, 0));
    String softLimitLog = "disk is beyond soft limit";
    String hardLimitLog = "disk is full";
    assertThat("Kafka log doesn't contain '" + softLimitLog + "' log", kafkaLog, CoreMatchers.containsString(softLimitLog));
    assertThat("Kafka log doesn't contain '" + hardLimitLog + "' log", kafkaLog, CoreMatchers.containsString(hardLimitLog));
}