Use of io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient in project strimzi by strimzi.
The class MirrorMakerIsolatedST, method testMirrorMaker.
@ParallelNamespaceTest
void testMirrorMaker(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
String kafkaClusterSourceName = clusterName + "-source";
String kafkaClusterTargetName = clusterName + "-target";
Map<String, String> jvmOptionsXX = new HashMap<>();
jvmOptionsXX.put("UseG1GC", "true");
String topicSourceName = TOPIC_NAME + "-source" + "-" + rng.nextInt(Integer.MAX_VALUE);
// Deploy the source Kafka cluster
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build());
// Deploy the target Kafka cluster
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
// Deploy the source topic
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, topicSourceName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespaceName, false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kafkaClientsPodName)
    .withTopicName("topic-for-test-broker-1")
    .withNamespaceName(namespaceName)
    .withClusterName(kafkaClusterSourceName)
    .withMessageCount(messagesCount)
    .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
    .build();
// Check broker availability
internalKafkaClient.produceAndConsumesPlainMessagesUntilBothOperationsAreSuccessful();
internalKafkaClient = internalKafkaClient.toBuilder().withTopicName("topic-for-test-broker-2").withClusterName(kafkaClusterTargetName).build();
internalKafkaClient.produceAndConsumesPlainMessagesUntilBothOperationsAreSuccessful();
// Deploy Mirror Maker
resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(clusterName, kafkaClusterSourceName, kafkaClusterTargetName, ClientUtils.generateRandomConsumerGroup(), 1, false)
    .editSpec()
        .withResources(new ResourceRequirementsBuilder()
            .addToLimits("memory", new Quantity("400M"))
            .addToLimits("cpu", new Quantity("2"))
            .addToRequests("memory", new Quantity("300M"))
            .addToRequests("cpu", new Quantity("1"))
            .build())
        .withNewJvmOptions()
            .withXmx("200m")
            .withXms("200m")
            .withXx(jvmOptionsXX)
        .endJvmOptions()
    .endSpec()
    .build());
verifyLabelsOnPods(namespaceName, clusterName, "mirror-maker", "KafkaMirrorMaker");
verifyLabelsForService(namespaceName, clusterName, "mirror-maker", "KafkaMirrorMaker");
verifyLabelsForConfigMaps(namespaceName, kafkaClusterSourceName, null, kafkaClusterTargetName);
verifyLabelsForServiceAccounts(namespaceName, kafkaClusterSourceName, null);
String mirrorMakerPodName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaMirrorMakerResources.deploymentName(clusterName)).get(0).getMetadata().getName();
String kafkaMirrorMakerLogs = kubeClient(namespaceName).logs(mirrorMakerPodName);
assertThat(kafkaMirrorMakerLogs, not(containsString("keytool error: java.io.FileNotFoundException: /opt/kafka/consumer-oauth-certs/**/* (No such file or directory)")));
String podName = kubeClient(namespaceName).listPodsByNamespace(namespaceName, clusterName).stream()
    .filter(n -> n.getMetadata().getName().startsWith(KafkaMirrorMakerResources.deploymentName(clusterName)))
    .findFirst()
    .orElseThrow()
    .getMetadata().getName();
assertResources(namespaceName, podName, clusterName.concat("-mirror-maker"), "400M", "2", "300M", "1");
assertExpectedJavaOpts(namespaceName, podName, KafkaMirrorMakerResources.deploymentName(clusterName), "-Xmx200m", "-Xms200m", "-XX:+UseG1GC");
internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(topicSourceName).withClusterName(kafkaClusterSourceName).build();
int sent = internalKafkaClient.sendMessagesPlain();
internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
internalKafkaClient = internalKafkaClient.toBuilder().withClusterName(kafkaClusterTargetName).build();
internalKafkaClient.consumesPlainMessagesUntilOperationIsSuccessful(sent);
}
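Note on the JVM options above: the assertion expects -Xmx200m, -Xms200m and -XX:+UseG1GC, i.e. the boolean entry "UseG1GC" -> "true" in the XX map ends up rendered as a +/- style -XX flag. A minimal standalone sketch of that rendering convention (the helper below is hypothetical, not Strimzi's actual implementation):

import java.util.LinkedHashMap;
import java.util.Map;

public class XxFlagSketch {
    // Hypothetical rendering of "-XX" map entries, matching what the test asserts:
    // boolean values become -XX:+Name / -XX:-Name, everything else -XX:Name=value.
    static String renderXxOption(String name, String value) {
        if ("true".equalsIgnoreCase(value)) {
            return "-XX:+" + name;
        }
        if ("false".equalsIgnoreCase(value)) {
            return "-XX:-" + name;
        }
        return "-XX:" + name + "=" + value;
    }

    public static void main(String[] args) {
        Map<String, String> jvmOptionsXX = new LinkedHashMap<>();
        jvmOptionsXX.put("UseG1GC", "true");
        jvmOptionsXX.forEach((k, v) -> System.out.println(renderXxOption(k, v)));
        // prints: -XX:+UseG1GC
    }
}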
Use of io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient in project strimzi by strimzi.
The class NamespaceDeletionRecoveryIsolatedST, method testTopicNotAvailable.
/**
 * In case we don't have the KafkaTopic resources from before the cluster loss, we follow these steps:
 * 1. deploy the Kafka cluster without the Topic Operator - otherwise the topics would be deleted
 * 2. delete the topic store topics - `__strimzi-topic-operator-kstreams-topic-store-changelog` and `__strimzi_store_topic`
 * 3. enable the Topic Operator by redeploying the Kafka cluster
 * @throws InterruptedException when one of the fixed sleeps is interrupted
 */
@IsolatedTest("Each test case needs its own Cluster Operator")
@Tag(INTERNAL_CLIENTS_USED)
void testTopicNotAvailable(ExtensionContext extensionContext) throws InterruptedException {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
prepareEnvironmentForRecovery(extensionContext, topicName);
// Wait until the consumer offsets topic is created
KafkaTopicUtils.waitForKafkaTopicCreationByNamePrefix("consumer-offsets");
// Get the list of PVCs needed for recovery
List<PersistentVolumeClaim> persistentVolumeClaimList = kubeClient().getClient().persistentVolumeClaims().list().getItems();
deleteAndRecreateNamespace();
recreatePvcAndUpdatePv(persistentVolumeClaimList);
recreateClusterOperator(extensionContext);
// Recreate Kafka Cluster
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
    .editSpec()
        .editKafka()
            .withNewPersistentClaimStorage()
                .withSize("1Gi")
                .withStorageClass(storageClassName)
            .endPersistentClaimStorage()
        .endKafka()
        .editZookeeper()
            .withNewPersistentClaimStorage()
                .withSize("1Gi")
                .withStorageClass(storageClassName)
            .endPersistentClaimStorage()
        .endZookeeper()
        .withNewEntityOperator()
        .endEntityOperator()
    .endSpec()
    .build());
// Wait some time after Kafka is ready before deleting the topic store topics
Thread.sleep(60000);
// Remove all topic data from the topic store
String deleteTopicStoreTopics = "./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi-topic-operator-kstreams-topic-store-changelog --delete"
    + " && ./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi_store_topic --delete";
cmdKubeClient(INFRA_NAMESPACE).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", deleteTopicStoreTopics);
// Wait until the exec command finishes
Thread.sleep(30000);
KafkaResource.replaceKafkaResource(clusterName, k -> {
k.getSpec().setEntityOperator(new EntityOperatorSpecBuilder().withNewTopicOperator().endTopicOperator().withNewUserOperator().endUserOperator().build());
});
DeploymentUtils.waitForDeploymentAndPodsReady(KafkaResources.entityOperatorDeploymentName(clusterName), 1);
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
String defaultKafkaClientsPodName = ResourceManager.kubeClient().listPodsByPrefixInName(clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(defaultKafkaClientsPodName)
    .withTopicName(topicName)
    .withNamespaceName(INFRA_NAMESPACE)
    .withClusterName(clusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
    .build();
LOGGER.info("Checking produced and consumed messages to pod:{}", internalKafkaClient.getPodName());
internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
}
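The fixed Thread.sleep(60000) and Thread.sleep(30000) calls above make the test timing-dependent. A small self-contained polling helper is one alternative; this is only a sketch (the helper name and signature are made up, not part of the Strimzi test framework):

import java.time.Duration;
import java.util.function.BooleanSupplier;

// Hypothetical helper: poll a condition until it holds or a timeout expires,
// instead of sleeping for a fixed 60s/30s as in the test above.
final class WaitSketch {
    static void waitFor(String description, Duration timeout, Duration interval, BooleanSupplier condition)
            throws InterruptedException {
        long deadline = System.nanoTime() + timeout.toNanos();
        while (!condition.getAsBoolean()) {
            if (System.nanoTime() > deadline) {
                throw new IllegalStateException("Timed out waiting for " + description);
            }
            Thread.sleep(interval.toMillis());
        }
    }
}

With such a helper, the 30-second pause could instead wait until the exec result confirms that both topic store topics are gone.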
Use of io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient in project strimzi by strimzi.
The class AbstractNamespaceST, method deployKafkaConnectorWithSink.
void deployKafkaConnectorWithSink(ExtensionContext extensionContext, String clusterName, String namespace, String topicName, String connectLabel, String sharedKafkaClusterName) {
// Deploy Kafka Connector
Map<String, Object> connectorConfig = new HashMap<>();
connectorConfig.put("topics", topicName);
connectorConfig.put("file", Constants.DEFAULT_SINK_FILE_PATH);
connectorConfig.put("key.converter", "org.apache.kafka.connect.storage.StringConverter");
connectorConfig.put("value.converter", "org.apache.kafka.connect.storage.StringConverter");
String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
    .editSpec()
        .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
        .withConfig(connectorConfig)
    .endSpec()
    .build());
KafkaConnectorUtils.waitForConnectorReady(clusterName);
String kafkaConnectPodName = kubeClient().listPods(clusterName, Labels.STRIMZI_KIND_LABEL, connectLabel).get(0).getMetadata().getName();
KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(kafkaConnectPodName);
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = kubeClient().listPodsByPrefixInName(kafkaClientsName).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(kafkaClientsPodName)
    .withTopicName(topicName)
    .withNamespaceName(namespace)
    .withClusterName(sharedKafkaClusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
    .build();
int sent = internalKafkaClient.sendMessagesPlain();
assertThat(sent, is(MESSAGE_COUNT));
// Wait until the sink file contains a record ending with "99"
KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(kafkaConnectPodName, Constants.DEFAULT_SINK_FILE_PATH, "99");
}
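waitForMessagesInKafkaConnectFileSink polls the sink file inside the Connect pod until the expected record shows up. A rough equivalent of that check, reusing the execInPod call pattern from the snippets above and assuming the returned ExecResult exposes out():

// Sketch: read the FileStreamSinkConnector output file in the Connect pod
// and assert that the last produced record arrived.
String sinkFileContent = cmdKubeClient(namespace)
    .execInPod(kafkaConnectPodName, "/bin/bash", "-c", "cat " + Constants.DEFAULT_SINK_FILE_PATH)
    .out();
assertThat(sinkFileContent, containsString("99"));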
Use of io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient in project strimzi by strimzi.
The class RollingUpdateST, method testZookeeperScaleUpScaleDown.
@ParallelNamespaceTest
@Tag(SCALABILITY)
void testZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
KafkaUser user = KafkaUserTemplates.tlsUser(namespaceName, clusterName, userName).build();
resourceManager.createResource(extensionContext, user);
// Kafka cluster is already deployed
LOGGER.info("Running zookeeperScaleUpScaleDown with cluster {}", clusterName);
final int initialZkReplicas = kubeClient(namespaceName).getClient().pods().inNamespace(namespaceName).withLabelSelector(zkSelector).list().getItems().size();
assertThat(initialZkReplicas, is(3));
resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());
final String defaultKafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(defaultKafkaClientsPodName)
    .withTopicName(topicName)
    .withNamespaceName(namespaceName)
    .withClusterName(clusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withKafkaUsername(userName)
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
internalKafkaClient.produceTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);
final int scaleZkTo = initialZkReplicas + 4;
final List<String> newZkPodNames = new ArrayList<String>() {
{
for (int i = initialZkReplicas; i < scaleZkTo; i++) {
add(KafkaResources.zookeeperPodName(clusterName, i));
}
}
};
LOGGER.info("Scale up Zookeeper to {}", scaleZkTo);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getZookeeper().setReplicas(scaleZkTo), namespaceName);
int received = internalKafkaClient.receiveMessagesTls();
assertThat(received, is(MESSAGE_COUNT));
RollingUpdateUtils.waitForComponentAndPodsReady(namespaceName, zkSelector, scaleZkTo);
// Check that every ZooKeeper pod, including the new ones, is in either leader or follower state
KafkaUtils.waitForZkMntr(namespaceName, clusterName, ZK_SERVER_STATE, 0, 1, 2, 3, 4, 5, 6);
internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);
// Create a new topic to ensure that ZooKeeper is working properly
String scaleUpTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, scaleUpTopicName, 1, 1).build());
internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(scaleUpTopicName).withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();
// scale down
LOGGER.info("Scale down Zookeeper to {}", initialZkReplicas);
// Get the UID of the last scaled-up pod (index 6) before the scale-down removes it
String uid = kubeClient(namespaceName).getPodUid(newZkPodNames.get(3));
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getZookeeper().setReplicas(initialZkReplicas), namespaceName);
RollingUpdateUtils.waitForComponentAndPodsReady(namespaceName, zkSelector, initialZkReplicas);
internalKafkaClient = internalKafkaClient.toBuilder().withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
// Wait until one ZooKeeper pod becomes the leader and the others become followers
KafkaUtils.waitForZkMntr(namespaceName, clusterName, ZK_SERVER_STATE, 0, 1, 2);
internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);
// Create a new topic to ensure that ZooKeeper is working properly
String scaleDownTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, scaleDownTopicName, 1, 1).build());
internalKafkaClient = internalKafkaClient.toBuilder().withTopicName(scaleDownTopicName).withConsumerGroupName(ClientUtils.generateRandomConsumerGroup()).build();
internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();
// Test that the removed pod has the 'Killing' event
assertThat(kubeClient(namespaceName).listEventsByResourceUid(uid), hasAllOfReasons(Killing));
}
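waitForZkMntr above uses ZooKeeper's four-letter mntr command; each pod's zk_server_state line must report leader or follower. A standalone sketch of that probe against a plaintext client port (host and port are placeholders; Strimzi's own check runs inside the pods):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

// Send ZooKeeper's four-letter "mntr" command and extract zk_server_state,
// which is "leader" or "follower" on a healthy ensemble member.
public class ZkStateProbe {
    static String serverState(String host, int port) throws Exception {
        try (Socket socket = new Socket(host, port)) {
            OutputStream out = socket.getOutputStream();
            out.write("mntr".getBytes(StandardCharsets.UTF_8));
            out.flush();
            socket.shutdownOutput();
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) {
                return reader.lines()
                    .filter(line -> line.startsWith("zk_server_state"))
                    .map(line -> line.split("\\s+", 2)[1])
                    .findFirst()
                    .orElse("unknown");
            }
        }
    }
}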
Use of io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient in project strimzi by strimzi.
The class CustomAuthorizerST, method testAclWithSuperUser.
@ParallelTest
@Tag(INTERNAL_CLIENTS_USED)
void testAclWithSuperUser(ExtensionContext extensionContext) {
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CLUSTER_NAME, topicName, namespace).build());
KafkaUser adminUser = KafkaUserTemplates.tlsUser(namespace, CLUSTER_NAME, ADMIN)
    .editSpec()
        .withNewKafkaUserAuthorizationSimple()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(topicName)
                .endAclRuleTopicResource()
                .withOperation(AclOperation.WRITE)
            .endAcl()
            .addNewAcl()
                .withNewAclRuleTopicResource()
                    .withName(topicName)
                .endAclRuleTopicResource()
                .withOperation(AclOperation.DESCRIBE)
            .endAcl()
        .endKafkaUserAuthorizationSimple()
    .endSpec()
    .build();
resourceManager.createResource(extensionContext, adminUser);
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(namespace, true, kafkaClientsName, adminUser).build());
String kafkaClientsPodName = kubeClient(namespace).listPodsByPrefixInName(namespace, kafkaClientsName).get(0).getMetadata().getName();
LOGGER.info("Checking kafka super user:{} that is able to send messages to topic:{}", ADMIN, topicName);
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withTopicName(topicName)
    .withNamespaceName(namespace)
    .withClusterName(CLUSTER_NAME)
    .withKafkaUsername(ADMIN)
    .withMessageCount(MESSAGE_COUNT)
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .withUsingPodName(kafkaClientsPodName)
    .build();
assertThat(internalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));
LOGGER.info("Checking kafka super user:{} that is able to read messages to topic:{} regardless that " + "we configured Acls with only write operation", ADMIN, TOPIC_NAME);
assertThat(internalKafkaClient.receiveMessagesTls(), is(MESSAGE_COUNT));
}
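The read succeeds despite the write/describe-only ACLs because ADMIN is configured as a super user for the authorizer under test. At the broker level this corresponds to Kafka's super.users property, whose principals bypass ACL evaluation; the snippet below is purely illustrative of that mapping (in Strimzi these values are set through the Kafka CR's authorization section, not as raw broker config):

// Illustrative only: principals listed in super.users bypass ACL checks.
// The exact principal name depends on the TLS user's certificate subject.
Map<String, Object> authorizerConfig = new HashMap<>();
authorizerConfig.put("super.users", "User:CN=" + ADMIN);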