
Example 6 with KafkaAdminClientsBuilder

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder in project strimzi by strimzi.

the class ThrottlingQuotaST method testThrottlingQuotasDuringAllTopicOperations.

@ParallelTest
void testThrottlingQuotasDuringAllTopicOperations(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);
    final String createAdminName = "create-" + testStorage.getAdminName();
    final String alterAdminName = "alter-" + testStorage.getAdminName();
    final String deleteAdminName = "delete-" + testStorage.getAdminName();
    final String listAdminName = "list-" + testStorage.getAdminName();
    final String kafkaPodName = KafkaResources.kafkaPodName(sharedTestStorage.getClusterName(), 0);
    final String plainBootstrapName = KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName());
    int numOfTopics = 25;
    int numOfPartitions = 100;
    int iterations = numOfTopics / 5;
    KafkaAdminClients createTopicJob = adminClientsBuilder
        .withAdminName(createAdminName)
        .withTopicName(testStorage.getTopicName())
        .withTopicCount(numOfTopics)
        .withPartitions(numOfPartitions)
        .withAdminOperation(AdminClientOperation.CREATE_TOPICS)
        .build();
    LOGGER.info("Creating {} topics with {} partitions, we should hit the quota", numOfTopics, numOfPartitions);
    resourceManager.createResource(extensionContext, createTopicJob.defaultAdmin());
    ClientUtils.waitForClientContainsMessage(createAdminName, testStorage.getNamespaceName(), THROTTLING_ERROR_MSG);
    KafkaTopicUtils.deleteAllKafkaTopicsByPrefixWithWait(testStorage.getNamespaceName(), testStorage.getTopicName());
    // we need to wait for all KafkaTopics to be deleted from Kafka before proceeding - using Kafka pod cli (with AdminClient props)
    KafkaTopicUtils.waitForTopicsByPrefixDeletionUsingPodCli(testStorage.getNamespaceName(), testStorage.getTopicName(), plainBootstrapName, kafkaPodName, createTopicJob.getAdditionalConfig());
    numOfPartitions = 5;
    createTopicJob = new KafkaAdminClientsBuilder(createTopicJob).withPartitions(numOfPartitions).build();
    LOGGER.info("Creating {} topics with {} partitions, the quota should not be exceeded", numOfTopics, numOfPartitions);
    resourceManager.createResource(extensionContext, createTopicJob.defaultAdmin());
    ClientUtils.waitForClientContainsMessage(createAdminName, testStorage.getNamespaceName(), "All topics created");
    KafkaAdminClients listTopicJob = new KafkaAdminClientsBuilder(createTopicJob)
        .withAdminName(listAdminName)
        .withTopicName("")
        .withAdminOperation(AdminClientOperation.LIST_TOPICS)
        .build();
    LOGGER.info("Listing topics after creation");
    resourceManager.createResource(extensionContext, listTopicJob.defaultAdmin());
    ClientUtils.waitForClientContainsMessage(listAdminName, testStorage.getNamespaceName(), testStorage.getTopicName() + "-" + (numOfTopics - 1));
    int partitionAlter = 25;
    KafkaAdminClients alterTopicsJob = new KafkaAdminClientsBuilder(createTopicJob)
        .withAdminName(alterAdminName)
        .withPartitions(partitionAlter)
        .withAdminOperation(AdminClientOperation.UPDATE_TOPICS)
        .build();
    LOGGER.info("Altering {} topics - setting partitions to {} - we should hit the quota", numOfTopics, partitionAlter);
    // increasing the partition count on all topics at once should exceed the controller mutation quota
    resourceManager.createResource(extensionContext, alterTopicsJob.defaultAdmin());
    ClientUtils.waitForClientContainsMessage(alterAdminName, testStorage.getNamespaceName(), THROTTLING_ERROR_MSG);
    // set a higher partition count, in case some topics were already altered to 25 partitions before the quota was hit
    partitionAlter = 30;
    int numOfTopicsIter = 5;
    alterTopicsJob = new KafkaAdminClientsBuilder(alterTopicsJob).withPartitions(partitionAlter).withTopicCount(numOfTopicsIter).build();
    for (int i = 0; i < iterations; i++) {
        alterTopicsJob = new KafkaAdminClientsBuilder(alterTopicsJob).withTopicCount(numOfTopicsIter).withTopicOffset(numOfTopicsIter * i).build();
        LOGGER.info("Altering {} topics with offset {} - setting partitions to {} - we should not hit the quota", numOfTopicsIter, numOfTopicsIter * i, partitionAlter);
        resourceManager.createResource(extensionContext, alterTopicsJob.defaultAdmin());
        ClientUtils.waitForClientContainsMessage(alterAdminName, testStorage.getNamespaceName(), "All topics altered");
    }
    // delete a few topics
    KafkaAdminClients deleteTopicsJob = adminClientsBuilder
        .withTopicName(testStorage.getTopicName())
        .withAdminName(deleteAdminName)
        .withAdminOperation(AdminClientOperation.DELETE_TOPICS)
        .withTopicCount(numOfTopicsIter)
        .build();
    LOGGER.info("Deleting first {} topics, we will not hit the quota", numOfTopicsIter);
    resourceManager.createResource(extensionContext, deleteTopicsJob.defaultAdmin());
    ClientUtils.waitForClientContainsMessage(deleteAdminName, testStorage.getNamespaceName(), "Successfully removed all " + numOfTopicsIter);
    int remainingTopics = numOfTopics - numOfTopicsIter;
    deleteTopicsJob = new KafkaAdminClientsBuilder(deleteTopicsJob).withTopicCount(remainingTopics).withTopicOffset(numOfTopicsIter).build();
    LOGGER.info("Trying to remove all remaining {} topics with offset of {} - we should hit the quota", remainingTopics, numOfTopicsIter);
    resourceManager.createResource(extensionContext, deleteTopicsJob.defaultAdmin());
    ClientUtils.waitForClientContainsMessage(deleteAdminName, testStorage.getNamespaceName(), THROTTLING_ERROR_MSG);
    LOGGER.info("Because we hit quota, removing the remaining topics through console");
    KafkaTopicUtils.deleteAllKafkaTopicsByPrefixWithWait(testStorage.getNamespaceName(), testStorage.getTopicName());
    // we need to wait for all KafkaTopics to be deleted from Kafka before proceeding - using Kafka pod cli (with AdminClient props)
    KafkaTopicUtils.waitForTopicsByPrefixDeletionUsingPodCli(testStorage.getNamespaceName(), testStorage.getTopicName(), plainBootstrapName, kafkaPodName, createTopicJob.getAdditionalConfig());
    // List topics after deletion
    resourceManager.createResource(extensionContext, listTopicJob.defaultAdmin());
    ClientUtils.waitForClientSuccess(listAdminName, testStorage.getNamespaceName(), 0, false);
    String listPodName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), listAdminName);
    String afterDeletePodLogs = kubeClient().logsInSpecificNamespace(testStorage.getNamespaceName(), listPodName);
    assertFalse(afterDeletePodLogs.contains(testStorage.getTopicName()));
    JobUtils.deleteJobWithWait(testStorage.getNamespaceName(), listAdminName);
}
Also used : KafkaAdminClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClients) KafkaAdminClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)
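The THROTTLING_ERROR_MSG that the create, alter and delete jobs wait for comes from the Kafka Admin API rejecting topic mutations with ThrottlingQuotaExceededException once the user's controller mutation quota is spent. The KafkaAdminClients job hides that behind the builder shown above; what follows is only a minimal, hypothetical sketch of the same create step against a plain Kafka Admin client (the bootstrap address, topic name and fail-fast option are illustrative, not taken from the test code):

import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.ThrottlingQuotaExceededException;

public class CreateTopicsUnderQuotaSketch {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // illustrative bootstrap address; the tests use the cluster's plain bootstrap service
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9092");

        try (Admin admin = Admin.create(props)) {
            // a topic with many partitions counts as many mutations against the quota
            List<NewTopic> topics = List.of(new NewTopic("quota-topic-0", 100, (short) 3));
            // fail fast instead of retrying internally when the mutation quota is exceeded
            CreateTopicsOptions options = new CreateTopicsOptions().retryOnQuotaViolation(false);
            try {
                admin.createTopics(topics, options).all().get();
                System.out.println("All topics created");
            } catch (ExecutionException e) {
                if (e.getCause() instanceof ThrottlingQuotaExceededException) {
                    // this is the condition the test greps the job log for
                    System.out.println(e.getCause().getMessage());
                } else {
                    throw e;
                }
            }
        }
    }
}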

Example 7 with KafkaAdminClientsBuilder

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder in project strimzi-kafka-operator by strimzi.

the class ThrottlingQuotaST method setup.

@BeforeAll
void setup(ExtensionContext extensionContext) {
    sharedTestStorage = new TestStorage(extensionContext, namespace);
    // Deploy Kafka with a SCRAM-SHA-512 plain listener and a TLS listener
    LOGGER.info("Deploying shared Kafka across all test cases in {} namespace", sharedTestStorage.getNamespaceName());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(sharedTestStorage.getClusterName(), 3)
        .editMetadata()
            .withNamespace(sharedTestStorage.getNamespaceName())
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationScramSha512Auth()
                        .endKafkaListenerAuthenticationScramSha512Auth()
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                        .withPort(9093)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(true)
                        .withNewKafkaListenerAuthenticationTlsAuth()
                        .endKafkaListenerAuthenticationTlsAuth()
                        .build())
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.defaultUser(sharedTestStorage.getNamespaceName(), sharedTestStorage.getClusterName(), sharedTestStorage.getUserName())
        .editOrNewSpec()
            .withNewQuotas()
                .withControllerMutationRate(1.0)
            .endQuotas()
            .withAuthentication(new KafkaUserScramSha512ClientAuthentication())
        .endSpec()
        .build());
    adminClientsBuilder = new KafkaAdminClientsBuilder()
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName()))
        .withNamespaceName(sharedTestStorage.getNamespaceName())
        .withAdditionalConfig(KafkaAdminClients.getAdminClientScramConfig(sharedTestStorage.getNamespaceName(), sharedTestStorage.getUserName(), 240000));
}
Also used : GenericKafkaListenerBuilder(io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder) KafkaAdminClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder) TestStorage(io.strimzi.systemtest.storage.TestStorage) KafkaUserScramSha512ClientAuthentication(io.strimzi.api.kafka.model.KafkaUserScramSha512ClientAuthentication) BeforeAll(org.junit.jupiter.api.BeforeAll)
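The fluent KafkaUserTemplates call above boils down to a KafkaUser resource whose quotas section carries the controller mutation rate being tested. As a hedged sketch, the resource looks roughly like this (names and the exact apiVersion depend on the Strimzi version in use):

apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
  name: my-user                      # illustrative; the tests derive the name from TestStorage
  labels:
    strimzi.io/cluster: my-cluster   # must match the Kafka cluster name
spec:
  authentication:
    type: scram-sha-512
  quotas:
    controllerMutationRate: 1.0      # topic/partition mutations accepted per second for this user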


Example 9 with KafkaAdminClientsBuilder

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder in project strimzi-kafka-operator by strimzi.

the class ThrottlingQuotaST method testThrottlingQuotasCreateAlterPartitions.

@ParallelTest
void testThrottlingQuotasCreateAlterPartitions(ExtensionContext extensionContext) {
    final String kafkaUsername = mapWithTestUsers.get(extensionContext.getDisplayName());
    final String createAdminName = "create-admin-" + mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String alterAdminName = "alter-admin-" + mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String topicNamePrefix = classTopicPrefix + "-partitions";
    int topicsCount = 50;
    int topicPartitions = 100;
    setupKafkaUserInNamespace(extensionContext, kafkaUsername);
    KafkaAdminClients adminClientJob = new KafkaAdminClientsBuilder()
        .withAdminName(createAdminName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME))
        .withTopicName(topicNamePrefix)
        .withTopicCount(topicsCount)
        .withPartitions(topicPartitions)
        .withNamespaceName(namespace)
        .withTopicOperation(AdminClientOperations.CREATE_TOPICS.toString())
        .withAdditionalConfig(KafkaAdminClients.getAdminClientScramConfig(namespace, kafkaUsername, 240000))
        .build();
    resourceManager.createResource(extensionContext, adminClientJob.defaultAdmin());
    String createPodName = kubeClient(namespace).listPodNamesInSpecificNamespace(namespace, "job-name", createAdminName).get(0);
    PodUtils.waitUntilMessageIsInPodLogs(namespace, createPodName, "org.apache.kafka.common.errors.ThrottlingQuotaExceededException: The throttling quota has been exceeded.", Constants.GLOBAL_TIMEOUT);
    JobUtils.deleteJobWithWait(namespace, createAdminName);
    // Delete created topics (as they were created in random order, we have to use KafkaTopic instead of this client)
    KafkaTopicUtils.deleteAllKafkaTopicsWithPrefix(namespace, topicNamePrefix);
    // Throttling quota when altering partitions on existing topics
    int topicAlter = 20;
    adminClientJob = new KafkaAdminClientsBuilder(adminClientJob).withTopicCount(topicAlter).withPartitions(1).build();
    resourceManager.createResource(extensionContext, adminClientJob.defaultAdmin());
    createPodName = kubeClient(namespace).listPodNamesInSpecificNamespace(namespace, "job-name", createAdminName).get(0);
    PodUtils.waitUntilMessageIsInPodLogs(namespace, createPodName, "All topics created", GLOBAL_TIMEOUT);
    JobUtils.deleteJobWithWait(namespace, createAdminName);
    // Alter all topics to 500 partitions - this should exceed the quota
    adminClientJob = new KafkaAdminClientsBuilder(adminClientJob)
        .withAdminName(alterAdminName)
        .withTopicCount(topicAlter)
        .withPartitions(500)
        .withTopicOperation(AdminClientOperations.UPDATE_TOPICS.toString())
        .build();
    resourceManager.createResource(extensionContext, adminClientJob.defaultAdmin());
    String alterPodName = kubeClient(namespace).listPodNamesInSpecificNamespace(namespace, "job-name", alterAdminName).get(0);
    PodUtils.waitUntilMessageIsInPodLogs(namespace, alterPodName, "org.apache.kafka.common.errors.ThrottlingQuotaExceededException: The throttling quota has been exceeded.", GLOBAL_TIMEOUT);
    JobUtils.deleteJobWithWait(namespace, alterAdminName);
    // Teardown - delete all (remaining) topics (within Quota limits)
    String teardownClientName = "teardown-delete";
    KafkaAdminClients deleteAdminClientJob = new KafkaAdminClientsBuilder()
        .withAdminName(teardownClientName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME))
        .withTopicName(topicNamePrefix)
        .withTopicCount(topicAlter)
        .withNamespaceName(namespace)
        .withTopicOperation(AdminClientOperations.DELETE_TOPICS.toString())
        .build();
    resourceManager.createResource(extensionContext, deleteAdminClientJob.defaultAdmin());
    ClientUtils.waitForClientSuccess(teardownClientName, namespace, 10);
    JobUtils.deleteJobWithWait(namespace, teardownClientName);
}
Also used : KafkaAdminClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClients) KafkaAdminClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder) Matchers.containsString(org.hamcrest.Matchers.containsString) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)
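KafkaAdminClients.getAdminClientScramConfig(namespace, kafkaUsername, 240000) supplies the SASL/SCRAM settings the admin jobs need to authenticate as the quota-limited user. Its exact output is not shown in this listing; as an assumption-laden sketch, it amounts to the standard Kafka client SCRAM-SHA-512 properties, with the password read from the KafkaUser's Secret and the final argument presumably ending up as a client timeout:

import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.config.SaslConfigs;

class ScramAdminConfigSketch {

    // hypothetical stand-in for getAdminClientScramConfig(); the real helper reads the
    // SCRAM-SHA-512 password from the Secret created for the KafkaUser in the namespace
    static Properties scramAdminConfig(String username, String password, int timeoutMs) {
        Properties props = new Properties();
        props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
            "org.apache.kafka.common.security.scram.ScramLoginModule required "
                + "username=\"" + username + "\" password=\"" + password + "\";");
        // assumed mapping of the 240000/600000 values used in the tests
        props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(timeoutMs));
        return props;
    }
}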

Example 10 with KafkaAdminClientsBuilder

use of io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder in project strimzi-kafka-operator by strimzi.

the class ThrottlingQuotaST method testKafkaAdminTopicOperations.

@ParallelTest
void testKafkaAdminTopicOperations(ExtensionContext extensionContext) {
    final String kafkaUsername = mapWithTestUsers.get(extensionContext.getDisplayName());
    final String createAdminName = "create-admin-" + mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String deleteAdminName = "delete-admin-" + mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String listAdminName = "list-admin-" + mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final String topicNamePrefix = classTopicPrefix + "-simple";
    int topicsCountBelowQuota = 100;
    setupKafkaUserInNamespace(extensionContext, kafkaUsername);
    // Create 'topicsCountBelowQuota' topics
    KafkaAdminClients adminClientJob = new KafkaAdminClientsBuilder()
        .withAdminName(createAdminName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME))
        .withTopicName(topicNamePrefix)
        .withTopicCount(topicsCountBelowQuota)
        .withNamespaceName(namespace)
        .withTopicOperation(AdminClientOperations.CREATE_TOPICS.toString())
        .withAdditionalConfig(KafkaAdminClients.getAdminClientScramConfig(namespace, kafkaUsername, 240000))
        .build();
    resourceManager.createResource(extensionContext, adminClientJob.defaultAdmin());
    ClientUtils.waitForClientSuccess(createAdminName, namespace, topicsCountBelowQuota);
    String createPodName = kubeClient(namespace).listPodNamesInSpecificNamespace(namespace, "job-name", createAdminName).get(0);
    PodUtils.waitUntilMessageIsInPodLogs(namespace, createPodName, "All topics created");
    // List 'topicsCountBelowQuota' topics
    KafkaAdminClients adminClientListJob = new KafkaAdminClientsBuilder()
        .withAdminName(listAdminName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME))
        .withNamespaceName(namespace)
        .withTopicOperation(AdminClientOperations.LIST_TOPICS.toString())
        .withAdditionalConfig(KafkaAdminClients.getAdminClientScramConfig(namespace, kafkaUsername, 600000))
        .build();
    resourceManager.createResource(extensionContext, adminClientListJob.defaultAdmin());
    ClientUtils.waitForClientSuccess(listAdminName, namespace, 0);
    String listPodName = kubeClient().listPodNamesInSpecificNamespace(namespace, "job-name", listAdminName).get(0);
    PodUtils.waitUntilMessageIsInPodLogs(namespace, listPodName, topicNamePrefix + "-" + (topicsCountBelowQuota - 1));
    JobUtils.deleteJobWithWait(namespace, listAdminName);
    // Delete 'topicsCountBelowQuota' topics
    adminClientJob = new KafkaAdminClientsBuilder(adminClientJob)
        .withAdminName(deleteAdminName)
        .withTopicOperation(AdminClientOperations.DELETE_TOPICS.toString())
        .build();
    resourceManager.createResource(extensionContext, adminClientJob.defaultAdmin());
    ClientUtils.waitForClientSuccess(deleteAdminName, namespace, 10);
    String deletePodName = kubeClient(namespace).listPodNamesInSpecificNamespace(namespace, "job-name", deleteAdminName).get(0);
    PodUtils.waitUntilMessageIsInPodLogs(namespace, deletePodName, "Successfully removed all " + topicsCountBelowQuota);
    // List topics after deletion
    resourceManager.createResource(extensionContext, adminClientListJob.defaultAdmin());
    ClientUtils.waitForClientSuccess(listAdminName, namespace, 0);
    listPodName = kubeClient(namespace).listPodNamesInSpecificNamespace(namespace, "job-name", listAdminName).get(0);
    String afterDeletePodLogs = kubeClient(namespace).logsInSpecificNamespace(namespace, listPodName);
    assertThat(afterDeletePodLogs, not(containsString(topicNamePrefix)));
    JobUtils.deleteJobWithWait(namespace, createAdminName);
    JobUtils.deleteJobWithWait(namespace, listAdminName);
    JobUtils.deleteJobWithWait(namespace, deleteAdminName);
}
Also used : KafkaAdminClients(io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClients) KafkaAdminClientsBuilder(io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder) Matchers.containsString(org.hamcrest.Matchers.containsString) ParallelTest(io.strimzi.systemtest.annotations.ParallelTest)
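On the broker side, the controllerMutationRate configured on the KafkaUser corresponds to the controller_mutation_rate client quota from KIP-599, which Strimzi's User Operator reconciles for the user. For context only, here is a minimal sketch of setting the same quota directly through the Kafka Admin API, assuming an illustrative bootstrap address and user name (this is not something the tests above do themselves):

import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;

public class SetControllerMutationQuotaSketch {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9092");

        try (Admin admin = Admin.create(props)) {
            // quota entity: the SCRAM user the admin jobs authenticate as (name is illustrative)
            ClientQuotaEntity user = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "my-user"));
            // allow roughly one topic/partition mutation per second for this user
            ClientQuotaAlteration alteration = new ClientQuotaAlteration(user,
                List.of(new ClientQuotaAlteration.Op("controller_mutation_rate", 1.0)));
            admin.alterClientQuotas(List.of(alteration)).all().get();
        }
    }
}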

Aggregations

KafkaAdminClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClientsBuilder)12 ParallelTest (io.strimzi.systemtest.annotations.ParallelTest)10 KafkaAdminClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaAdminClients)10 Matchers.containsString (org.hamcrest.Matchers.containsString)8 TestStorage (io.strimzi.systemtest.storage.TestStorage)4 KafkaUserScramSha512ClientAuthentication (io.strimzi.api.kafka.model.KafkaUserScramSha512ClientAuthentication)2 GenericKafkaListenerBuilder (io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder)2 BeforeAll (org.junit.jupiter.api.BeforeAll)2