Use of io.strimzi.api.kafka.model.EntityOperatorSpecBuilder in project strimzi-kafka-operator by strimzi.
From the class NamespaceDeletionRecoveryIsolatedST, method testTopicNotAvailable:
/**
 * In case we don't have the KafkaTopic resources from before the cluster loss, we take these steps:
 *   1. deploy the Kafka cluster without the Topic Operator - otherwise the topics would be deleted
 *   2. delete the topic store topics - `__strimzi-topic-operator-kstreams-topic-store-changelog` and `__strimzi_store_topic`
 *   3. enable the Topic Operator again by redeploying the Kafka cluster
 * @throws InterruptedException when one of the fixed sleeps below is interrupted
 */
@IsolatedTest("We need for each test case its own Cluster Operator")
@Tag(INTERNAL_CLIENTS_USED)
void testTopicNotAvailable(ExtensionContext extensionContext) throws InterruptedException {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    prepareEnvironmentForRecovery(extensionContext, topicName);

    // Wait until the consumer offsets topic is created
    KafkaTopicUtils.waitForKafkaTopicCreationByNamePrefix("consumer-offsets");
    // Get the list of PVCs needed for the recovery
    List<PersistentVolumeClaim> persistentVolumeClaimList = kubeClient().getClient().persistentVolumeClaims().list().getItems();

    deleteAndRecreateNamespace();
    recreatePvcAndUpdatePv(persistentVolumeClaimList);
    recreateClusterOperator(extensionContext);

    // Recreate the Kafka cluster, deliberately without the Topic Operator (empty Entity Operator)
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
        .editSpec()
            .editKafka()
                .withNewPersistentClaimStorage().withSize("1Gi").withStorageClass(storageClassName).endPersistentClaimStorage()
            .endKafka()
            .editZookeeper()
                .withNewPersistentClaimStorage().withSize("1Gi").withStorageClass(storageClassName).endPersistentClaimStorage()
            .endZookeeper()
            .withNewEntityOperator().endEntityOperator()
        .endSpec()
        .build());
    // Give Kafka some time after it becomes ready before deleting the topic store topics
    Thread.sleep(60000);

    // Remove all topic data from the topic store
    String deleteTopicStoreTopics = "./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi-topic-operator-kstreams-topic-store-changelog --delete"
        + " && ./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi_store_topic --delete";

    cmdKubeClient(INFRA_NAMESPACE).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", deleteTopicStoreTopics);

    // Wait until the exec command finishes
    Thread.sleep(30000);
    // Re-enable the Topic Operator (and the User Operator) via the Entity Operator spec
    KafkaResource.replaceKafkaResource(clusterName, k -> {
        k.getSpec().setEntityOperator(new EntityOperatorSpecBuilder()
            .withNewTopicOperator().endTopicOperator()
            .withNewUserOperator().endUserOperator()
            .build());
    });
    DeploymentUtils.waitForDeploymentAndPodsReady(KafkaResources.entityOperatorDeploymentName(clusterName), 1);
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    String defaultKafkaClientsPodName = ResourceManager.kubeClient().listPodsByPrefixInName(clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(defaultKafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(INFRA_NAMESPACE)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Checking produced and consumed messages to pod:{}", internalKafkaClient.getPodName());

    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );
}
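
The helper methods called near the top of the test (prepareEnvironmentForRecovery, deleteAndRecreateNamespace, recreatePvcAndUpdatePv, recreateClusterOperator) are defined elsewhere in NamespaceDeletionRecoveryIsolatedST and are not part of this snippet. To illustrate the PV/PVC recovery mechanics the test relies on, here is a minimal sketch of what recreatePvcAndUpdatePv could look like; it assumes the fabric8 KubernetesClient returned by kubeClient().getClient() above, and its exact calls are an assumption rather than the project's actual implementation:

// Sketch only - not the real Strimzi helper. Assumes io.fabric8.kubernetes.api.model.PersistentVolume
// and PersistentVolumeClaim, plus the kubeClient() accessor used in the test above.
private void recreatePvcAndUpdatePv(List<PersistentVolumeClaim> persistentVolumeClaimList) {
    for (PersistentVolumeClaim pvc : persistentVolumeClaimList) {
        // Drop the server-assigned resourceVersion so the PVC can be created afresh in the new namespace
        pvc.getMetadata().setResourceVersion(null);
        kubeClient().getClient().persistentVolumeClaims().inNamespace(INFRA_NAMESPACE).create(pvc);

        // The retained PV still holds a claimRef pointing at the deleted PVC's UID; clear the stale
        // binding fields so the PV can re-bind to the recreated claim instead of staying Released
        PersistentVolume pv = kubeClient().getClient().persistentVolumes().withName(pvc.getSpec().getVolumeName()).get();
        pv.getSpec().getClaimRef().setResourceVersion(null);
        pv.getSpec().getClaimRef().setUid(null);
        kubeClient().getClient().persistentVolumes().createOrReplace(pv);
    }
}

Whatever the real implementation does, the precondition is the same: the PersistentVolumes must use the Retain reclaim policy, otherwise the volumes (and the topic data this test recovers) would have been deleted together with the namespace.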