Search in sources:

Example 21 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi-kafka-operator by strimzi.

The class LogDumpScriptIsolatedST, method dumpPartitions:

@IsolatedTest
void dumpPartitions(ExtensionContext context) {
    TestStorage storage = new TestStorage(context);
    String groupId = "my-group";
    String partitionNumber = "0";
    String outPath = USER_PATH + "/target/" + storage.getClusterName();
    resourceManager.createResource(context, KafkaTemplates.kafkaPersistent(storage.getClusterName(), 1, 1)
        .editMetadata()
            .withNamespace(storage.getNamespaceName())
        .endMetadata()
        .build());
    KafkaClients kafkaClients = new KafkaClientsBuilder()
        .withTopicName(storage.getTopicName())
        .withMessageCount(10)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(storage.getClusterName()))
        .withProducerName(storage.getProducerName())
        .withConsumerName(storage.getConsumerName())
        .withNamespaceName(storage.getNamespaceName())
        .withConsumerGroup(groupId)
        .build();
    // send messages and consume them
    resourceManager.createResource(context, kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(storage.getProducerName(), storage.getConsumerName(), storage.getNamespaceName(), MESSAGE_COUNT);
    // dry run
    LOGGER.info("Print partition segments from cluster {}/{}", storage.getNamespaceName(), storage.getClusterName());
    String[] printCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "partition", "--namespace", storage.getNamespaceName(), "--cluster", storage.getClusterName(), "--topic", storage.getTopicName(), "--partition", partitionNumber, "--dry-run" };
    Exec.exec(Level.INFO, printCmd);
    assertThat("Output directory created in dry mode", Files.notExists(Paths.get(outPath)));
    // partition dump
    LOGGER.info("Dump topic partition from cluster {}/{}", storage.getNamespaceName(), storage.getClusterName());
    String[] dumpPartCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "partition", "--namespace", storage.getNamespaceName(), "--cluster", storage.getClusterName(), "--topic", storage.getTopicName(), "--partition", partitionNumber, "--out-path", outPath };
    Exec.exec(Level.INFO, dumpPartCmd);
    assertThat("No output directory created", Files.exists(Paths.get(outPath)));
    String dumpPartFilePath = outPath + "/" + storage.getTopicName() + "/kafka-0-" + storage.getTopicName() + "-" + partitionNumber + "/00000000000000000000.log";
    assertThat("No partition file created", Files.exists(Paths.get(dumpPartFilePath)));
    assertThat("Empty partition file", new File(dumpPartFilePath).length() > 0);
    // __consumer_offsets dump
    LOGGER.info("Dump consumer offsets partition from cluster {}/{}", storage.getNamespaceName(), storage.getClusterName());
    String[] dumpCgCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "cg_offsets", "--namespace", storage.getNamespaceName(), "--cluster", storage.getClusterName(), "--group-id", groupId, "--out-path", outPath };
    Exec.exec(Level.INFO, dumpCgCmd);
    assertThat("No output directory created", Files.exists(Paths.get(outPath)));
    String dumpCgFilePath = outPath + "/__consumer_offsets/kafka-0-__consumer_offsets-12/00000000000000000000.log";
    assertThat("No partition file created", Files.exists(Paths.get(dumpCgFilePath)));
    assertThat("Empty partition file", new File(dumpCgFilePath).length() > 0);
}
Also used: KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder), KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients), TestStorage (io.strimzi.systemtest.storage.TestStorage), File (java.io.File), IsolatedTest (io.strimzi.test.annotations.IsolatedTest)
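
Both run.sh invocations in dumpPartitions differ only in whether --dry-run or --out-path is passed, so the command construction could be factored into a helper. The sketch below is hypothetical, not part of the Strimzi test suite; it only reuses the flags and the USER_PATH constant shown above.

static String[] logDumpPartitionCmd(String namespace, String cluster, String topic,
                                    String partition, String outPath, boolean dryRun) {
    // Build the argument list for tools/log-dump/run.sh exactly as the test above does.
    java.util.List<String> cmd = new java.util.ArrayList<>(java.util.List.of(
            USER_PATH + "/../tools/log-dump/run.sh", "partition",
            "--namespace", namespace,
            "--cluster", cluster,
            "--topic", topic,
            "--partition", partition));
    if (dryRun) {
        // Dry run: only print the segments, no files are written.
        cmd.add("--dry-run");
    } else {
        // Real dump: write segment files under the given output path.
        cmd.add("--out-path");
        cmd.add(outPath);
    }
    return cmd.toArray(new String[0]);
}

With such a helper, the two invocations above would reduce to Exec.exec(Level.INFO, logDumpPartitionCmd(...)), once with dryRun set to true and once with it set to false.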

Example 22 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi-kafka-operator by strimzi.

The class ColdBackupScriptIsolatedST, method backupAndRestore:

@IsolatedTest
@KRaftNotSupported("Debug needed - https://github.com/strimzi/strimzi-kafka-operator/issues/6863")
void backupAndRestore(ExtensionContext context) {
    String clusterName = mapWithClusterNames.get(context.getDisplayName());
    String groupId = "my-group", newGroupId = "new-group";
    String topicName = mapWithTestTopics.get(context.getDisplayName());
    String producerName = clusterName + "-producer";
    String consumerName = clusterName + "-consumer";
    int firstBatchSize = 100, secondBatchSize = 10;
    String backupFilePath = USER_PATH + "/target/" + clusterName + ".tgz";
    resourceManager.createResource(context, KafkaTemplates.kafkaPersistent(clusterName, 1, 1)
        .editMetadata()
            .withNamespace(clusterOperator.getDeploymentNamespace())
        .endMetadata()
        .build());
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withNamespaceName(clusterOperator.getDeploymentNamespace())
        .withTopicName(topicName)
        .withConsumerGroup(groupId)
        .withMessageCount(firstBatchSize)
        .build();
    // send messages and consume them
    resourceManager.createResource(context, clients.producerStrimzi(), clients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(producerName, consumerName, clusterOperator.getDeploymentNamespace(), firstBatchSize);
    // save consumer group offsets
    int offsetsBeforeBackup = KafkaUtils.getCurrentOffsets(KafkaResources.kafkaPodName(clusterName, 0), topicName, groupId);
    assertThat("No offsets map before backup", offsetsBeforeBackup > 0);
    // send additional messages
    clients = new KafkaClientsBuilder(clients).withMessageCount(secondBatchSize).build();
    resourceManager.createResource(context, clients.producerStrimzi());
    ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), secondBatchSize);
    // backup command
    LOGGER.info("Running backup procedure for {}/{}", clusterOperator.getDeploymentNamespace(), clusterName);
    String[] backupCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "backup", "-n", clusterOperator.getDeploymentNamespace(), "-c", clusterName, "-t", backupFilePath, "-y" };
    Exec.exec(Level.INFO, backupCommand);
    clusterOperator.unInstall();
    clusterOperator = clusterOperator.defaultInstallation().createInstallation().runInstallation();
    // restore command
    LOGGER.info("Running restore procedure for {}/{}", clusterOperator.getDeploymentNamespace(), clusterName);
    String[] restoreCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "restore", "-n", clusterOperator.getDeploymentNamespace(), "-c", clusterName, "-s", backupFilePath, "-y" };
    Exec.exec(Level.INFO, restoreCommand);
    // check consumer group offsets
    KafkaUtils.waitForKafkaReady(clusterName);
    clients = new KafkaClientsBuilder(clients).withMessageCount(secondBatchSize).build();
    int offsetsAfterRestore = KafkaUtils.getCurrentOffsets(KafkaResources.kafkaPodName(clusterName, 0), topicName, groupId);
    assertThat("Current consumer group offsets are not the same as before the backup", offsetsAfterRestore, is(offsetsBeforeBackup));
    // check consumer group recovery
    resourceManager.createResource(context, clients.consumerStrimzi());
    ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), secondBatchSize);
    JobUtils.deleteJobWithWait(clusterOperator.getDeploymentNamespace(), consumerName);
    // check total number of messages
    int batchSize = firstBatchSize + secondBatchSize;
    clients = new KafkaClientsBuilder(clients).withConsumerGroup(newGroupId).withMessageCount(batchSize).build();
    resourceManager.createResource(context, clients.consumerStrimzi());
    ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), batchSize);
    JobUtils.deleteJobWithWait(clusterOperator.getDeploymentNamespace(), consumerName);
}
Also used: KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder), KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients), KRaftNotSupported (io.strimzi.systemtest.annotations.KRaftNotSupported), IsolatedTest (io.strimzi.test.annotations.IsolatedTest)
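
The backup and restore calls in backupAndRestore differ only in the sub-command and the archive flag (-t for the backup target file, -s for the restore source file). A hypothetical helper, not taken from the Strimzi code base, could assemble both command lines:

static String[] coldBackupCmd(String action, String namespace, String cluster, String archivePath) {
    // "backup" writes the archive via -t, "restore" reads it via -s, matching the flags used above.
    String archiveFlag = "backup".equals(action) ? "-t" : "-s";
    return new String[] {
            USER_PATH + "/../tools/cold-backup/run.sh", action,
            "-n", namespace,
            "-c", cluster,
            archiveFlag, archivePath,
            // -y is passed exactly as in the test above.
            "-y"
    };
}

Exec.exec(Level.INFO, coldBackupCmd("backup", clusterOperator.getDeploymentNamespace(), clusterName, backupFilePath)) would then replace the inline backupCommand array, and the restore call would be the same with "restore".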

Aggregations

IsolatedTest (io.strimzi.test.annotations.IsolatedTest): 22
KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients): 12
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder): 12
EnvVar (io.fabric8.kubernetes.api.model.EnvVar): 8
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 8
SetupClusterOperator (io.strimzi.systemtest.resources.operator.SetupClusterOperator): 8
ArrayList (java.util.ArrayList): 8
Random (java.util.Random): 8
Tag (org.junit.jupiter.api.Tag): 8
Pod (io.fabric8.kubernetes.api.model.Pod): 6
PodBuilder (io.fabric8.kubernetes.api.model.PodBuilder): 6
KafkaTopic (io.strimzi.api.kafka.model.KafkaTopic): 6
OrderedProperties (io.strimzi.operator.common.model.OrderedProperties): 6
File (java.io.File): 6
HashMap (java.util.HashMap): 6
List (java.util.List): 6
Map (java.util.Map): 6
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 6
Matchers.containsString (org.hamcrest.Matchers.containsString): 5
Matchers.emptyString (org.hamcrest.Matchers.emptyString): 5
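
The aggregation above lists the types most often imported alongside IsolatedTest, and the two examples show the common shape of such tests: build a KafkaClients instance, create the producer and consumer jobs, and wait for them to finish. A minimal skeleton of that pattern follows; the method name, consumer group, and message count are illustrative only and not taken from the Strimzi repository.

@IsolatedTest
void exampleIsolatedTest(ExtensionContext context) {
    TestStorage storage = new TestStorage(context);
    // A Kafka cluster named storage.getClusterName() is assumed to already exist in the namespace.
    KafkaClients clients = new KafkaClientsBuilder()
        .withTopicName(storage.getTopicName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(storage.getClusterName()))
        .withNamespaceName(storage.getNamespaceName())
        .withProducerName(storage.getProducerName())
        .withConsumerName(storage.getConsumerName())
        .withConsumerGroup("my-group")
        .withMessageCount(10)
        .build();
    // Create the producer and consumer jobs, then wait for both to finish successfully.
    resourceManager.createResource(context, clients.producerStrimzi(), clients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(storage.getProducerName(), storage.getConsumerName(),
        storage.getNamespaceName(), 10);
}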