Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project,
taken from the method testKafkaUserStatusNotReady of the class CustomResourceStatusIsolatedST.
@ParallelTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testKafkaUserStatusNotReady(ExtensionContext extensionContext) {
    // A user name longer than 64 characters is invalid, which should drive the
    // KafkaUser resource into the NotReady state.
    String userName = "sasl-use-rabcdefghijklmnopqrstuvxyzabcdefghijklmnopqrstuvxyzabcdef";
    resourceManager.createResource(extensionContext, false, KafkaUserTemplates.defaultUser(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());

    KafkaUserUtils.waitForKafkaUserNotReady(clusterOperator.getDeploymentNamespace(), userName);

    LOGGER.info("Checking status of deployed KafkaUser {}", userName);
    // Inspect the first status condition reported for the user.
    Condition userCondition = KafkaUserResource.kafkaUserClient()
        .inNamespace(clusterOperator.getDeploymentNamespace())
        .withName(userName)
        .get()
        .getStatus()
        .getConditions()
        .get(0);
    LOGGER.info("KafkaUser Status: {}", userCondition.getStatus());
    LOGGER.info("KafkaUser Type: {}", userCondition.getType());
    LOGGER.info("KafkaUser Message: {}", userCondition.getMessage());
    LOGGER.info("KafkaUser Reason: {}", userCondition.getReason());

    assertThat("KafkaUser is in wrong state!", userCondition.getType(), is(NotReady.toString()));
    LOGGER.info("KafkaUser {} is in desired state: {}", userName, userCondition.getType());

    // Clean up the intentionally-broken user and wait until it is fully gone.
    KafkaUserResource.kafkaUserClient()
        .inNamespace(clusterOperator.getDeploymentNamespace())
        .withName(userName)
        .delete();
    KafkaUserUtils.waitForKafkaUserDeletion(clusterOperator.getDeploymentNamespace(), userName);
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project,
taken from the method testKafkaUserStatus of the class CustomResourceStatusIsolatedST.
@ParallelTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
void testKafkaUserStatus(ExtensionContext extensionContext) {
    // Create a TLS user and then verify the User Operator reports it as Ready.
    String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(clusterOperator.getDeploymentNamespace(), CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());

    LOGGER.info("Checking status of deployed KafkaUser");
    // Read the first condition from the user's status sub-resource.
    Condition readinessCondition = KafkaUserResource.kafkaUserClient()
        .inNamespace(clusterOperator.getDeploymentNamespace())
        .withName(userName)
        .get()
        .getStatus()
        .getConditions()
        .get(0);
    LOGGER.info("KafkaUser Status: {}", readinessCondition.getStatus());
    LOGGER.info("KafkaUser Type: {}", readinessCondition.getType());

    assertThat("KafkaUser is in wrong state!", readinessCondition.getType(), is(Ready.toString()));
    LOGGER.info("KafkaUser is in desired state: Ready");
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project,
taken from the method testPauseReconciliationInKafkaAndKafkaConnectWithConnector of the class ReconciliationST.
@ParallelNamespaceTest
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
@KRaftNotSupported("Probably bug - https://github.com/strimzi/strimzi-kafka-operator/issues/6862")
// Verifies the strimzi.io/pause-reconciliation annotation on three resource kinds —
// Kafka, KafkaConnect and KafkaConnector. While a resource is paused, spec changes
// (replica / tasksMax scaling) must NOT be acted upon; after un-pausing, the operator
// must apply the pending changes.
void testPauseReconciliationInKafkaAndKafkaConnectWithConnector(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(clusterOperator.getDeploymentNamespace(), extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
String kafkaSsName = KafkaResources.kafkaStatefulSetName(clusterName);
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaSsName);
// Start with a 3-broker persistent Kafka cluster.
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3).build());
// Pause the Kafka CR and simultaneously request a scale-up; the operator must ignore the new replica count while paused.
LOGGER.info("Adding pause annotation into Kafka resource and also scaling replicas to 4, new pod should not appear");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
// NOTE(review): setAnnotations replaces the whole annotation map with PAUSE_ANNO — confirm no other annotations need preserving here.
kafka.getMetadata().setAnnotations(PAUSE_ANNO);
kafka.getSpec().getKafka().setReplicas(SCALE_TO);
}, namespaceName);
LOGGER.info("Kafka should contain status with {}", CustomResourceStatus.ReconciliationPaused.toString());
KafkaUtils.waitForKafkaStatus(namespaceName, clusterName, CustomResourceStatus.ReconciliationPaused);
// Pod count must stay at the original 3 for a stability window while reconciliation is paused.
PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, kafkaSsName, 3);
LOGGER.info("Setting annotation to \"false\", Kafka should be scaled to {}", SCALE_TO);
// Un-pause: flipping the annotation value lets the earlier replica change take effect.
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"), namespaceName);
RollingUpdateUtils.waitForComponentAndPodsReady(namespaceName, kafkaSelector, SCALE_TO);
// Same exercise for KafkaConnect, but paused from the moment of creation — no pods should ever start.
LOGGER.info("Deploying KafkaConnect with pause annotation from the start, no pods should appear");
resourceManager.createResource(extensionContext, false, KafkaConnectTemplates.kafkaConnectWithFilePlugin(namespaceName, clusterName, 1).editOrNewMetadata().addToAnnotations(PAUSE_ANNO).addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().build());
String connectDepName = KafkaConnectResources.deploymentName(clusterName);
KafkaConnectUtils.waitForConnectStatus(namespaceName, clusterName, CustomResourceStatus.ReconciliationPaused);
// Paused from creation: the Connect deployment must remain at 0 pods.
PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, connectDepName, 0);
LOGGER.info("Setting annotation to \"false\" and creating KafkaConnector");
KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kc -> kc.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"), namespaceName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, connectDepName, 1);
resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName).build());
// Snapshot the connector spec via the Connect REST API so we can later prove it did not change while paused.
String connectPodName = kubeClient(namespaceName).listPods(clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName();
String connectorSpec = KafkaConnectorUtils.getConnectorSpecFromConnectAPI(namespaceName, connectPodName, clusterName);
LOGGER.info("Adding pause annotation into the KafkaConnector and scaling taskMax to 4");
KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(clusterName, connector -> {
connector.getMetadata().setAnnotations(PAUSE_ANNO);
connector.getSpec().setTasksMax(SCALE_TO);
}, namespaceName);
KafkaConnectorUtils.waitForConnectorStatus(namespaceName, clusterName, CustomResourceStatus.ReconciliationPaused);
// While paused, the spec reported by the Connect API must stay identical to the snapshot.
KafkaConnectorUtils.waitForConnectorSpecFromConnectAPIStability(namespaceName, connectPodName, clusterName, connectorSpec);
LOGGER.info("Setting annotation to \"false\", taskMax should be increased to {}", SCALE_TO);
KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(clusterName, connector -> connector.getMetadata().getAnnotations().replace(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true", "false"), namespaceName);
String oldConfig = new JsonObject(connectorSpec).getValue("config").toString();
// After un-pausing, wait until the live config diverges from the old snapshot, then verify tasks.max picked up the new value.
JsonObject newConfig = new JsonObject(KafkaConnectorUtils.waitForConnectorConfigUpdate(namespaceName, connectPodName, clusterName, oldConfig, "localhost"));
assertThat(newConfig.getValue("tasks.max"), is(Integer.toString(SCALE_TO)));
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project,
taken from the method backupAndRestore of the class ColdBackupScriptIsolatedST.
@IsolatedTest
@KRaftNotSupported("Debug needed - https://github.com/strimzi/strimzi-kafka-operator/issues/6863")
// End-to-end check of the cold-backup script (tools/cold-backup/run.sh): produce and
// consume messages, back the cluster up, tear the operator down and reinstall it,
// restore from the archive, then verify consumer-group offsets and total message
// counts survived the round trip.
void backupAndRestore(ExtensionContext context) {
String clusterName = mapWithClusterNames.get(context.getDisplayName());
String groupId = "my-group", newGroupId = "new-group";
String topicName = mapWithTestTopics.get(context.getDisplayName());
String producerName = clusterName + "-producer";
String consumerName = clusterName + "-consumer";
int firstBatchSize = 100, secondBatchSize = 10;
// Archive path where the backup script writes the cluster snapshot.
String backupFilePath = USER_PATH + "/target/" + clusterName + ".tgz";
// Single-broker, single-ZK persistent cluster — persistent storage is what gets backed up.
resourceManager.createResource(context, KafkaTemplates.kafkaPersistent(clusterName, 1, 1).editMetadata().withNamespace(clusterOperator.getDeploymentNamespace()).endMetadata().build());
KafkaClients clients = new KafkaClientsBuilder().withProducerName(producerName).withConsumerName(consumerName).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withNamespaceName(clusterOperator.getDeploymentNamespace()).withTopicName(topicName).withConsumerGroup(groupId).withMessageCount(firstBatchSize).build();
// send messages and consume them
resourceManager.createResource(context, clients.producerStrimzi(), clients.consumerStrimzi());
ClientUtils.waitForClientsSuccess(producerName, consumerName, clusterOperator.getDeploymentNamespace(), firstBatchSize);
// save consumer group offsets
int offsetsBeforeBackup = KafkaUtils.getCurrentOffsets(KafkaResources.kafkaPodName(clusterName, 0), topicName, groupId);
assertThat("No offsets map before backup", offsetsBeforeBackup > 0);
// send additional messages (these land AFTER the saved offsets and are consumed only post-restore)
clients = new KafkaClientsBuilder(clients).withMessageCount(secondBatchSize).build();
resourceManager.createResource(context, clients.producerStrimzi());
// NOTE(review): waits with firstBatchSize although the producer was configured with secondBatchSize — confirm this count only sizes the wait timeout.
ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), firstBatchSize);
// backup command
LOGGER.info("Running backup procedure for {}/{}", clusterOperator.getDeploymentNamespace(), clusterName);
String[] backupCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "backup", "-n", clusterOperator.getDeploymentNamespace(), "-c", clusterName, "-t", backupFilePath, "-y" };
Exec.exec(Level.INFO, backupCommand);
// Simulate disaster recovery: remove the operator entirely and reinstall it fresh.
clusterOperator.unInstall();
clusterOperator = clusterOperator.defaultInstallation().createInstallation().runInstallation();
// restore command
LOGGER.info("Running restore procedure for {}/{}", clusterOperator.getDeploymentNamespace(), clusterName);
String[] restoreCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "restore", "-n", clusterOperator.getDeploymentNamespace(), "-c", clusterName, "-s", backupFilePath, "-y" };
Exec.exec(Level.INFO, restoreCommand);
// check consumer group offsets
KafkaUtils.waitForKafkaReady(clusterName);
// NOTE(review): this rebuild appears redundant — messageCount was already secondBatchSize from the earlier builder call; confirm before removing.
clients = new KafkaClientsBuilder(clients).withMessageCount(secondBatchSize).build();
int offsetsAfterRestore = KafkaUtils.getCurrentOffsets(KafkaResources.kafkaPodName(clusterName, 0), topicName, groupId);
assertThat("Current consumer group offsets are not the same as before the backup", offsetsAfterRestore, is(offsetsBeforeBackup));
// check consumer group recovery: the original group resumes from its saved offsets and reads only the second batch
resourceManager.createResource(context, clients.consumerStrimzi());
ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), secondBatchSize);
JobUtils.deleteJobWithWait(clusterOperator.getDeploymentNamespace(), consumerName);
// check total number of messages: a brand-new group reads everything from the beginning
int batchSize = firstBatchSize + secondBatchSize;
clients = new KafkaClientsBuilder(clients).withConsumerGroup(newGroupId).withMessageCount(batchSize).build();
resourceManager.createResource(context, clients.consumerStrimzi());
ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), batchSize);
JobUtils.deleteJobWithWait(clusterOperator.getDeploymentNamespace(), consumerName);
}
Example usage of io.strimzi.systemtest.annotations.KRaftNotSupported in the strimzi/strimzi project,
taken from the method testUserInDifferentNamespace of the class AllNamespaceIsolatedST.
@IsolatedTest
@KRaftNotSupported("UserOperator is not supported by KRaft mode and is used in this test case")
// Verifies that a KafkaUser created in SECOND_NAMESPACE (different from the cluster's
// namespace) is reconciled to Ready by the all-namespace operator, and that its
// credential Secret, once copied into THIRD_NAMESPACE, lets TLS clients there
// produce and consume successfully.
void testUserInDifferentNamespace(ExtensionContext extensionContext) {
final TestStorage testStorage = new TestStorage(extensionContext, SECOND_NAMESPACE);
// setNamespace returns the previous namespace so it can be restored at the end.
String startingNamespace = cluster.setNamespace(SECOND_NAMESPACE);
KafkaUser user = KafkaUserTemplates.tlsUser(MAIN_NAMESPACE_CLUSTER_NAME, USER_NAME).build();
resourceManager.createResource(extensionContext, user);
// Read the first status condition of the user created in SECOND_NAMESPACE.
Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(SECOND_NAMESPACE).withName(USER_NAME).get().getStatus().getConditions().get(0);
LOGGER.info("KafkaUser condition status: {}", kafkaCondition.getStatus());
LOGGER.info("KafkaUser condition type: {}", kafkaCondition.getType());
assertThat(kafkaCondition.getType(), is(Ready.toString()));
List<Secret> secretsOfSecondNamespace = kubeClient(SECOND_NAMESPACE).listSecrets();
cluster.setNamespace(THIRD_NAMESPACE);
// Copy the user's credential Secret into THIRD_NAMESPACE so clients there can authenticate.
for (Secret s : secretsOfSecondNamespace) {
if (s.getMetadata().getName().equals(USER_NAME)) {
LOGGER.info("Copying secret {} from namespace {} to namespace {}", s, SECOND_NAMESPACE, THIRD_NAMESPACE);
copySecret(s, THIRD_NAMESPACE, USER_NAME);
}
}
// TLS clients run in THIRD_NAMESPACE but bootstrap against the cluster in the main namespace.
KafkaClients kafkaClients = new KafkaClientsBuilder().withTopicName(TOPIC_NAME).withMessageCount(MESSAGE_COUNT).withBootstrapAddress(KafkaResources.tlsBootstrapAddress(MAIN_NAMESPACE_CLUSTER_NAME)).withProducerName(testStorage.getProducerName()).withConsumerName(testStorage.getConsumerName()).withNamespaceName(THIRD_NAMESPACE).withUserName(USER_NAME).build();
resourceManager.createResource(extensionContext, kafkaClients.producerTlsStrimzi(MAIN_NAMESPACE_CLUSTER_NAME), kafkaClients.consumerTlsStrimzi(MAIN_NAMESPACE_CLUSTER_NAME));
ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), THIRD_NAMESPACE, MESSAGE_COUNT);
// Restore the namespace that was active before this test ran.
cluster.setNamespace(startingNamespace);
}
Aggregations