use of io.strimzi.systemtest.annotations.IsolatedTest in project strimzi by strimzi.
the class NamespaceDeletionRecoveryIsolatedST method testTopicNotAvailable.
/**
* In case we don't have KafkaTopic resources from before the cluster loss, we do these steps:
* 1. deploy the Kafka cluster without Topic Operator - otherwise topics will be deleted
* 2. delete KafkaTopic Store topics - `__strimzi-topic-operator-kstreams-topic-store-changelog` and `__strimzi_store_topic`
* 3. enable Topic Operator by redeploying Kafka cluster
* @throws InterruptedException when the Thread.sleep() calls are interrupted
*/
@IsolatedTest("We need for each test case its own Cluster Operator")
@Tag(INTERNAL_CLIENTS_USED)
void testTopicNotAvailable(ExtensionContext extensionContext) throws InterruptedException {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
prepareEnvironmentForRecovery(extensionContext, topicName);
// Wait till consumer offset topic is created
KafkaTopicUtils.waitForKafkaTopicCreationByNamePrefix("consumer-offsets");
// Get the list of PVCs needed for recovery
List<PersistentVolumeClaim> persistentVolumeClaimList = kubeClient().getClient().persistentVolumeClaims().list().getItems();
deleteAndRecreateNamespace();
recreatePvcAndUpdatePv(persistentVolumeClaimList);
recreateClusterOperator(extensionContext);
// Recreate Kafka Cluster
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
    .editSpec()
        .editKafka()
            .withNewPersistentClaimStorage().withSize("1Gi").withStorageClass(storageClassName).endPersistentClaimStorage()
        .endKafka()
        .editZookeeper()
            .withNewPersistentClaimStorage().withSize("1Gi").withStorageClass(storageClassName).endPersistentClaimStorage()
        .endZookeeper()
        .withNewEntityOperator().endEntityOperator()
    .endSpec()
    .build());
// Wait some time after Kafka is ready before deleting the topic store topics
Thread.sleep(60000);
// Remove all topic data from topic store
String deleteTopicStoreTopics = "./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi-topic-operator-kstreams-topic-store-changelog --delete " + "&& ./bin/kafka-topics.sh --bootstrap-server localhost:9092 --topic __strimzi_store_topic --delete";
cmdKubeClient(INFRA_NAMESPACE).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", deleteTopicStoreTopics);
// Wait till the exec command finishes
Thread.sleep(30000);
KafkaResource.replaceKafkaResource(clusterName, k -> {
k.getSpec().setEntityOperator(new EntityOperatorSpecBuilder()
    .withNewTopicOperator().endTopicOperator()
    .withNewUserOperator().endUserOperator()
    .build());
});
DeploymentUtils.waitForDeploymentAndPodsReady(KafkaResources.entityOperatorDeploymentName(clusterName), 1);
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
String defaultKafkaClientsPodName = ResourceManager.kubeClient().listPodsByPrefixInName(clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(defaultKafkaClientsPodName)
    .withTopicName(topicName)
    .withNamespaceName(INFRA_NAMESPACE)
    .withClusterName(CLUSTER_NAME)
    .withMessageCount(MESSAGE_COUNT)
    .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
    .build();
LOGGER.info("Checking produced and consumed messages to pod:{}", internalKafkaClient.getPodName());
internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
}
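The two fixed Thread.sleep() calls above are the simplest way to let the cluster settle and the delete commands take effect. A condition-based wait is usually more reliable; below is a minimal sketch (not part of the original test) that reuses the TestUtils.waitFor and execInPod helpers shown elsewhere in this section. The poll constants and the --list based check are illustrative assumptions.
// Sketch only: poll until the topic store topics no longer appear in the broker's
// topic list, instead of sleeping for a fixed 30 seconds after the delete command.
TestUtils.waitFor("topic store topics to be deleted",
    Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT,
    () -> !cmdKubeClient(INFRA_NAMESPACE)
        .execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
            "./bin/kafka-topics.sh --bootstrap-server localhost:9092 --list")
        .out()
        .contains("__strimzi_store_topic"));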
use of io.strimzi.systemtest.annotations.IsolatedTest in project strimzi by strimzi.
the class DrainCleanerIsolatedST method testDrainCleanerWithComponents.
@IsolatedTest
@RequiredMinKubeApiVersion(version = 1.17)
void testDrainCleanerWithComponents(ExtensionContext extensionContext) {
TestStorage testStorage = new TestStorage(extensionContext, Constants.DRAIN_CLEANER_NAMESPACE);
final int replicas = 3;
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(testStorage.getClusterName(), replicas)
    .editMetadata()
        .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
    .endMetadata()
    .editSpec()
        .editKafka()
            .editOrNewTemplate()
                .editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget()
            .endTemplate()
        .endKafka()
        .editZookeeper()
            .editOrNewTemplate()
                .editOrNewPodDisruptionBudget().withMaxUnavailable(0).endPodDisruptionBudget()
            .endTemplate()
        .endZookeeper()
    .endSpec()
    .build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(testStorage.getClusterName(), testStorage.getTopicName())
    .editMetadata()
        .withNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
    .endMetadata()
    .build());
drainCleaner.createDrainCleaner(extensionContext);
String kafkaName = KafkaResources.kafkaStatefulSetName(testStorage.getClusterName());
String zkName = KafkaResources.zookeeperStatefulSetName(testStorage.getClusterName());
KafkaClients kafkaBasicExampleClients = new KafkaClientsBuilder()
    .withMessageCount(300)
    .withTopicName(testStorage.getTopicName())
    .withNamespaceName(Constants.DRAIN_CLEANER_NAMESPACE)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
    .withProducerName(testStorage.getProducerName())
    .withConsumerName(testStorage.getConsumerName())
    .withDelayMs(1000)
    .build();
resourceManager.createResource(extensionContext, kafkaBasicExampleClients.producerStrimzi(), kafkaBasicExampleClients.consumerStrimzi());
for (int i = 0; i < replicas; i++) {
String zkPodName = KafkaResources.zookeeperPodName(testStorage.getClusterName(), i);
String kafkaPodName = KafkaResources.kafkaPodName(testStorage.getClusterName(), i);
Map<String, String> kafkaPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector())
    .entrySet().stream()
    .filter(snapshot -> snapshot.getKey().equals(kafkaPodName))
    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
Map<String, String> zkPod = PodUtils.podSnapshot(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector())
    .entrySet().stream()
    .filter(snapshot -> snapshot.getKey().equals(zkPodName))
    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
LOGGER.info("Evicting pods: {} and {}", zkPodName, kafkaPodName);
kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(zkPodName).evict();
kubeClient().getClient().pods().inNamespace(Constants.DRAIN_CLEANER_NAMESPACE).withName(kafkaPodName).evict();
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getZookeeperSelector(), replicas, zkPod);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(Constants.DRAIN_CLEANER_NAMESPACE, testStorage.getKafkaSelector(), replicas, kafkaPod);
}
ClientUtils.waitTillContinuousClientsFinish(testStorage.getProducerName(), testStorage.getConsumerName(), Constants.DRAIN_CLEANER_NAMESPACE, 300);
}
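With maxUnavailable set to 0 in both PodDisruptionBudgets, the eviction requests above cannot remove the pods directly; the Drain Cleaner intercepts them and marks the pods so that the Cluster Operator performs the restart as a controlled rolling update. Below is a hedged sketch of an extra assertion that could sit inside the eviction loop (not part of the original test; the annotation key is an assumption based on Strimzi's manual rolling-update mechanism and may differ between versions).
// Sketch only: after eviction, the Drain Cleaner is expected to flag the pod for a
// controlled restart. The annotation key below is an assumption.
String rollAnnotation = kubeClient().getClient().pods()
    .inNamespace(Constants.DRAIN_CLEANER_NAMESPACE)
    .withName(kafkaPodName)
    .get()
    .getMetadata()
    .getAnnotations()
    .get("strimzi.io/manual-rolling-update");
assertThat(rollAnnotation, is("true"));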
use of io.strimzi.systemtest.annotations.IsolatedTest in project strimzi by strimzi.
the class SpecificIsolatedST method testRackAware.
@IsolatedTest("UtestRackAwareConnectWrongDeploymentsing more tha one Kafka cluster in one namespace")
@Tag(REGRESSION)
@Tag(INTERNAL_CLIENTS_USED)
void testRackAware(ExtensionContext extensionContext) {
assumeFalse(Environment.isNamespaceRbacScope());
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
String producerName = "hello-world-producer";
String consumerName = "hello-world-consumer";
String rackKey = "rack-key";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
    .editSpec()
        .editKafka()
            .withNewRack().withTopologyKey(rackKey).endRack()
        .endKafka()
    .endSpec()
    .build());
Affinity kafkaPodSpecAffinity = StUtils.getStatefulSetOrStrimziPodSetAffinity(KafkaResources.kafkaStatefulSetName(clusterName));
NodeSelectorRequirement kafkaPodNodeSelectorRequirement = kafkaPodSpecAffinity.getNodeAffinity()
    .getRequiredDuringSchedulingIgnoredDuringExecution()
    .getNodeSelectorTerms().get(0)
    .getMatchExpressions().get(0);
assertThat(kafkaPodNodeSelectorRequirement.getKey(), is(rackKey));
assertThat(kafkaPodNodeSelectorRequirement.getOperator(), is("Exists"));
PodAffinityTerm kafkaPodAffinityTerm = kafkaPodSpecAffinity.getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution().get(0).getPodAffinityTerm();
assertThat(kafkaPodAffinityTerm.getTopologyKey(), is(rackKey));
assertThat(kafkaPodAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/cluster", clusterName));
assertThat(kafkaPodAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/name", KafkaResources.kafkaStatefulSetName(clusterName)));
String rackId = cmdKubeClient().execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cat /opt/kafka/init/rack.id").out();
assertThat(rackId.trim(), is("zone"));
String brokerRack = cmdKubeClient().execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cat /tmp/strimzi.properties | grep broker.rack").out();
assertThat(brokerRack.contains("broker.rack=zone"), is(true));
String uid = kubeClient().getPodUid(KafkaResources.kafkaPodName(clusterName, 0));
List<Event> events = kubeClient().listEventsByResourceUid(uid);
assertThat(events, hasAllOfReasons(Scheduled, Pulled, Created, Started));
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
    .withProducerName(producerName)
    .withConsumerName(consumerName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
    .withTopicName(TOPIC_NAME)
    .withMessageCount(MESSAGE_COUNT)
    .withDelayMs(0)
    .build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
}
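The assertions on rack.id and broker.rack resolve to "zone", so the worker nodes are assumed to have been labeled with the rack-key topology key before the test runs. A minimal sketch of that setup step, assuming the fabric8 client and a hypothetical node name (this is not part of the test itself):
// Sketch only: label a worker node with the topology key used above so the Kafka
// init container can resolve broker.rack. The node name is hypothetical.
kubeClient().getClient().nodes()
    .withName("worker-node-0")
    .edit(node -> new NodeBuilder(node)
        .editMetadata()
            .addToLabels("rack-key", "zone")
        .endMetadata()
        .build());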
use of io.strimzi.systemtest.annotations.IsolatedTest in project strimzi by strimzi.
the class SpecificIsolatedST method testRackAwareConnectCorrectDeployment.
@IsolatedTest("Modification of shared Cluster Operator configuration")
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
@Tag(ACCEPTANCE)
@Tag(INTERNAL_CLIENTS_USED)
void testRackAwareConnectCorrectDeployment(ExtensionContext extensionContext) {
assumeFalse(Environment.isNamespaceRbacScope());
assumeTrue(!Environment.isHelmInstall() && !Environment.isOlmInstall());
String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
// We need to update the CO configuration to set OPERATION_TIMEOUT to a shorter value, because we expect a timeout in this test
Map<String, String> coSnapshot = DeploymentUtils.depSnapshot(clusterOperator.getDeploymentNamespace(), ResourceManager.getCoDeploymentName());
// We have to install the CO in the class-level resource stack, otherwise it would be deleted at the end of this test case and all following tests would fail
clusterOperator.unInstall();
clusterOperator = clusterOperator.defaultInstallation()
    .withOperationTimeout(CO_OPERATION_TIMEOUT_SHORT)
    .withReconciliationInterval(Constants.RECONCILIATION_INTERVAL)
    .createInstallation()
    .runBundleInstallation();
coSnapshot = DeploymentUtils.waitTillDepHasRolled(ResourceManager.getCoDeploymentName(), 1, coSnapshot);
String rackKey = "rack-key";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
    .editSpec()
        .editKafka()
            .withNewRack().withTopologyKey(rackKey).endRack()
            .addToConfig("replica.selector.class", "org.apache.kafka.common.replica.RackAwareReplicaSelector")
        .endKafka()
    .endSpec()
    .build());
// We should create the topic before KafkaConnect - the topic is recreated if we delete it before KafkaConnect
String topicName = "rw-" + KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
String kafkaClientsPodName = kubeClient().listPodsByPrefixInName(kafkaClientsName).get(0).getMetadata().getName();
LOGGER.info("Deploy KafkaConnect with correct rack-aware topology key: {}", rackKey);
KafkaConnect kc = KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 1)
    .editSpec()
        .withNewRack().withTopologyKey(rackKey).endRack()
        .addToConfig("key.converter.schemas.enable", false)
        .addToConfig("value.converter.schemas.enable", false)
        .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
        .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
    .endSpec()
    .build();
resourceManager.createResource(extensionContext, kc);
NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, kc, KafkaConnectResources.deploymentName(clusterName));
String connectPodName = kubeClient().listPodNames(Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0);
Affinity connectPodSpecAffinity = kubeClient().getDeployment(KafkaConnectResources.deploymentName(clusterName)).getSpec().getTemplate().getSpec().getAffinity();
NodeSelectorRequirement connectPodNodeSelectorRequirement = connectPodSpecAffinity.getNodeAffinity()
    .getRequiredDuringSchedulingIgnoredDuringExecution()
    .getNodeSelectorTerms().get(0)
    .getMatchExpressions().get(0);
Pod connectPod = kubeClient().getPod(connectPodName);
NodeAffinity nodeAffinity = connectPod.getSpec().getAffinity().getNodeAffinity();
LOGGER.info("PodName: {}\nNodeAffinity: {}", connectPodName, nodeAffinity);
assertThat(connectPodNodeSelectorRequirement.getKey(), is(rackKey));
assertThat(connectPodNodeSelectorRequirement.getOperator(), is("Exists"));
KafkaConnectUtils.sendReceiveMessagesThroughConnect(connectPodName, topicName, kafkaClientsPodName, clusterOperator.getDeploymentNamespace(), clusterName);
// Revert changes for CO deployment
clusterOperator.unInstall();
clusterOperator = clusterOperator.defaultInstallation().createInstallation().runInstallation();
DeploymentUtils.waitTillDepHasRolled(ResourceManager.getCoDeploymentName(), 1, coSnapshot);
}
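A follow-up assertion could mirror the broker-side checks from testRackAware and verify that the Connect pod's init container resolved the same topology label. This is a hedged sketch only, not part of the original test; the rack.id file path inside the Connect pod and the expected value "zone" are assumptions about the test environment.
// Sketch only: check the rack resolved by the Connect init container, analogous to
// the /opt/kafka/init/rack.id check on the broker pod. Path and value are assumptions.
String connectRackId = cmdKubeClient().execInPod(connectPodName, "/bin/bash", "-c",
    "cat /opt/kafka/init/rack.id").out();
assertThat(connectRackId.trim(), is("zone"));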
use of io.strimzi.systemtest.annotations.IsolatedTest in project strimzi by strimzi.
the class RollingUpdateST method testClusterOperatorFinishAllRollingUpdates.
@IsolatedTest("Deleting Pod of Shared Cluster Operator")
@Tag(ROLLING_UPDATE)
void testClusterOperatorFinishAllRollingUpdates(ExtensionContext extensionContext) {
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editMetadata().withNamespace(namespace).endMetadata().build());
Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
Map<String, String> zkPods = PodUtils.podSnapshot(namespace, zkSelector);
// Changes to readiness probe should trigger a rolling update
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
kafka.getSpec().getKafka().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
kafka.getSpec().getZookeeper().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
}, namespace);
TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods(namespace).stream().filter(pod -> pod.getStatus().getPhase().equals("Running")).map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).size() < kubeClient().listPods().size());
LabelSelector coLabelSelector = kubeClient(INFRA_NAMESPACE).getDeployment(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName()).getSpec().getSelector();
LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
LOGGER.info("Cluster Operator pod deleted");
RollingUpdateUtils.waitTillComponentHasRolled(namespace, zkSelector, 3, zkPods);
TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods().stream().map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).contains("Pending"));
LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
LOGGER.info("Cluster Operator pod deleted");
RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, 3, kafkaPods);
}
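Between deleting the Cluster Operator pod and the final waitTillComponentHasRolled call, an intermediate readiness check can make failures easier to diagnose. A minimal sketch reusing helpers already shown above (assuming the namespace-less overload of waitForDeploymentAndPodsReady targets the operator's namespace):
// Sketch only: make sure the replacement Cluster Operator pod is ready before
// expecting it to pick up and finish the inherited rolling update.
DeploymentUtils.waitForDeploymentAndPodsReady(ResourceManager.getCoDeploymentName(), 1);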