Use of io.strimzi.systemtest.annotations.ParallelTest in project strimzi by strimzi: class CustomResourceStatusIsolatedST, method testKafkaConnectAndConnectorStatus.
@ParallelTest
@Tag(CONNECT)
@Tag(CONNECTOR_OPERATOR)
@Tag(CONNECT_COMPONENTS)
void testKafkaConnectAndConnectorStatus(ExtensionContext extensionContext) {
    String connectUrl = KafkaConnectResources.url(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, Constants.INFRA_NAMESPACE, 8083);

    // Deploy KafkaConnect with connector resources enabled, plus a KafkaConnector, then check their initial status
    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, 1)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .withNamespace(Constants.INFRA_NAMESPACE)
        .endMetadata()
        .build());
    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
        .editMetadata()
            .withNamespace(Constants.INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .addToConfig("topic", EXAMPLE_TOPIC_NAME)
        .endSpec()
        .build());
    assertKafkaConnectStatus(1, connectUrl);
    assertKafkaConnectorStatus(1, "RUNNING|UNASSIGNED", "source", List.of());

    // Break the Connect cluster with an unschedulable CPU request, wait for NotReady, then restore it
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
        kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder().addToRequests("cpu", new Quantity("100000000m")).build()), Constants.INFRA_NAMESPACE);
    KafkaConnectUtils.waitForConnectNotReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
        kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder().addToRequests("cpu", new Quantity("100m")).build()), Constants.INFRA_NAMESPACE);
    KafkaConnectUtils.waitForConnectReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
    assertKafkaConnectStatus(3, connectUrl);

    // Point the connector at a non-existing Connect cluster; its connector status should disappear until the label is restored
    KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
        kc -> kc.getMetadata().setLabels(Collections.singletonMap(Labels.STRIMZI_CLUSTER_LABEL, "non-existing-connect-cluster")), Constants.INFRA_NAMESPACE);
    KafkaConnectorUtils.waitForConnectorNotReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
    assertThat(KafkaConnectorResource.kafkaConnectorClient().inNamespace(Constants.INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus().getConnectorStatus(), is(nullValue()));
    KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
        kc -> kc.getMetadata().setLabels(Collections.singletonMap(Labels.STRIMZI_CLUSTER_LABEL, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)), Constants.INFRA_NAMESPACE);
    KafkaConnectorUtils.waitForConnectorReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
    assertKafkaConnectorStatus(1, "RUNNING|UNASSIGNED", "source", List.of(EXAMPLE_TOPIC_NAME));

    // Switch the connector to a non-existing class, wait for NotReady, then restore the original class
    String defaultClass = KafkaConnectorResource.kafkaConnectorClient().inNamespace(Constants.INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getSpec().getClassName();
    KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
        kc -> kc.getSpec().setClassName("non-existing-class"), Constants.INFRA_NAMESPACE);
    KafkaConnectorUtils.waitForConnectorNotReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
    assertThat(KafkaConnectorResource.kafkaConnectorClient().inNamespace(Constants.INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus().getConnectorStatus(), is(nullValue()));
    KafkaConnectorResource.replaceKafkaConnectorResourceInSpecificNamespace(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, kc -> {
        kc.getMetadata().setLabels(Collections.singletonMap(Labels.STRIMZI_CLUSTER_LABEL, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME));
        kc.getSpec().setClassName(defaultClass);
    }, Constants.INFRA_NAMESPACE);
    KafkaConnectorUtils.waitForConnectorReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
    assertKafkaConnectorStatus(3, "RUNNING|UNASSIGNED", "source", List.of(EXAMPLE_TOPIC_NAME));
}
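The assertKafkaConnectStatus and assertKafkaConnectorStatus helpers are defined elsewhere in CustomResourceStatusIsolatedST and are not shown on this page. A minimal sketch of the kind of check they could perform, written inline against the same kafkaConnectorClient() used above and assuming the status exposes an observed generation and a Ready condition:

    KafkaConnectorStatus connectorStatus = KafkaConnectorResource.kafkaConnectorClient()
        .inNamespace(Constants.INFRA_NAMESPACE)
        .withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
        .get()
        .getStatus();
    // expected observed generation is the first argument passed to the helper (1 or 3 above)
    assertThat(connectorStatus.getObservedGeneration(), is(1L));
    // the connector should report a Ready condition with status "True"
    assertThat(connectorStatus.getConditions().stream()
        .anyMatch(condition -> "Ready".equals(condition.getType()) && "True".equals(condition.getStatus())), is(true));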
Use of io.strimzi.systemtest.annotations.ParallelTest in project strimzi by strimzi: class CustomResourceStatusIsolatedST, method testKafkaMirrorMakerStatusWrongBootstrap.
@ParallelTest
@Tag(MIRROR_MAKER)
void testKafkaMirrorMakerStatusWrongBootstrap(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String mirrorMakerName = mapWithClusterNames.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(mirrorMakerName, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, ClientUtils.generateRandomConsumerGroup(), 1, false)
        .editMetadata()
            .withNamespace(Constants.INFRA_NAMESPACE)
        .endMetadata()
        .build());
    KafkaMirrorMakerUtils.waitForKafkaMirrorMakerReady(Constants.INFRA_NAMESPACE, mirrorMakerName);
    assertKafkaMirrorMakerStatus(1, mirrorMakerName);

    // Break Mirror Maker by pointing its consumer at a non-existing bootstrap address
    KafkaMirrorMakerResource.replaceMirrorMakerResourceInSpecificNamespace(mirrorMakerName,
        mm -> mm.getSpec().getConsumer().setBootstrapServers("non-exists-bootstrap"), Constants.INFRA_NAMESPACE);
    KafkaMirrorMakerUtils.waitForKafkaMirrorMakerNotReady(Constants.INFRA_NAMESPACE, mirrorMakerName);

    // Restore the consumer bootstrap address and wait for Mirror Maker to become Ready again
    KafkaMirrorMakerResource.replaceMirrorMakerResourceInSpecificNamespace(mirrorMakerName,
        mm -> mm.getSpec().getConsumer().setBootstrapServers(KafkaResources.plainBootstrapAddress(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)), Constants.INFRA_NAMESPACE);
    KafkaMirrorMakerUtils.waitForKafkaMirrorMakerReady(Constants.INFRA_NAMESPACE, mirrorMakerName);
    assertKafkaMirrorMakerStatus(3, mirrorMakerName);
}
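assertKafkaMirrorMakerStatus is another helper of the test class. A hedged sketch of how the NotReady condition could be inspected directly after the bootstrap address is broken; the kafkaMirrorMakerClient() accessor is assumed here by analogy with the kafkaConnectorClient() and kafkaUserClient() accessors used elsewhere on this page:

    // accessor name assumed by analogy with kafkaConnectorClient()/kafkaUserClient(); adjust to the real client accessor
    KafkaMirrorMakerStatus mmStatus = KafkaMirrorMakerResource.kafkaMirrorMakerClient()
        .inNamespace(Constants.INFRA_NAMESPACE)
        .withName(mirrorMakerName)
        .get()
        .getStatus();
    Condition notReady = mmStatus.getConditions().get(0);
    LOGGER.info("MirrorMaker condition: type={}, reason={}, message={}", notReady.getType(), notReady.getReason(), notReady.getMessage());
    assertThat(notReady.getType(), is("NotReady"));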
Use of io.strimzi.systemtest.annotations.ParallelTest in project strimzi by strimzi: class CustomResourceStatusIsolatedST, method testKafkaTopicDecreaseStatus.
@ParallelTest
void testKafkaTopicDecreaseStatus(ExtensionContext extensionContext) throws InterruptedException {
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, topicName, 5)
        .editMetadata()
            .withNamespace(Constants.INFRA_NAMESPACE)
        .endMetadata()
        .build());
    int decreaseTo = 1;

    // Decreasing the partition count is not allowed, so the KafkaTopic is expected to end up NotReady
    LOGGER.info("Decreasing number of partitions to {}", decreaseTo);
    KafkaTopicResource.replaceTopicResourceInSpecificNamespace(topicName, kafkaTopic -> kafkaTopic.getSpec().setPartitions(decreaseTo), Constants.INFRA_NAMESPACE);
    KafkaTopicUtils.waitForKafkaTopicPartitionChange(Constants.INFRA_NAMESPACE, topicName, decreaseTo);
    KafkaTopicUtils.waitForKafkaTopicNotReady(Constants.INFRA_NAMESPACE, topicName);
    assertKafkaTopicDecreasePartitionsStatus(topicName);

    // Wait for the next reconciliation and check that the error is still present in the KafkaTopic status
    LOGGER.info("Wait {} ms for next reconciliation", topicOperatorReconciliationInterval);
    Thread.sleep(topicOperatorReconciliationInterval);
    assertKafkaTopicDecreasePartitionsStatus(topicName);
}
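assertKafkaTopicDecreasePartitionsStatus is defined elsewhere in the test class. A minimal sketch of what such a check could look like, assuming a kafkaTopicClient() accessor analogous to the other typed clients on this page and assuming the Topic Operator records the failure as a NotReady condition whose message mentions the partition decrease:

    // accessor name and condition wording are assumptions for illustration only
    KafkaTopicStatus topicStatus = KafkaTopicResource.kafkaTopicClient()
        .inNamespace(Constants.INFRA_NAMESPACE)
        .withName(topicName)
        .get()
        .getStatus();
    Condition condition = topicStatus.getConditions().get(0);
    assertThat(condition.getType(), is("NotReady"));
    // the message is expected to explain that the number of partitions cannot be decreased
    assertThat(condition.getMessage(), containsString("partition"));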
Use of io.strimzi.systemtest.annotations.ParallelTest in project strimzi by strimzi: class CustomResourceStatusIsolatedST, method testKafkaStatus.
@ParallelTest
@Tag(NODEPORT_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testKafkaStatus(ExtensionContext extensionContext) {
    LOGGER.info("Checking status of deployed kafka cluster");
    KafkaUtils.waitForKafkaReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);

    // Verify that the external listener works before checking the status
    ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
        .withTopicName(TOPIC_NAME)
        .withNamespaceName(Constants.INFRA_NAMESPACE)
        .withClusterName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
        .build();
    externalKafkaClient.verifyProducedAndConsumedMessages(externalKafkaClient.sendMessagesPlain(), externalKafkaClient.receiveMessagesPlain());
    assertKafkaStatus(1, KafkaResources.bootstrapServiceName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME) + "." + Constants.INFRA_NAMESPACE + ".svc");

    // Break the cluster with an unschedulable CPU request on the Topic Operator and wait for NotReady
    KafkaResource.replaceKafkaResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, k -> {
        k.getSpec().getEntityOperator().getTopicOperator().setResources(new ResourceRequirementsBuilder().addToRequests("cpu", new Quantity("100000m")).build());
    });
    LOGGER.info("Wait until cluster will be in NotReady state ...");
    KafkaUtils.waitForKafkaNotReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);

    // Restore a sane CPU request and wait for the cluster to become Ready again
    LOGGER.info("Recovery cluster to Ready state ...");
    KafkaResource.replaceKafkaResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, k -> {
        k.getSpec().getEntityOperator().getTopicOperator().setResources(new ResourceRequirementsBuilder().addToRequests("cpu", new Quantity("100m")).build());
    });
    KafkaUtils.waitForKafkaReady(Constants.INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
    assertKafkaStatus(3, KafkaResources.bootstrapServiceName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME) + "." + Constants.INFRA_NAMESPACE + ".svc");
}
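assertKafkaStatus(expectedObservedGeneration, expectedBootstrapAddress) is also a helper of the test class. A hedged sketch of an inline equivalent, assuming KafkaResource exposes a kafkaClient() accessor and that the Kafka status lists its listeners with their addresses (the accessor and listener field names are assumptions, not the project's verified API):

    // kafkaClient() accessor and listener field names are assumptions for illustration
    KafkaStatus kafkaStatus = KafkaResource.kafkaClient()
        .inNamespace(Constants.INFRA_NAMESPACE)
        .withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
        .get()
        .getStatus();
    assertThat(kafkaStatus.getObservedGeneration(), is(3L));
    // at least one listener in the status is expected to advertise the in-cluster bootstrap service address
    boolean bootstrapListed = kafkaStatus.getListeners().stream()
        .flatMap(listener -> listener.getAddresses().stream())
        .anyMatch(address -> address.getHost().startsWith(KafkaResources.bootstrapServiceName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)));
    assertThat(bootstrapListed, is(true));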
Use of io.strimzi.systemtest.annotations.ParallelTest in project strimzi by strimzi: class CustomResourceStatusIsolatedST, method testKafkaUserStatusNotReady.
@ParallelTest
void testKafkaUserStatusNotReady(ExtensionContext extensionContext) {
    // Simulate NotReady state with userName longer than 64 characters
    String userName = "sasl-use-rabcdefghijklmnopqrstuvxyzabcdefghijklmnopqrstuvxyzabcdef";
    resourceManager.createResource(extensionContext, false, KafkaUserTemplates.defaultUser(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());
    KafkaUserUtils.waitForKafkaUserNotReady(Constants.INFRA_NAMESPACE, userName);

    LOGGER.info("Checking status of deployed KafkaUser {}", userName);
    Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(Constants.INFRA_NAMESPACE).withName(userName).get().getStatus().getConditions().get(0);
    LOGGER.info("KafkaUser Status: {}", kafkaCondition.getStatus());
    LOGGER.info("KafkaUser Type: {}", kafkaCondition.getType());
    LOGGER.info("KafkaUser Message: {}", kafkaCondition.getMessage());
    LOGGER.info("KafkaUser Reason: {}", kafkaCondition.getReason());
    assertThat("KafkaUser is in wrong state!", kafkaCondition.getType(), is(NotReady.toString()));
    LOGGER.info("KafkaUser {} is in desired state: {}", userName, kafkaCondition.getType());

    KafkaUserResource.kafkaUserClient().inNamespace(Constants.INFRA_NAMESPACE).withName(userName).delete();
    KafkaUserUtils.waitForKafkaUserDeletion(Constants.INFRA_NAMESPACE, userName);
}
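For contrast, the happy path with a user name inside the 64-character limit, sketched with the same templates; the waitForKafkaUserReady call is assumed here as the counterpart of the waitForKafkaUserNotReady helper used above:

    // a user name short enough to fit the 64-character limit mentioned above
    String validUserName = "sasl-user-within-limit";
    resourceManager.createResource(extensionContext, KafkaUserTemplates.defaultUser(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, validUserName)
        .editMetadata()
            .withNamespace(Constants.INFRA_NAMESPACE)
        .endMetadata()
        .build());
    // counterpart helper assumed to exist alongside waitForKafkaUserNotReady
    KafkaUserUtils.waitForKafkaUserReady(Constants.INFRA_NAMESPACE, validUserName);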