Use of io.strimzi.systemtest.Constants.SCALABILITY in project strimzi-kafka-operator by strimzi.
From the class ConnectIsolatedST, method testScaleConnectWithConnectorToZero. The test scales a KafkaConnect cluster that manages a KafkaConnector down to zero replicas, then verifies that the KafkaConnect resource stays Ready while the connector reports NotReady.
@ParallelNamespaceTest
@Tag(SCALABILITY)
@Tag(CONNECTOR_OPERATOR)
void testScaleConnectWithConnectorToZero(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 2)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .build());
    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
        .editSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
            .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("topics", topicName)
        .endSpec()
        .build());

    String connectDeploymentName = KafkaConnectResources.deploymentName(clusterName);
    List<Pod> connectPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, connectDeploymentName);
    assertThat(connectPods.size(), is(2));

    // scale down to zero replicas
    LOGGER.info("Scaling KafkaConnect down to zero");
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName,
        kafkaConnect -> kafkaConnect.getSpec().setReplicas(0), namespaceName);

    KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName);
    PodUtils.waitForPodsReady(namespaceName,
        kubeClient(namespaceName).getDeploymentSelectors(namespaceName, connectDeploymentName), 0, true);

    connectPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, connectDeploymentName);
    KafkaConnectStatus connectStatus = KafkaConnectResource.kafkaConnectClient()
        .inNamespace(namespaceName).withName(clusterName).get().getStatus();
    KafkaConnectorStatus connectorStatus = KafkaConnectorResource.kafkaConnectorClient()
        .inNamespace(namespaceName).withName(clusterName).get().getStatus();

    // with zero replicas the KafkaConnect resource stays Ready, but the connector
    // has nowhere to run and must report NotReady with an explanatory message
    assertThat(connectPods.size(), is(0));
    assertThat(connectStatus.getConditions().get(0).getType(), is(Ready.toString()));
    assertThat(connectorStatus.getConditions().stream()
        .anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
    assertThat(connectorStatus.getConditions().stream()
        .anyMatch(condition -> condition.getMessage().contains("has 0 replicas")), is(true));
}
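For context on how the SCALABILITY tag is consumed: Strimzi's system tests are selected by JUnit 5 tags (in the Strimzi build this is typically done through Maven group filtering). Below is a minimal, hypothetical sketch of selecting tagged tests with the JUnit Platform Launcher API; the runner class name and the lowercase tag value "scalability" are assumptions for illustration, not taken from the Strimzi source.

import static org.junit.platform.engine.discovery.DiscoverySelectors.selectPackage;

import java.io.PrintWriter;
import org.junit.platform.launcher.Launcher;
import org.junit.platform.launcher.LauncherDiscoveryRequest;
import org.junit.platform.launcher.TagFilter;
import org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder;
import org.junit.platform.launcher.core.LauncherFactory;
import org.junit.platform.launcher.listeners.SummaryGeneratingListener;

public class ScalabilityTestRunner {
    public static void main(String[] args) {
        // Discover tests in the system-test package, keeping only those
        // annotated with @Tag("scalability") -- assumed to be the value
        // behind Constants.SCALABILITY.
        LauncherDiscoveryRequest request = LauncherDiscoveryRequestBuilder.request()
                .selectors(selectPackage("io.strimzi.systemtest"))
                .filters(TagFilter.includeTags("scalability"))
                .build();

        Launcher launcher = LauncherFactory.create();
        SummaryGeneratingListener listener = new SummaryGeneratingListener();
        launcher.execute(request, listener);
        listener.getSummary().printTo(new PrintWriter(System.out));
    }
}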
Use of io.strimzi.systemtest.Constants.SCALABILITY in project strimzi-kafka-operator by strimzi.
From the class RollingUpdateST, method testKafkaAndZookeeperScaleUpScaleDown. The test scales Kafka up by four brokers and ZooKeeper up by two nodes, then scales Kafka back down, verifying after each step that TLS clients can still consume messages and that the persistent volume claim count matches the broker count.
@ParallelNamespaceTest
@Tag(ACCEPTANCE)
@Tag(SCALABILITY)
void testKafkaAndZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
        .editSpec()
            .editKafka()
                .addToConfig(singletonMap("default.replication.factor", 1))
                .addToConfig("auto.create.topics.enable", "false")
            .endKafka()
        .endSpec()
        .build());

    // snapshot of the current Kafka pods, used later to detect when the cluster has rolled
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);

    KafkaUser user = KafkaUserTemplates.tlsUser(namespaceName, clusterName, userName).build();
    resourceManager.createResource(extensionContext, user);

    testDockerImagesForKafkaCluster(clusterName, clusterOperator.getDeploymentNamespace(), namespaceName, 3, 1, false);

    // kafka cluster already deployed
    LOGGER.info("Running kafkaScaleUpScaleDown {}", clusterName);
    final int initialReplicas = kubeClient(namespaceName).getClient().pods()
        .inNamespace(namespaceName).withLabelSelector(kafkaSelector).list().getItems().size();
    assertEquals(3, initialReplicas);

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 3, initialReplicas, initialReplicas).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());

    final String defaultKafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(defaultKafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(userName)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();

    // scale up Kafka
    final int scaleTo = initialReplicas + 4;
    LOGGER.info("Scale up Kafka to {}", scaleTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
        kafka -> kafka.getSpec().getKafka().setReplicas(scaleTo), namespaceName);

    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, scaleTo, kafkaPods);
    LOGGER.info("Kafka scale up to {} finished", scaleTo);

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();
    internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    assertThat((int) kubeClient(namespaceName).listPersistentVolumeClaims().stream()
        .filter(pvc -> pvc.getMetadata().getName().contains(KafkaResources.kafkaStatefulSetName(clusterName)))
        .count(), is(scaleTo));

    // scale up ZooKeeper
    final int zookeeperScaleTo = initialReplicas + 2;
    LOGGER.info("Scale up Zookeeper to {}", zookeeperScaleTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
        k -> k.getSpec().getZookeeper().setReplicas(zookeeperScaleTo), namespaceName);
    RollingUpdateUtils.waitForComponentAndPodsReady(namespaceName, zkSelector, zookeeperScaleTo);
    LOGGER.info("Zookeeper scale up to {} finished", zookeeperScaleTo);

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();
    internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    // scale down Kafka back to the initial replica count
    LOGGER.info("Scale down Kafka to {}", initialReplicas);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
        k -> k.getSpec().getKafka().setReplicas(initialReplicas), namespaceName);
    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, initialReplicas, kafkaPods);
    LOGGER.info("Kafka scale down to {} finished", initialReplicas);

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();
    internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    assertThat(kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(pvc -> pvc.getMetadata().getName().contains("data-" + KafkaResources.kafkaStatefulSetName(clusterName)))
        .collect(Collectors.toList()).size(), is(initialReplicas));

    // create a new topic to ensure that ZooKeeper is still working properly
    String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, newTopicName).build());

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withTopicName(newTopicName)
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();
    internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();
}
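The scale operations above all follow the same snapshot-and-wait pattern: record the pods matched by a label selector before the change, apply the spec change, then poll until the pod set differs from the snapshot and reaches the expected size. The sketch below is a simplified, hypothetical version of that pattern using the Fabric8 Kubernetes client; the real logic lives in Strimzi's PodUtils and RollingUpdateUtils and additionally enforces pod readiness and configurable timeouts.

import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.client.KubernetesClient;
import java.util.Map;
import java.util.stream.Collectors;

public class ScaleWaitSketch {

    // Map each pod name to its UID; a recreated pod keeps its name but gets a new UID,
    // so the map changes both when pods are added/removed and when they are rolled.
    static Map<String, String> podSnapshot(KubernetesClient client, String ns, LabelSelector selector) {
        return client.pods().inNamespace(ns).withLabelSelector(selector).list().getItems().stream()
            .collect(Collectors.toMap(p -> p.getMetadata().getName(), p -> p.getMetadata().getUid()));
    }

    // Poll until the selector matches the expected pod count and the pod set differs
    // from the "before" snapshot; returns the new snapshot as the baseline for the
    // next change (mirroring how the test reassigns kafkaPods after each scale).
    static Map<String, String> waitUntilScaled(KubernetesClient client, String ns, LabelSelector selector,
                                               int expectedPods, Map<String, String> before) throws InterruptedException {
        long deadline = System.currentTimeMillis() + 600_000; // 10-minute timeout (assumed value)
        while (System.currentTimeMillis() < deadline) {
            Map<String, String> now = podSnapshot(client, ns, selector);
            if (now.size() == expectedPods && !now.equals(before)) {
                return now;
            }
            Thread.sleep(5_000); // poll interval (assumed value)
        }
        throw new AssertionError("Pods did not reach expected state in time");
    }
}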
The same two usages also appear in project strimzi by strimzi (class RollingUpdateST, method testKafkaAndZookeeperScaleUpScaleDown; class ConnectIsolatedST, method testScaleConnectWithConnectorToZero); the code is identical to the two examples above.