Usage example of io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName in the strimzi project: class KafkaST, method testAppDomainLabels.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testAppDomainLabels(ExtensionContext extensionContext) {
    // Verifies that the recommended app-domain labels are present on every
    // operator-managed resource of the cluster (pods, stateful sets / pod sets,
    // services, secrets, config maps) and that the cluster still serves plain
    // produce/consume traffic afterwards.
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("---> PODS <---");
    // Only cluster-owned pods: the test-client pod is excluded because it is not
    // managed by the operator and does not carry the operator-applied labels.
    List<Pod> pods = kubeClient(namespaceName).listPods(namespaceName, clusterName).stream()
        .filter(pod -> pod.getMetadata().getName().startsWith(clusterName))
        .filter(pod -> !pod.getMetadata().getName().startsWith(clusterName + "-" + Constants.KAFKA_CLIENTS))
        .collect(Collectors.toList());

    for (Pod pod : pods) {
        LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName());
        verifyAppLabels(pod.getMetadata().getLabels());
    }

    LOGGER.info("---> STATEFUL SETS <---");

    Map<String, String> kafkaLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));

    LOGGER.info("Getting labels from stateful set of kafka resource");
    verifyAppLabels(kafkaLabels);

    Map<String, String> zooLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName));

    LOGGER.info("Getting labels from stateful set of zookeeper resource");
    verifyAppLabels(zooLabels);

    LOGGER.info("---> SERVICES <---");

    List<Service> services = kubeClient(namespaceName).listServices(namespaceName).stream()
        .filter(service -> service.getMetadata().getName().startsWith(clusterName))
        .collect(Collectors.toList());

    for (Service service : services) {
        LOGGER.info("Getting labels from {} service", service.getMetadata().getName());
        verifyAppLabels(service.getMetadata().getLabels());
    }

    LOGGER.info("---> SECRETS <---");

    // Only Opaque secrets belong to the cluster here; other types (e.g. service
    // account tokens) are not labelled by the operator.
    List<Secret> secrets = kubeClient(namespaceName).listSecrets(namespaceName).stream()
        .filter(secret -> secret.getMetadata().getName().startsWith(clusterName) && secret.getType().equals("Opaque"))
        .collect(Collectors.toList());

    for (Secret secret : secrets) {
        LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName());
        verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels());
    }

    LOGGER.info("---> CONFIG MAPS <---");

    List<ConfigMap> configMaps = kubeClient(namespaceName).listConfigMapsInSpecificNamespace(namespaceName, clusterName);

    for (ConfigMap configMap : configMaps) {
        LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName());
        verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels());
    }

    // Finally, make sure the cluster is still functional end-to-end.
    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );
}
Usage example of io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName in the strimzi project: class KafkaST, method testJvmAndResources.
@ParallelNamespaceTest
void testJvmAndResources(ExtensionContext extensionContext) {
    // Deploys a cluster with explicit resource requests/limits and JVM options
    // (heap sizes, -XX flags, -D system properties) for Kafka, Zookeeper and the
    // Entity Operator, then asserts the values are propagated into the containers
    // and that the CO reconciliation does not trigger a rolling update.
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));

    ArrayList<SystemProperty> javaSystemProps = new ArrayList<>();
    javaSystemProps.add(new SystemPropertyBuilder().withName("javax.net.debug").withValue("verbose").build());

    Map<String, String> jvmOptionsXX = new HashMap<>();
    jvmOptionsXX.put("UseG1GC", "true");

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
        .editSpec()
            .editKafka()
                .withResources(new ResourceRequirementsBuilder()
                    .addToLimits("memory", new Quantity("1.5Gi"))
                    .addToLimits("cpu", new Quantity("1"))
                    .addToRequests("memory", new Quantity("1Gi"))
                    .addToRequests("cpu", new Quantity("50m"))
                    .build())
                .withNewJvmOptions()
                    .withXmx("1g")
                    .withXms("512m")
                    .withXx(jvmOptionsXX)
                .endJvmOptions()
            .endKafka()
            .editZookeeper()
                .withResources(new ResourceRequirementsBuilder()
                    .addToLimits("memory", new Quantity("1G"))
                    .addToLimits("cpu", new Quantity("0.5"))
                    .addToRequests("memory", new Quantity("0.5G"))
                    .addToRequests("cpu", new Quantity("25m"))
                    .build())
                .withNewJvmOptions()
                    .withXmx("1G")
                    .withXms("512M")
                    .withXx(jvmOptionsXX)
                .endJvmOptions()
            .endZookeeper()
            .withNewEntityOperator()
                .withNewTopicOperator()
                    .withResources(new ResourceRequirementsBuilder()
                        .addToLimits("memory", new Quantity("1024Mi"))
                        .addToLimits("cpu", new Quantity("500m"))
                        .addToRequests("memory", new Quantity("384Mi"))
                        .addToRequests("cpu", new Quantity("0.025"))
                        .build())
                    .withNewJvmOptions()
                        .withXmx("2G")
                        .withXms("1024M")
                        .withJavaSystemProperties(javaSystemProps)
                    .endJvmOptions()
                .endTopicOperator()
                .withNewUserOperator()
                    .withResources(new ResourceRequirementsBuilder()
                        .addToLimits("memory", new Quantity("512M"))
                        .addToLimits("cpu", new Quantity("300m"))
                        .addToRequests("memory", new Quantity("256M"))
                        .addToRequests("cpu", new Quantity("30m"))
                        .build())
                    .withNewJvmOptions()
                        .withXmx("1G")
                        .withXms("512M")
                        .withJavaSystemProperties(javaSystemProps)
                    .endJvmOptions()
                .endUserOperator()
            .endEntityOperator()
        .endSpec()
        .build());

    // Make snapshots for Kafka cluster to meke sure that there is no rolling update after CO reconciliation
    final String eoDepName = KafkaResources.entityOperatorDeploymentName(clusterName);
    final Map<String, String> zkPods = PodUtils.podSnapshot(namespaceName, zkSelector);
    final Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    final Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDepName);

    assertResources(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "1536Mi", "1", "1Gi", "50m");
    assertExpectedJavaOpts(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "-Xmx1g", "-Xms512m", "-XX:+UseG1GC");

    assertResources(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "1G", "500m", "500M", "25m");
    assertExpectedJavaOpts(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "-Xmx1G", "-Xms512M", "-XX:+UseG1GC");

    Optional<Pod> pod = kubeClient(namespaceName).listPods(namespaceName).stream()
        .filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
        .findFirst();
    assertThat("EO pod does not exist", pod.isPresent(), is(true));

    assertResources(namespaceName, pod.get().getMetadata().getName(), "topic-operator", "1Gi", "500m", "384Mi", "25m");
    assertResources(namespaceName, pod.get().getMetadata().getName(), "user-operator", "512M", "300m", "256M", "30m");
    assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "topic-operator", "-Xmx2G", "-Xms1024M", null);
    assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "user-operator", "-Xmx1G", "-Xms512M", null);

    String eoPod = eoPods.keySet().toArray()[0].toString();
    kubeClient(namespaceName).getPod(namespaceName, eoPod).getSpec().getContainers().forEach(container -> {
        // The tls-sidecar container has no JVM, so the env vars are absent there.
        if (!container.getName().equals("tls-sidecar")) {
            LOGGER.info("Check if -D java options are present in {}", container.getName());

            String javaSystemProp = container.getEnv().stream()
                .filter(envVar -> envVar.getName().equals("STRIMZI_JAVA_SYSTEM_PROPERTIES"))
                .findFirst().orElseThrow().getValue();
            String javaOpts = container.getEnv().stream()
                .filter(envVar -> envVar.getName().equals("STRIMZI_JAVA_OPTS"))
                .findFirst().orElseThrow().getValue();

            assertThat(javaSystemProp, is("-Djavax.net.debug=verbose"));

            if (container.getName().equals("topic-operator")) {
                assertThat(javaOpts, is("-Xms1024M -Xmx2G"));
            }
            if (container.getName().equals("user-operator")) {
                assertThat(javaOpts, is("-Xms512M -Xmx1G"));
            }
        }
    });

    LOGGER.info("Checking no rolling update for Kafka cluster");
    RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, zkSelector, zkPods);
    RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, kafkaPods);
    DeploymentUtils.waitForNoRollingUpdate(namespaceName, eoDepName, eoPods);
}
Usage example of io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName in the strimzi project: class RollingUpdateST, method testClusterOperatorFinishAllRollingUpdates.
@IsolatedTest("Deleting Pod of Shared Cluster Operator")
@Tag(ROLLING_UPDATE)
// Verifies that the Cluster Operator resumes and completes an in-flight rolling
// update after its pod is killed mid-roll (once during the ZK roll, once during
// the Kafka roll).
void testClusterOperatorFinishAllRollingUpdates(ExtensionContext extensionContext) {
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editMetadata().withNamespace(namespace).endMetadata().build());
// Snapshots (pod name -> uid) used later to detect that every pod rolled.
Map<String, String> kafkaPods = PodUtils.podSnapshot(namespace, kafkaSelector);
Map<String, String> zkPods = PodUtils.podSnapshot(namespace, zkSelector);
// Changes to readiness probe should trigger a rolling update
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
kafka.getSpec().getKafka().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
kafka.getSpec().getZookeeper().setReadinessProbe(new ProbeBuilder().withTimeoutSeconds(6).build());
}, namespace);
// Wait until at least one pod is no longer Running, i.e. the roll has started.
// NOTE(review): the left side lists Running pods in the test namespace while the
// right side counts pods via the default (non-namespaced) client — confirm the
// asymmetry is intentional and both refer to the same pod population.
TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods(namespace).stream().filter(pod -> pod.getStatus().getPhase().equals("Running")).map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).size() < kubeClient().listPods().size());
LabelSelector coLabelSelector = kubeClient(INFRA_NAMESPACE).getDeployment(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName()).getSpec().getSelector();
// First kill: while the Zookeeper roll is in progress.
LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
LOGGER.info("Cluster Operator pod deleted");
// The restarted CO must still finish rolling all 3 ZK pods.
RollingUpdateUtils.waitTillComponentHasRolled(namespace, zkSelector, 3, zkPods);
// Wait until the Kafka roll is visibly in progress (a pod stuck in Pending).
TestUtils.waitFor("rolling update starts", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_STATUS_TIMEOUT, () -> kubeClient(namespace).listPods().stream().map(pod -> pod.getStatus().getPhase()).collect(Collectors.toList()).contains("Pending"));
// Second kill: while the Kafka roll is in progress.
LOGGER.info("Deleting Cluster Operator pod with labels {}", coLabelSelector);
kubeClient(INFRA_NAMESPACE).deletePodsByLabelSelector(coLabelSelector);
LOGGER.info("Cluster Operator pod deleted");
// The restarted CO must still finish rolling all 3 Kafka pods.
RollingUpdateUtils.waitTillComponentHasRolled(namespace, kafkaSelector, 3, kafkaPods);
}
Usage example of io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName in the strimzi-kafka-operator project: class KafkaST, method testLabelsAndAnnotationForPVC.
@ParallelNamespaceTest
void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) {
    // Verifies that custom labels/annotations set via the Kafka CR template are
    // applied to the stateful set (or pod set) and to the PVCs, and that editing
    // them in the CR propagates to the existing PVCs.
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String labelAnnotationKey = "testKey";
    final String firstValue = "testValue";
    final String changedValue = "editedTestValue";

    Map<String, String> pvcLabel = new HashMap<>();
    pvcLabel.put(labelAnnotationKey, firstValue);
    // Independent copy: the original aliased the same map for labels and
    // annotations, which made the later updates to "both" maps a confusing no-op.
    Map<String, String> pvcAnnotation = new HashMap<>(pvcLabel);

    Map<String, String> statefulSetLabels = new HashMap<>();
    statefulSetLabels.put("app.kubernetes.io/part-of", "some-app");
    statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app");

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .withNewTemplate()
                    .withNewStatefulset()
                        .withNewMetadata()
                            .withLabels(statefulSetLabels)
                        .endMetadata()
                    .endStatefulset()
                    .withNewPodSet()
                        .withNewMetadata()
                            .withLabels(statefulSetLabels)
                        .endMetadata()
                    .endPodSet()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withStorage(new JbodStorageBuilder().withVolumes(
                    new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("20Gi").build(),
                    new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("10Gi").build())
                    .build())
            .endKafka()
            .editZookeeper()
                .withNewTemplate()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withNewPersistentClaimStorage()
                    .withDeleteClaim(false)
                    .withSize("3Gi")
                .endPersistentClaimStorage()
            .endZookeeper()
        .endSpec()
        .build());

    LOGGER.info("Check if Kubernetes labels are applied");
    Map<String, String> actualStatefulSetLabels = StUtils.getLabelsOfStatefulSetOrStrimziPodSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app"));
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app"));
    LOGGER.info("Kubernetes labels are correctly set and present");

    List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName))
        .collect(Collectors.toList());

    // 3 Kafka brokers x 2 JBOD volumes + 1 Zookeeper node = 7 PVCs.
    assertThat(pvcs.size(), is(7));

    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        assertThat(firstValue, is(pvc.getMetadata().getLabels().get(labelAnnotationKey)));
        assertThat(firstValue, is(pvc.getMetadata().getAnnotations().get(labelAnnotationKey)));
    }

    pvcLabel.put(labelAnnotationKey, changedValue);
    pvcAnnotation.put(labelAnnotationKey, changedValue);

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
    }, namespaceName);

    PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey);
    PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName))
        .collect(Collectors.toList());

    LOGGER.info(pvcs.toString());

    assertThat(pvcs.size(), is(7));

    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue));
        assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue));
    }
}
Usage example of io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName in the strimzi-kafka-operator project: class RollingUpdateST, method testKafkaAndZookeeperScaleUpScaleDown.
@ParallelNamespaceTest
@Tag(ACCEPTANCE)
@Tag(SCALABILITY)
void testKafkaAndZookeeperScaleUpScaleDown(ExtensionContext extensionContext) {
    // Scales Kafka up (3 -> 7) and down (7 -> 3) and Zookeeper up (3 -> 5),
    // verifying after each step that TLS clients can still consume, that PVC
    // counts track the replica counts, and that topic creation via ZK works.
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
        .editSpec()
            .editKafka()
                .addToConfig(singletonMap("default.replication.factor", 1))
                .addToConfig("auto.create.topics.enable", "false")
            .endKafka()
        .endSpec()
        .build());

    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);

    KafkaUser user = KafkaUserTemplates.tlsUser(namespaceName, clusterName, userName).build();

    resourceManager.createResource(extensionContext, user);

    testDockerImagesForKafkaCluster(clusterName, clusterOperator.getDeploymentNamespace(), namespaceName, 3, 1, false);
    // kafka cluster already deployed

    LOGGER.info("Running kafkaScaleUpScaleDown {}", clusterName);

    final int initialReplicas = kubeClient(namespaceName).getClient().pods().inNamespace(namespaceName).withLabelSelector(kafkaSelector).list().getItems().size();
    assertEquals(3, initialReplicas);

    // Topic replicated across all initial brokers so data survives the scale-down.
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 3, initialReplicas, initialReplicas).build());
    resourceManager.createResource(extensionContext, false, KafkaClientsTemplates.kafkaClients(true, kafkaClientsName, user).build());

    final String defaultKafkaClientsPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(namespaceName, kafkaClientsName).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(defaultKafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(userName)
        .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
        .build();

    internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();

    // scale up
    final int scaleTo = initialReplicas + 4;
    LOGGER.info("Scale up Kafka to {}", scaleTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        kafka.getSpec().getKafka().setReplicas(scaleTo);
    }, namespaceName);

    kafkaPods = RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, scaleTo, kafkaPods);

    LOGGER.info("Kafka scale up to {} finished", scaleTo);

    // Fresh consumer group so the consumer re-reads all messages after the roll.
    internalKafkaClient = internalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();

    internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    assertThat((int) kubeClient(namespaceName).listPersistentVolumeClaims().stream()
        .filter(pvc -> pvc.getMetadata().getName().contains(KafkaResources.kafkaStatefulSetName(clusterName))).count(), is(scaleTo));

    final int zookeeperScaleTo = initialReplicas + 2;
    LOGGER.info("Scale up Zookeeper to {}", zookeeperScaleTo);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getZookeeper().setReplicas(zookeeperScaleTo), namespaceName);
    RollingUpdateUtils.waitForComponentAndPodsReady(namespaceName, zkSelector, zookeeperScaleTo);
    LOGGER.info("Zookeeper scale up to {} finished", zookeeperScaleTo);

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();

    internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    // scale down
    LOGGER.info("Scale down Kafka to {}", initialReplicas);
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getKafka().setReplicas(initialReplicas), namespaceName);
    RollingUpdateUtils.waitTillComponentHasRolled(namespaceName, kafkaSelector, initialReplicas, kafkaPods);
    LOGGER.info("Kafka scale down to {} finished", initialReplicas);

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();

    internalKafkaClient.consumesTlsMessagesUntilOperationIsSuccessful(MESSAGE_COUNT);

    // Only the data PVCs of the remaining brokers should survive the scale-down
    // (deleteClaim defaults apply to the removed brokers' claims).
    assertThat(kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(pvc -> pvc.getMetadata().getName().contains("data-" + KafkaResources.kafkaStatefulSetName(clusterName)))
        .collect(Collectors.toList()).size(), is(initialReplicas));

    // Create new topic to ensure, that ZK is working properly
    String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic();

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, newTopicName).build());

    internalKafkaClient = internalKafkaClient.toBuilder()
        .withTopicName(newTopicName)
        .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
        .build();

    internalKafkaClient.produceAndConsumesTlsMessagesUntilBothOperationsAreSuccessful();
}
Aggregations