Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi-kafka-operator by strimzi.
The class FeatureGatesIsolatedST, method testControlPlaneListenerFeatureGate.
/**
* Control Plane Listener
* https://github.com/strimzi/proposals/blob/main/025-control-plain-listener.md
*/
@IsolatedTest("Feature Gates test for disabled ControlPlainListener")
@Tag(INTERNAL_CLIENTS_USED)
public void testControlPlaneListenerFeatureGate(ExtensionContext extensionContext) {
assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
int messageCount = 300;
List<EnvVar> testEnvVars = new ArrayList<>();
int kafkaReplicas = 1;
testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "-ControlPlaneListener", null));
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
    .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
    .withNamespace(INFRA_NAMESPACE)
    .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
    .withExtraEnvVars(testEnvVars)
    .createInstallation()
    .runInstallation();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
LOGGER.info("Check for presence of ContainerPort 9090/tcp (tcp-ctrlplane) in first Kafka pod.");
final Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), clusterName + "-kafka-").get(0);
ContainerPort expectedControlPlaneContainerPort = new ContainerPort(9090, null, null, "tcp-ctrlplane", "TCP");
List<ContainerPort> kafkaPodPorts = kafkaPod.getSpec().getContainers().get(0).getPorts();
assertTrue(kafkaPodPorts.contains(expectedControlPlaneContainerPort));
Map<String, String> kafkaPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), kafkaSelector);
LOGGER.info("Try to send some messages to Kafka over next few minutes.");
KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName)
    .editSpec()
        .withReplicas(kafkaReplicas)
        .withPartitions(kafkaReplicas)
    .endSpec()
    .build();
resourceManager.createResource(extensionContext, kafkaTopic);
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
    .withProducerName(producerName)
    .withConsumerName(consumerName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
    .withTopicName(topicName)
    .withMessageCount(messageCount)
    .withDelayMs(500)
    .withNamespaceName(clusterOperator.getDeploymentNamespace())
    .build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
JobUtils.waitForJobRunning(consumerName, clusterOperator.getDeploymentNamespace());
LOGGER.info("Delete first found Kafka broker pod.");
kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), kafkaPod);
RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
LOGGER.info("Force Rolling Update of Kafka via annotation.");
kafkaPods.keySet().forEach(podName -> {
    kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod)
        .editMetadata()
            .addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")
        .endMetadata()
        .build());
});
LOGGER.info("Wait for next reconciliation to happen.");
RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), kafkaSelector, kafkaReplicas, kafkaPods);
LOGGER.info("Waiting for clients to finish sending/receiving messages.");
ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), messageCount);
ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), messageCount);
}
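For context, the "-ControlPlaneListener" value set above follows the "+Gate"/"-Gate" convention of the STRIMZI_FEATURE_GATES environment variable. Below is a minimal, purely illustrative sketch of parsing such a string; it is not the operator's actual FeatureGates implementation, and the class name is invented.

import java.util.HashMap;
import java.util.Map;

public class FeatureGateStringParser {
    /** Parses a spec like "+GateA,-GateB" into a gate-name -> enabled map. */
    public static Map<String, Boolean> parse(String spec) {
        Map<String, Boolean> gates = new HashMap<>();
        if (spec == null || spec.isBlank()) {
            return gates;
        }
        for (String token : spec.split(",")) {
            String trimmed = token.trim();
            if (trimmed.isEmpty()) {
                continue;
            }
            char sign = trimmed.charAt(0);
            if (sign != '+' && sign != '-') {
                throw new IllegalArgumentException("Gate must start with + or -: " + trimmed);
            }
            gates.put(trimmed.substring(1), sign == '+');
        }
        return gates;
    }

    public static void main(String[] args) {
        // The value used by testControlPlaneListenerFeatureGate above:
        System.out.println(parse("-ControlPlaneListener")); // {ControlPlaneListener=false}
    }
}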
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi-kafka-operator by strimzi.
The class FeatureGatesIsolatedST, method testStrimziPodSetsFeatureGate.
/**
* UseStrimziPodSets feature gate
* https://github.com/strimzi/proposals/blob/main/031-statefulset-removal.md
*/
@IsolatedTest("Feature Gates test for enabled UseStrimziPodSets gate")
@Tag(INTERNAL_CLIENTS_USED)
public void testStrimziPodSetsFeatureGate(ExtensionContext extensionContext) {
assumeFalse(Environment.isOlmInstall() || Environment.isHelmInstall());
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String producerName = "producer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String consumerName = "consumer-test-" + new Random().nextInt(Integer.MAX_VALUE);
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
final LabelSelector zooSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName));
final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
int messageCount = 600;
List<EnvVar> testEnvVars = new ArrayList<>();
int zooReplicas = 1;
int kafkaReplicas = 1;
testEnvVars.add(new EnvVar(Environment.STRIMZI_FEATURE_GATES_ENV, "+UseStrimziPodSets", null));
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
    .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
    .withNamespace(INFRA_NAMESPACE)
    .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
    .withExtraEnvVars(testEnvVars)
    .createInstallation()
    .runInstallation();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaReplicas).build());
LOGGER.info("Try to send some messages to Kafka over next few minutes.");
KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(clusterName, topicName)
    .editSpec()
        .withReplicas(kafkaReplicas)
        .withPartitions(kafkaReplicas)
    .endSpec()
    .build();
resourceManager.createResource(extensionContext, kafkaTopic);
KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
    .withProducerName(producerName)
    .withConsumerName(consumerName)
    .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
    .withTopicName(topicName)
    .withMessageCount(messageCount)
    .withDelayMs(500)
    .withNamespaceName(clusterOperator.getDeploymentNamespace())
    .build();
resourceManager.createResource(extensionContext, kafkaBasicClientJob.producerStrimzi());
resourceManager.createResource(extensionContext, kafkaBasicClientJob.consumerStrimzi());
JobUtils.waitForJobRunning(consumerName, clusterOperator.getDeploymentNamespace());
// Delete one Zoo Pod
Pod zooPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), KafkaResources.zookeeperStatefulSetName(clusterName) + "-").get(0);
LOGGER.info("Delete first found ZooKeeper pod {}", zooPod.getMetadata().getName());
kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), zooPod);
RollingUpdateUtils.waitForComponentAndPodsReady(zooSelector, zooReplicas);
// Delete one Kafka Pod
Pod kafkaPod = PodUtils.getPodsByPrefixInNameWithDynamicWait(clusterOperator.getDeploymentNamespace(), KafkaResources.kafkaStatefulSetName(clusterName) + "-").get(0);
LOGGER.info("Delete first found Kafka broker pod {}", kafkaPod.getMetadata().getName());
kubeClient().deletePod(clusterOperator.getDeploymentNamespace(), kafkaPod);
RollingUpdateUtils.waitForComponentAndPodsReady(kafkaSelector, kafkaReplicas);
// Roll Zoo
LOGGER.info("Force Rolling Update of ZooKeeper via annotation.");
Map<String, String> zooPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), zooSelector);
zooPods.keySet().forEach(podName -> {
    kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod)
        .editMetadata()
            .addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")
        .endMetadata()
        .build());
});
LOGGER.info("Wait for next reconciliation to happen.");
RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), zooSelector, zooReplicas, zooPods);
// Roll Kafka
LOGGER.info("Force Rolling Update of Kafka via annotation.");
Map<String, String> kafkaPods = PodUtils.podSnapshot(clusterOperator.getDeploymentNamespace(), kafkaSelector);
kafkaPods.keySet().forEach(podName -> {
    kubeClient(clusterOperator.getDeploymentNamespace()).editPod(podName).edit(pod -> new PodBuilder(pod)
        .editMetadata()
            .addToAnnotations(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")
        .endMetadata()
        .build());
});
LOGGER.info("Wait for next reconciliation to happen.");
RollingUpdateUtils.waitTillComponentHasRolled(clusterOperator.getDeploymentNamespace(), kafkaSelector, kafkaReplicas, kafkaPods);
LOGGER.info("Waiting for clients to finish sending/receiving messages.");
ClientUtils.waitForClientSuccess(producerName, clusterOperator.getDeploymentNamespace(), messageCount);
ClientUtils.waitForClientSuccess(consumerName, clusterOperator.getDeploymentNamespace(), messageCount);
}
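Both tests trigger a manual roll by annotating each pod through the test framework's kubeClient() wrapper. A standalone sketch of the same pattern against the plain fabric8 KubernetesClient follows; it assumes fabric8 6.x and that Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE resolves to "strimzi.io/manual-rolling-update", with the namespace and pod name in main() invented for illustration.

import io.fabric8.kubernetes.api.model.PodBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

public class ManualRollTrigger {
    /** Adds the annotation the Cluster Operator watches for manual rolling updates. */
    public static void annotateForRoll(KubernetesClient client, String namespace, String podName) {
        client.pods()
            .inNamespace(namespace)
            .withName(podName)
            .edit(pod -> new PodBuilder(pod)
                .editMetadata()
                    // Picked up on the operator's next reconciliation, which rolls the pod.
                    .addToAnnotations("strimzi.io/manual-rolling-update", "true")
                .endMetadata()
                .build());
    }

    public static void main(String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            annotateForRoll(client, "my-namespace", "my-cluster-kafka-0");
        }
    }
}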
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi-kafka-operator by strimzi.
The class TopicOperatorTest, method testReconcile_noResource_withKafka_withPrivate.
/**
 * Tests reconciliation when a resource was deleted while the operator
 * wasn't running.
 */
@Test
public void testReconcile_noResource_withKafka_withPrivate(VertxTestContext context) throws InterruptedException {
Topic kubeTopic = null;
Topic kafkaTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("cleanup.policy", "bar")).build();
Topic privateTopic = kafkaTopic;
CountDownLatch topicCreatedInKafkaAndStored = new CountDownLatch(2);
mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> topicCreatedInKafkaAndStored.countDown());
mockKafka.setDeleteTopicResponse(topicName, null);
mockTopicStore.setCreateTopicResponse(topicName, null);
mockTopicStore.create(kafkaTopic).onComplete(ar -> topicCreatedInKafkaAndStored.countDown());
mockTopicStore.setDeleteTopicResponse(topicName, null);
topicCreatedInKafkaAndStored.await();
LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString());
Checkpoint async = context.checkpoint();
topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> {
assertSucceeded(context, reconcileResult);
mockTopicStore.assertNotExists(context, topicName);
mockK8s.assertNotExists(context, topicName.asKubeName());
mockKafka.assertNotExists(context, topicName);
mockK8s.assertNoEvents(context);
async.flag();
});
}
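The state this test sets up (kubeTopic == null with matching kafkaTopic and privateTopic) is one cell of the operator's three-way diff between Kubernetes, Kafka, and the private topic store. A hypothetical simplification of that decision follows, using Object stand-ins for the Topic type; it is not TopicOperator's real reconcile() code.

public class ReconcileSketch {
    /** Each argument is null when the corresponding side has no record of the topic. */
    static String decide(Object kube, Object kafka, Object priv) {
        if (kube == null && kafka != null && priv != null) {
            // The case covered above: the KafkaTopic resource disappeared while the
            // operator was down, so the deletion is propagated to Kafka and the store.
            return "delete from Kafka and from the topic store";
        } else if (kube != null && kafka == null && priv != null) {
            // Mirror image: the Kafka topic disappeared, so delete the resource.
            return "delete the KafkaTopic resource and the store entry";
        } else if (priv == null) {
            // No private copy: a topic appeared on one side; mirror it to the other.
            return "create the missing side and record a private copy";
        }
        return "three-way merge of kube and kafka against the private copy";
    }

    public static void main(String[] args) {
        System.out.println(decide(null, new Object(), new Object()));
    }
}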
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi-kafka-operator by strimzi.
The class TopicOperatorTest, method resourceAdded.
/**
 * Triggers {@link TopicOperator#onResourceEvent(LogContext, KafkaTopic, io.fabric8.kubernetes.client.Watcher.Action)}
 * and has the mock Kafka and TopicStore respond with the given exceptions.
 */
private TopicOperator resourceAdded(VertxTestContext context, CountDownLatch latch, Exception createException, Exception storeException) throws InterruptedException {
mockKafka.setCreateTopicResponse(topicName.toString(), createException);
mockKafka.setTopicExistsResult(t -> Future.succeededFuture(false));
mockTopicStore.setCreateTopicResponse(topicName, storeException);
KafkaTopic kafkaTopic = new KafkaTopicBuilder()
    .withMetadata(metadata)
    .withNewSpec()
        .withReplicas(2)
        .withPartitions(10)
    .endSpec()
    .build();
mockKafka.setTopicMetadataResponses(
    topicName -> Future.succeededFuture(),
    topicName -> Future.succeededFuture(Utils.getTopicMetadata(TopicSerialization.fromTopicResource(kafkaTopic))));
LogContext logContext = LogContext.kubeWatch(Watcher.Action.ADDED, kafkaTopic);
Checkpoint async = context.checkpoint();
mockK8s.setGetFromNameResponse(new ResourceName(kafkaTopic), Future.succeededFuture(kafkaTopic));
topicOperator.onResourceEvent(logContext, kafkaTopic, ADDED).onComplete(ar -> {
if (createException != null || storeException != null) {
assertFailed(context, ar);
Class<? extends Exception> expectedExceptionType;
if (createException != null) {
expectedExceptionType = createException.getClass();
} else {
expectedExceptionType = storeException.getClass();
}
if (!expectedExceptionType.equals(ar.cause().getClass())) {
ar.cause().printStackTrace();
}
context.verify(() -> assertThat(ar.cause().getMessage(), ar.cause().getClass().getName(), is(expectedExceptionType.getName())));
TopicName topicName = TopicSerialization.fromTopicResource(kafkaTopic).getTopicName();
if (createException != null) {
mockKafka.assertNotExists(context, topicName);
} else {
mockKafka.assertExists(context, topicName);
}
mockTopicStore.assertNotExists(context, topicName);
// TODO mockK8s.assertContainsEvent(context, e -> "Error".equals(e.getKind()));
} else {
assertSucceeded(context, ar);
Topic expectedTopic = TopicSerialization.fromTopicResource(kafkaTopic);
mockKafka.assertContains(context, expectedTopic);
mockTopicStore.assertContains(context, expectedTopic);
mockK8s.assertNoEvents(context);
}
if (latch != null) {
latch.countDown();
}
async.flag();
});
if (!context.awaitCompletion(60, TimeUnit.SECONDS)) {
context.failNow(new Throwable("Test timeout"));
}
return topicOperator;
}
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi-kafka-operator by strimzi.
The class TopicOperatorTest, method topicDeleted.
private void topicDeleted(VertxTestContext context, Exception storeException, Exception k8sException, boolean topicExists) {
Topic kubeTopic = new Topic.Builder(topicName.toString(), 10, (short) 2, map("cleanup.policy", "bar")).withMapName(resourceName).build();
Topic kafkaTopic = kubeTopic;
Topic privateTopic = kubeTopic;
mockK8s.setCreateResponse(resourceName, null).createResource(TopicSerialization.toTopicResource(kubeTopic, labels));
mockK8s.setDeleteResponse(resourceName, k8sException);
mockTopicStore.setCreateTopicResponse(topicName, null).create(privateTopic);
mockTopicStore.setDeleteTopicResponse(topicName, storeException);
mockKafka.setTopicExistsResult(t -> Future.succeededFuture(topicExists));
LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString());
Checkpoint async = context.checkpoint();
topicOperator.onTopicDeleted(logContext, topicName).onComplete(ar -> {
if (k8sException != null || storeException != null || topicExists) {
assertFailed(context, ar);
if (topicExists) {
mockK8s.assertExists(context, resourceName);
} else if (k8sException == null) {
mockK8s.assertNotExists(context, resourceName);
} else {
mockK8s.assertExists(context, resourceName);
}
mockTopicStore.assertExists(context, topicName);
} else {
assertSucceeded(context, ar);
mockK8s.assertNotExists(context, resourceName);
mockTopicStore.assertNotExists(context, topicName);
}
async.flag();
});
}
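For reference, callers of this parameterized helper would exercise the failure matrix it encodes along the following lines; these call sites are hypothetical, and the actual test names in TopicOperatorTest may differ.

@Test
public void testOnTopicDeleted(VertxTestContext context) {
    // Happy path: topic already gone from Kafka, no injected failures.
    topicDeleted(context, null, null, false);
}

@Test
public void testOnTopicDeleted_k8sDeleteFails(VertxTestContext context) {
    // The K8s delete fails, so the resource and the store entry must survive.
    topicDeleted(context, null, new RuntimeException("simulated k8s failure"), false);
}

@Test
public void testOnTopicDeleted_storeDeleteFails(VertxTestContext context) {
    // The store delete fails, so the store entry must survive.
    topicDeleted(context, new RuntimeException("simulated store failure"), null, false);
}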