Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From the class KafkaAssemblyOperatorCustomCertTest, method testPodToRestartIsEmptyAndNoCustomCertAnnotationsWhenNoCustomCertificates.
@Test
public void testPodToRestartIsEmptyAndNoCustomCertAnnotationsWhenNoCustomCertificates(VertxTestContext context) {
kafka = new KafkaBuilder(createKafka())
        .editSpec()
            .editKafka()
                .withListeners(
                        new GenericKafkaListenerBuilder()
                                .withName("tls").withPort(9093).withType(KafkaListenerType.INTERNAL).withTls(true)
                                .build(),
                        new GenericKafkaListenerBuilder()
                                .withName("external").withPort(9094).withType(KafkaListenerType.NODEPORT).withTls(true)
                                .build())
            .endKafka()
        .endSpec()
        .build();
Crds.kafkaOperation(client).inNamespace(namespace).withName(clusterName).patch(kafka);
Checkpoint async = context.checkpoint();
operator.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), kafka).onComplete(context.succeeding(v -> context.verify(() -> {
assertThat(functionArgumentCaptor, hasSize(1));
StatefulSet reconcileSts = client.apps().statefulSets().inNamespace(namespace).withName(KafkaResources.kafkaStatefulSetName(clusterName)).get();
assertThat(reconcileSts.getSpec().getTemplate().getMetadata().getAnnotations(), not(hasKey(KafkaCluster.ANNO_STRIMZI_CUSTOM_LISTENER_CERT_THUMBPRINTS)));
List<Function<Pod, List<String>>> capturedFunctions = functionArgumentCaptor;
assertThat(capturedFunctions, hasSize(1));
Function<Pod, List<String>> isPodToRestart = capturedFunctions.get(0);
assertThat(isPodToRestart.apply(getPod(reconcileSts)), empty());
async.flag();
})));
}
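For contrast, here is a minimal sketch (not part of the test above) of a listener carrying a custom certificate via the listener's brokerCertChainAndKey configuration; with such a listener the operator would be expected to set the ANNO_STRIMZI_CUSTOM_LISTENER_CERT_THUMBPRINTS annotation that this test asserts is absent. The Secret name and key file names below are placeholders, not values from the test.
GenericKafkaListener tlsWithCustomCert = new GenericKafkaListenerBuilder()
        .withName("tls")
        .withPort(9093)
        .withType(KafkaListenerType.INTERNAL)
        .withTls(true)
        .withNewConfiguration()
            // Placeholder reference to a user-provided Secret holding the listener certificate
            .withNewBrokerCertChainAndKey()
                .withSecretName("my-listener-certificate")
                .withCertificate("tls.crt")
                .withKey("tls.key")
            .endBrokerCertChainAndKey()
        .endConfiguration()
        .build();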
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From the class KafkaAssemblyOperatorIngressKafkaListenerTest, method testIngressV1Beta1.
@Test
public void testIngressV1Beta1(VertxTestContext context) {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata().withName(NAME).withNamespace(NAMESPACE).endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("ingress").withPort(9094).withTls(true).withType(KafkaListenerType.INGRESS)
                        .withNewConfiguration()
                            .withNewBootstrap().withHost("bootstrap.mydomain.tld").endBootstrap()
                            .withBrokers(
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(0).withHost("broker-0.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(1).withHost("broker-1.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(2).withHost("broker-2.mydomain.tld").build())
                        .endConfiguration()
                        .build())
                .withNewEphemeralStorage().endEphemeralStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewEphemeralStorage().endEphemeralStorage()
            .endZookeeper()
            .withNewEntityOperator()
                .withNewUserOperator().endUserOperator()
                .withNewTopicOperator().endTopicOperator()
            .endEntityOperator()
        .endSpec()
        .build();
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock the CRD Operator for Kafka resources
CrdOperator mockKafkaOps = supplier.kafkaOperator;
when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafka));
when(mockKafkaOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(kafka);
when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture());
// Mock the StatefulSet operations
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(eq(NAMESPACE), eq(KafkaCluster.kafkaClusterName(NAME)))).thenReturn(Future.succeededFuture());
// Mock the StrimziPodSet operator
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
// Mock the Pod operations
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
// Mock ingress v1beta1 ops
IngressV1Beta1Operator mockIngressV1Beta1ops = supplier.ingressV1Beta1Operations;
ArgumentCaptor<io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress> ingressV1Beta1Captor = ArgumentCaptor.forClass(io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress.class);
when(mockIngressV1Beta1ops.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressV1Beta1ops.reconcile(any(), anyString(), anyString(), ingressV1Beta1Captor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress())));
when(mockIngressV1Beta1ops.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// Mock ingress v1 ops
IngressOperator mockIngressOps = supplier.ingressOperations;
ArgumentCaptor<Ingress> ingressCaptor = ArgumentCaptor.forClass(Ingress.class);
when(mockIngressOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressOps.reconcile(any(), anyString(), anyString(), ingressCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress())));
when(mockIngressOps.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
KafkaAssemblyOperator op = new MockKafkaAssemblyOperatorForIngressTests(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(KafkaVersionTestUtils.getKafkaVersionLookup()));
Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME);
Checkpoint async = context.checkpoint();
op.reconcile(reconciliation).onComplete(context.succeeding(v -> context.verify(() -> {
assertThat(ingressCaptor.getAllValues().size(), is(0));
assertThat(ingressV1Beta1Captor.getAllValues().size(), is(4));
verify(mockIngressOps, never()).list(any(), any());
verify(mockIngressOps, never()).reconcile(any(), any(), any(), any());
verify(mockIngressOps, never()).hasIngressAddress(any(), any(), any(), anyLong(), anyLong());
async.flag();
})));
}
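This test and testIngressV1 below together pin down how the operator chooses between the two Ingress API versions: four v1beta1 Ingresses (one bootstrap plus one per broker) on the Kubernetes 1.16 platform mocked here, and four v1 Ingresses on 1.19. As a standalone illustration of that gate, written as a sketch rather than the operator's actual implementation:
// Hedged sketch, not Strimzi code: networking.k8s.io/v1 Ingress is generally
// available from Kubernetes 1.19, so older clusters fall back to v1beta1.
static boolean useIngressV1(int major, int minor) {
    return major > 1 || (major == 1 && minor >= 19);
}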
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From the class KafkaAssemblyOperatorIngressKafkaListenerTest, method testIngressV1.
@Test
public void testIngressV1(VertxTestContext context) {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata().withName(NAME).withNamespace(NAMESPACE).endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("ingress").withPort(9094).withTls(true).withType(KafkaListenerType.INGRESS)
                        .withNewConfiguration()
                            .withNewBootstrap().withHost("bootstrap.mydomain.tld").endBootstrap()
                            .withBrokers(
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(0).withHost("broker-0.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(1).withHost("broker-1.mydomain.tld").build(),
                                    new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(2).withHost("broker-2.mydomain.tld").build())
                        .endConfiguration()
                        .build())
                .withNewEphemeralStorage().endEphemeralStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewEphemeralStorage().endEphemeralStorage()
            .endZookeeper()
            .withNewEntityOperator()
                .withNewUserOperator().endUserOperator()
                .withNewTopicOperator().endTopicOperator()
            .endEntityOperator()
        .endSpec()
        .build();
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock the CRD Operator for Kafka resources
CrdOperator mockKafkaOps = supplier.kafkaOperator;
when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafka));
when(mockKafkaOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(kafka);
when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture());
// Mock the StatefulSet operations
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(eq(NAMESPACE), eq(KafkaCluster.kafkaClusterName(NAME)))).thenReturn(Future.succeededFuture());
// Mock the StrimziPodSet operator
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
// Mock the Pod operations
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
// Mock ingress v1beta1 ops
IngressV1Beta1Operator mockIngressV1Beta1ops = supplier.ingressV1Beta1Operations;
ArgumentCaptor<io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress> ingressV1Beta1Captor = ArgumentCaptor.forClass(io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress.class);
when(mockIngressV1Beta1ops.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressV1Beta1ops.reconcile(any(), anyString(), anyString(), ingressV1Beta1Captor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress())));
when(mockIngressV1Beta1ops.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
// Mock ingress v1 ops
IngressOperator mockIngressOps = supplier.ingressOperations;
ArgumentCaptor<Ingress> ingressCaptor = ArgumentCaptor.forClass(Ingress.class);
when(mockIngressOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockIngressOps.reconcile(any(), anyString(), anyString(), ingressCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress())));
when(mockIngressOps.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
KafkaAssemblyOperator op = new MockKafkaAssemblyOperatorForIngressTests(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_19), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(KafkaVersionTestUtils.getKafkaVersionLookup()));
Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME);
Checkpoint async = context.checkpoint();
op.reconcile(reconciliation).onComplete(context.succeeding(v -> context.verify(() -> {
assertThat(ingressCaptor.getAllValues().size(), is(4));
assertThat(ingressV1Beta1Captor.getAllValues().size(), is(0));
verify(mockIngressV1Beta1ops, never()).list(any(), any());
verify(mockIngressV1Beta1ops, never()).reconcile(any(), any(), any(), any());
verify(mockIngressV1Beta1ops, never()).hasIngressAddress(any(), any(), any(), anyLong(), anyLong());
async.flag();
})));
}
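The per-broker host overrides used by both ingress tests can also be generated in a loop instead of being listed by hand. A short sketch using the same builder API as above (java.util.stream and the Strimzi listener model imports are assumed):
// Build one host override per broker: broker-0.mydomain.tld, broker-1.mydomain.tld, ...
List<GenericKafkaListenerConfigurationBroker> brokerHosts = java.util.stream.IntStream.range(0, 3)
        .mapToObj(i -> new GenericKafkaListenerConfigurationBrokerBuilder()
                .withBroker(i)
                .withHost("broker-" + i + ".mydomain.tld")
                .build())
        .collect(java.util.stream.Collectors.toList());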
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From the class KafkaAssemblyOperatorManualRollingUpdatesTest, method manualPodCleanup.
public void manualPodCleanup(VertxTestContext context, boolean useStrimziPodSets) {
Kafka kafka = new KafkaBuilder()
        .withNewMetadata().withName(clusterName).withNamespace(namespace).withGeneration(2L).endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withListeners(new GenericKafkaListenerBuilder()
                        .withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false)
                        .build())
                .withNewJbodStorage()
                    .withVolumes(
                            new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build(),
                            new PersistentClaimStorageBuilder().withId(1).withSize("100Gi").build())
                .endJbodStorage()
            .endKafka()
            .withNewZookeeper()
                .withReplicas(3)
                .withNewPersistentClaimStorage().withSize("100Gi").endPersistentClaimStorage()
            .endZookeeper()
        .endSpec()
        .build();
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
if (useStrimziPodSets) {
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, null)));
when(mockPodSetOps.deleteAsync(any(), any(), eq(zkCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getName()), any())).thenReturn(Future.succeededFuture());
when(mockPodSetOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(kafkaCluster.generatePodSet(kafka.getSpec().getKafka().getReplicas(), false, null, null, null)));
when(mockPodSetOps.deleteAsync(any(), any(), eq(kafkaCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getName()), any())).thenReturn(Future.succeededFuture());
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
} else {
StatefulSetOperator mockStsOps = supplier.stsOperations;
when(mockStsOps.getAsync(any(), eq(zkCluster.getName()))).thenReturn(Future.succeededFuture(zkCluster.generateStatefulSet(false, null, null)));
when(mockStsOps.getAsync(any(), eq(kafkaCluster.getName()))).thenReturn(Future.succeededFuture(kafkaCluster.generateStatefulSet(false, null, null, null)));
when(mockStsOps.deleteAsync(any(), any(), eq(zkCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockStsOps.deleteAsync(any(), any(), eq(kafkaCluster.getName()), anyBoolean())).thenReturn(Future.succeededFuture());
when(mockStsOps.reconcile(any(), any(), eq(zkCluster.getName()), any())).thenReturn(Future.succeededFuture());
when(mockStsOps.reconcile(any(), any(), eq(kafkaCluster.getName()), any())).thenReturn(Future.succeededFuture());
CrdOperator<KubernetesClient, StrimziPodSet, StrimziPodSetList> mockPodSetOps = supplier.strimziPodSetOperator;
when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(null));
}
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenAnswer(i -> {
List<Pod> pods = new ArrayList<>();
pods.add(podWithName("my-cluster-zookeeper-0"));
pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-1", Collections.singletonMap(AbstractScalableResourceOperator.ANNO_STRIMZI_IO_DELETE_POD_AND_PVC, "true")));
pods.add(podWithName("my-cluster-zookeeper-2"));
return Future.succeededFuture(pods);
});
when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> {
List<Pod> pods = new ArrayList<>();
pods.add(podWithNameAndAnnotations("my-cluster-kafka-0", Collections.singletonMap(AbstractScalableResourceOperator.ANNO_STRIMZI_IO_DELETE_POD_AND_PVC, "true")));
pods.add(podWithName("my-cluster-kafka-1"));
pods.add(podWithName("my-cluster-kafka-2"));
return Future.succeededFuture(pods);
});
when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
PvcOperator mockPvcOps = supplier.pvcOperations;
when(mockPvcOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(zkCluster.generatePersistentVolumeClaims()));
when(mockPvcOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(kafkaCluster.generatePersistentVolumeClaims(kafkaCluster.getStorage())));
CrdOperator mockKafkaOps = supplier.kafkaOperator;
when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka));
when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka);
when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture());
ClusterOperatorConfig config;
if (useStrimziPodSets) {
config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS, "+UseStrimziPodSets");
} else {
config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS, ClusterOperatorConfig.DEFAULT_OPERATION_TIMEOUT_MS);
}
MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, passwordGenerator, supplier, config);
Checkpoint async = context.checkpoint();
kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)).onComplete(context.succeeding(v -> context.verify(() -> {
// Verify the manual pod and PVC cleanup
assertThat(kao.cleanedPodsCount, is(2));
assertThat(kao.cleanedPods, Matchers.containsInAnyOrder("my-cluster-zookeeper-1", "my-cluster-kafka-0"));
assertThat(kao.cleanedPvcs, Matchers.containsInAnyOrder("data-my-cluster-zookeeper-1", "data-0-my-cluster-kafka-0", "data-1-my-cluster-kafka-0"));
assertThat(kao.createdPvcs, Matchers.containsInAnyOrder("data-my-cluster-zookeeper-1", "data-0-my-cluster-kafka-0", "data-1-my-cluster-kafka-0"));
async.flag();
})));
}
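The podWithName and podWithNameAndAnnotations helpers are referenced above but not shown in this excerpt. A hedged reconstruction of what such helpers plausibly look like, built with the Fabric8 PodBuilder (the real helpers live elsewhere in the test class; java.util and Fabric8 model imports are assumed):
// Hypothetical reconstruction, not the actual test helpers.
private static Pod podWithName(String name) {
    return podWithNameAndAnnotations(name, Collections.emptyMap());
}
private static Pod podWithNameAndAnnotations(String name, Map<String, String> annotations) {
    return new PodBuilder()
            .withNewMetadata()
                .withName(name)
                .withAnnotations(annotations)
            .endMetadata()
            .build();
}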
Use of io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder in project strimzi by strimzi.
From the class ModelUtilsTest, method testEmptyTolerations.
@ParallelTest
public void testEmptyTolerations() {
Toleration t1 = new TolerationBuilder().withValue("").withEffect("NoExecute").build();
Toleration t2 = new TolerationBuilder().withValue(null).withEffect("NoExecute").build();
PodTemplate pt1 = new PodTemplate();
pt1.setTolerations(singletonList(t1));
PodTemplate pt2 = new PodTemplate();
pt2.setTolerations(singletonList(t2));
Kafka kafka = new KafkaBuilder()
        .withNewMetadata().withName("my-cluster").withNamespace("my-namespace").endMetadata()
        .withNewSpec()
            .withNewKafka()
                .withReplicas(3)
                .withNewEphemeralStorage().endEphemeralStorage()
                .withListeners(new GenericKafkaListenerBuilder()
                        .withType(KafkaListenerType.INTERNAL).withPort(9092).withName("plain").withTls(false)
                        .build())
            .endKafka()
        .endSpec()
        .build();
KafkaCluster model1 = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup());
ModelUtils.parsePodTemplate(model1, pt1);
KafkaCluster model2 = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup());
ModelUtils.parsePodTemplate(model2, pt2);
assertThat(model1.getTolerations(), is(model2.getTolerations()));
}
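The assertion relies on an empty-string toleration value and a null value producing identical Toleration objects once the pod template is parsed. The following is only an illustration of that expected normalization, not ModelUtils' actual code (java.util imports assumed):
// Illustrative sketch: map empty toleration values to null so that t1 and t2
// above end up as equal Toleration objects after parsing.
static List<Toleration> normalizeEmptyTolerationValues(List<Toleration> tolerations) {
    return tolerations.stream()
            .map(t -> new TolerationBuilder(t)
                    .withValue(t.getValue() == null || t.getValue().isEmpty() ? null : t.getValue())
                    .build())
            .collect(java.util.stream.Collectors.toList());
}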